diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000..ec74343346 --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,43 @@ +Hi there, + +Thank you for opening an issue. Please note that we try to keep the Terraform issue tracker reserved for bug reports and feature requests. For general usage questions, please see: https://www.terraform.io/community.html. + +### Terraform Version +Run `terraform -v` to show the version. If you are not running the latest version of Terraform, please upgrade because your issue may have already been fixed. + +### Affected Resource(s) +Please list the affected resources, for example: +- ibm_compute_instance +- ibm_storage_file + +If this issue appears to affect multiple resources, it may be an issue with Terraform's core, so please mention this. + +### Terraform Configuration Files +```hcl +# Copy-paste your Terraform configurations here - for large Terraform configs, +# please use a service like Dropbox and share a link to the ZIP file. For +# security, you can also encrypt the files using our GPG public key. +``` + +### Debug Output +Please provide a link to a GitHub Gist containing the complete debug output: https://www.terraform.io/docs/internals/debugging.html. Please do NOT paste the debug output in the issue; just paste a link to the Gist. + +### Panic Output +If Terraform produced a panic, please provide a link to a GitHub Gist containing the output of the `crash.log`. + +### Expected Behavior +What should have happened? + +### Actual Behavior +What actually happened? + +### Steps to Reproduce +Please list the steps required to reproduce the issue, for example: +1. `terraform apply` + +### Important Factoids +Is there anything atypical about your accounts that we should know? For example: Running in EC2 Classic? Custom version of OpenStack? Tight ACLs? + +### References +Are there any other GitHub issues (open or closed) or Pull Requests that should be linked here? For example: +- GH-1234 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..fa67e05ea0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,31 @@ +*.dll +*.exe +.DS_Store +example.tf +terraform.tfplan +terraform.tfstate +bin/ +modules-dev/ +/pkg/ +website/.vagrant +website/.bundle +website/build +website/node_modules +.vagrant/ +*.backup +./*.tfstate +.terraform/ +*.log +*.bak +*~ +.*.swp +.idea +*.iml +*.test + +website/vendor + +# Test exclusions +!command/test-fixtures/**/*.tfstate +!command/test-fixtures/**/.terraform/ + diff --git a/GNUmakefile b/GNUmakefile new file mode 100644 index 0000000000..d0a258160f --- /dev/null +++ b/GNUmakefile @@ -0,0 +1,70 @@ +TEST?=$$(go list ./... |grep -v 'vendor') +GOFMT_FILES?=$$(find . -name '*.go' |grep -v vendor) +COVER_TEST?=$$(go list ./... |grep -v 'vendor') + +default: build + +tools: + @go get github.com/kardianos/govendor + @go get github.com/mitchellh/gox + @go get golang.org/x/tools/cmd/cover + + +build: fmtcheck + go install + +bin: fmtcheck vet tools + @TF_RELEASE=1 sh -c "'$(CURDIR)/scripts/build.sh'" + +dev: fmtcheck vet tools + @TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'" + +test: fmtcheck + go test -i $(TEST) || exit 1 + echo $(TEST) | \ + xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4 + +testacc: fmtcheck + TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 300m + +testrace: fmtcheck + TF_ACC= go test -race $(TEST) $(TESTARGS) + +cover: + @go tool cover 2>/dev/null; if [ $$?
-eq 3 ]; then \ + go get -u golang.org/x/tools/cmd/cover; \ + fi + go test $(COVER_TEST) -coverprofile=coverage.out + go tool cover -html=coverage.out + rm coverage.out + +vet: + @echo "go vet ." + @go vet $$(go list ./... | grep -v vendor/) ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +fmt: + gofmt -w $(GOFMT_FILES) + +fmtcheck: + @sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" + +errcheck: + @sh -c "'$(CURDIR)/scripts/errcheck.sh'" + +vendor-status: + @govendor status + +test-compile: fmtcheck + @if [ "$(TEST)" = "./..." ]; then \ + echo "ERROR: Set TEST to a specific package. For example,"; \ + echo " make test-compile TEST=./builtin/providers/aws"; \ + exit 1; \ + fi + go test -c $(TEST) $(TESTARGS) + +.PHONY: build bin dev test testacc testrace cover vet fmt fmtcheck errcheck vendor-status test-compile diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
“Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. 
You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. 
The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. 
Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. 
+ diff --git a/README.md b/README.md new file mode 100644 index 0000000000..7255715711 --- /dev/null +++ b/README.md @@ -0,0 +1,62 @@ +Terraform Provider +================== + +- Website: https://www.terraform.io +- [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby) +- Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool) + +Requirements +------------ + +- [Terraform](https://www.terraform.io/downloads.html) 0.9.3+ +- [Go](https://golang.org/doc/install) 1.8+ (to build the provider plugin) + +Building The Provider +--------------------- + +Clone repository to: `$GOPATH/src/github.com/terraform-providers/terraform-provider-ibm` + +```sh +$ mkdir -p $GOPATH/src/github.com/terraform-providers; cd $GOPATH/src/github.com/terraform-providers +$ git clone git@github.com:IBM-Bluemix/terraform-provider-ibm.git +``` + +Enter the provider directory and build the provider: + +```sh +$ cd $GOPATH/src/github.com/terraform-providers/terraform-provider-ibm +$ make build +``` + +Using the provider +---------------------- + +See the [IBM Provider documentation](https://ibm-bluemix.github.io/tf-ibm-docs) to get started using the IBM provider. + +Developing the Provider +--------------------------- + +If you wish to work on the provider, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.8+ is *required*). You'll also need to correctly set up a [GOPATH](http://golang.org/doc/code.html#GOPATH), as well as add `$GOPATH/bin` to your `$PATH`. + +To compile the provider, run `make build`. This will build the provider and put the provider binary in the `$GOPATH/bin` directory. + +```sh +$ make build +... +$ $GOPATH/bin/terraform-provider-ibm +... +``` + +In order to test the provider, you can simply run `make test`. + +```sh +$ make test +``` + +In order to run the full suite of Acceptance tests, run `make testacc`. + +*Note:* Acceptance tests create real resources, and often cost money to run. + +```sh +$ make testacc +``` diff --git a/examples/ibm-app/README.md b/examples/ibm-app/README.md new file mode 100644 index 0000000000..ec2884a814 --- /dev/null +++ b/examples/ibm-app/README.md @@ -0,0 +1,33 @@ +# IBM Application example + +This example shows how to deploy an application in the IBM PaaS. + +In variables.tf you will find `git_repo`, which is the git URL of a Cloud Foundry application repository. +You must provide valid values for the variables `org` and `space`. + +When you perform `terraform apply`, the provisioner downloads the code from `git_repo` and zips it at the +location specified by the variable `app_zip`. + +The example provisions a Cloudant DB service instance, creates a route, and assigns that route and service instance to the application. + +To run, configure your IBM Cloud provider. + +Running the example + +For the planning phase + +``` +terraform plan +``` + +For the apply phase + +``` +terraform apply +``` + +To test the stack, wait a few minutes and then launch a browser with the cluster URL. To remove the stack:
+ +``` + terraform destroy +``` \ No newline at end of file diff --git a/examples/ibm-app/main.tf b/examples/ibm-app/main.tf new file mode 100644 index 0000000000..b8d2ca91a5 --- /dev/null +++ b/examples/ibm-app/main.tf @@ -0,0 +1,69 @@ +resource "null_resource" "prepare_app_zip" { + triggers = { + app_version = "${var.app_version}" + git_repo = "${var.git_repo}" + } + provisioner "local-exec" { + command = < /tmp/nginx.log +service nginx start \ No newline at end of file diff --git a/examples/ibm-compute-asg/outputs.tf b/examples/ibm-compute-asg/outputs.tf new file mode 100644 index 0000000000..7a07d0e0f5 --- /dev/null +++ b/examples/ibm-compute-asg/outputs.tf @@ -0,0 +1,4 @@ +#ip_address - cluster address +output "cluster_address" { + value = "http://${ibm_lb.local_lb.ip_address}" +} diff --git a/examples/ibm-compute-asg/variables.tf b/examples/ibm-compute-asg/variables.tf new file mode 100644 index 0000000000..a8654a210a --- /dev/null +++ b/examples/ibm-compute-asg/variables.tf @@ -0,0 +1,107 @@ +variable "ssh_key_path" { + default = "~/.ssh/id_rsa.pub" +} + +variable "ssh-label" { + default = "ssh_key_scale_group" +} + +variable "lb-connections" { + default = 250 +} + +variable "datacenter" { + default = "dal09" +} + +variable "lb-dedicated" { + default = false +} + +variable "lb-servvice-group-port" { + default = 80 +} + +variable "lb-servvice-group-routing-method" { + default = "CONSISTENT_HASH_IP" +} + +variable "lb-servvice-group-routing-type" { + default = "HTTP" +} + +variable "lb-servvice-group-routing-allocation" { + default = 100 +} + +variable "auto-scale-name" { + default = "sample-http-cluster" +} + +variable "auto-scale-region" { + default = "na-usa-central-1" +} + +variable "auto-scale-cooldown" { + default = 30 +} + +variable "auto-scale-minimum-member-count" { + default = 1 +} + +variable "auto-scale-maximumm-member-count" { + default = 10 +} + +variable "auto-scale-termination-policy" { + default = "CLOSEST_TO_NEXT_CHARGE" +} + +variable "auto-scale-lb-service-port" { + default = 80 +} + +variable "auto-scale-lb-service-health-check-type" { + default = "HTTP" +} + +variable "vm-hostname" { + default = "virtual-guest" +} + +variable "vm-domain" { + default = "example.com" +} + +variable "vm-cores" { + default = 1 +} + +variable "vm-memory" { + default = 4096 +} + +variable "vm-os-reference-code" { + default = "UBUNTU_14_64" +} + +variable "vm-post-install-script-uri" { + default = "https://raw.githubusercontent.com/hkantare/test/master/nginx.sh" +} + +variable "scale-policy-name" { + default = "scale-policy" +} + +variable "scale-policy-type" { + default = "ABSOLUTE" +} + +variable "scale-policy-scale-amount" { + default = 2 +} + +variable "scale-policy-cooldown" { + default = 35 +} diff --git a/examples/ibm-compute-public-ip/README.md b/examples/ibm-compute-public-ip/README.md new file mode 100644 index 0000000000..8f1bb4fc0f --- /dev/null +++ b/examples/ibm-compute-public-ip/README.md @@ -0,0 +1,13 @@ +# Global IP + +The Global IP example launches a web server and installs nginx. It also configures a set of firewall rules to allow access to certain IP addresses/ports from specific internet addresses while denying traffic from other sources.
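+ +For reference, the catch-all deny rule that this example's main.tf pairs with an SSH permit rule looks like this: + +``` +resource "ibm_firewall_policy" "rules" { + firewall_id = "${ibm_firewall.accfw.id}" + # This rule matches all inbound TCP ports (1-65535) from any source. + rules = { + "action" = "deny" + "src_ip_address"= "0.0.0.0" + "src_ip_cidr"= 0 + "dst_ip_address"= "any" + "dst_ip_cidr"= 32 + "dst_port_range_start"= 1 + "dst_port_range_end"= 65535 + "notes"= "Deny all" + "protocol"= "tcp" + } +} +```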
+Global IPs provide IP flexibility by allowing users to shift workloads between servers (even in different datacenters). + +To run, configure your IBM Cloud provider. + +Running the example +``` +terraform apply +``` + +After the apply completes, enter the global IP from the outputs in your browser to see the nginx welcome page. \ No newline at end of file diff --git a/examples/ibm-compute-public-ip/gip.sh b/examples/ibm-compute-public-ip/gip.sh new file mode 100644 index 0000000000..8a058c7861 --- /dev/null +++ b/examples/ibm-compute-public-ip/gip.sh @@ -0,0 +1,4 @@ +#!/bin/bash -v +apt-get update -y +apt-get install -y nginx > /tmp/nginx.log + diff --git a/examples/ibm-compute-public-ip/main.tf b/examples/ibm-compute-public-ip/main.tf new file mode 100644 index 0000000000..1044737202 --- /dev/null +++ b/examples/ibm-compute-public-ip/main.tf @@ -0,0 +1,62 @@ +provider "ibm" { +} + +resource "ibm_compute_ssh_key" "ssh_key_gip" { + label = "${var.ssh_label}" + public_key = "${file(var.ssh_key_path)}" +} + +resource "ibm_compute_vm_instance" "vm1" { + hostname = "terraform-ibm" + domain = "example.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "${var.datacenter}" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false + ssh_key_ids = [ + "${ibm_compute_ssh_key.ssh_key_gip.id}" + ] + provisioner "remote-exec" { + script = "gip.sh" + } +} + +resource "ibm_network_public_ip" "test-global-ip" { + routes_to = "${ibm_compute_vm_instance.vm1.ipv4_address}" +} + +resource "ibm_firewall" "accfw" { + ha_enabled = false + public_vlan_id = "${ibm_compute_vm_instance.vm1.public_vlan_id}" +} + +resource "ibm_firewall_policy" "rules" { + firewall_id = "${ibm_firewall.accfw.id}" + rules = { + "action" = "deny" + "src_ip_address"= "0.0.0.0" + "src_ip_cidr"= 0 + "dst_ip_address"= "any" + "dst_ip_cidr"= 32 + "dst_port_range_start"= 1 + "dst_port_range_end"= 65535 + "notes"= "Deny all" + "protocol"= "tcp" + } + rules = { + "action" = "permit" + "src_ip_address"= "0.0.0.0" + "src_ip_cidr"= 0 + "dst_ip_address"= "any" + "dst_ip_cidr"= 32 + "dst_port_range_start"= 22 + "dst_port_range_end"= 22 + "notes"= "Allow SSH" + "protocol"= "tcp" + } +} diff --git a/examples/ibm-compute-public-ip/output.tf b/examples/ibm-compute-public-ip/output.tf new file mode 100644 index 0000000000..202ee6ace6 --- /dev/null +++ b/examples/ibm-compute-public-ip/output.tf @@ -0,0 +1,3 @@ +output "global_ip" { + value = "http://${ibm_network_public_ip.test-global-ip.ip_address}" +} diff --git a/examples/ibm-compute-public-ip/variables.tf b/examples/ibm-compute-public-ip/variables.tf new file mode 100644 index 0000000000..6c800a6cbb --- /dev/null +++ b/examples/ibm-compute-public-ip/variables.tf @@ -0,0 +1,12 @@ +variable "datacenter" { + description = "The datacenter" + default = "dal01" +} + +variable "ssh_label" { + default = "Personal" +} + +variable "ssh_key_path" { + default = "~/.ssh/id2_rsa.pub" +} diff --git a/examples/ibm-lb-vpx/README.md b/examples/ibm-lb-vpx/README.md new file mode 100644 index 0000000000..3fe0d45799 --- /dev/null +++ b/examples/ibm-lb-vpx/README.md @@ -0,0 +1,29 @@ +# Cluster with Citrix load balancer example + +This example shows how to launch a cluster using virtual servers and a Citrix load balancer. + +This sample configuration will stand up a cluster of nodes, running on virtual guests, behind a global load balancer. The URL of the cluster is provided as an output.
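+ +After `terraform apply` finishes, the cluster address defined in outputs.tf can be read back with: + +``` +terraform output cluster_address +```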
+ +Please note: if NetScaler VPX 10.5 is used, Terraform uses NetScaler's REST API ([NITRO API](https://docs.citrix.com/en-us/netscaler/11/nitro-api.html)) for `ibm_lb_vpx_vip` resource management. The NITRO API is only accessible within the SoftLayer private network, so it is necessary to execute Terraform from the SoftLayer private network when you deploy NetScaler VPX 10.5 devices. SoftLayer [SSL VPN](http://www.softlayer.com/VPN-Access) can also be used for the private network connection. + +To run, configure your IBM Cloud provider. + +Running the example + +For the planning phase + +``` +terraform plan +``` + +For the apply phase + +``` +terraform apply +``` + +To test the stack, wait a few minutes and then launch a browser with the cluster URL. To remove the stack: + +``` + terraform destroy +``` \ No newline at end of file diff --git a/examples/ibm-lb-vpx/main.tf b/examples/ibm-lb-vpx/main.tf new file mode 100644 index 0000000000..ee2ca11fdd --- /dev/null +++ b/examples/ibm-lb-vpx/main.tf @@ -0,0 +1,52 @@ +provider "ibm" {} + +# Create a new ssh key +resource "ibm_compute_ssh_key" "ssh_key_performance" { + label = "${var.ssh_key_label}" + notes = "for scale group" + public_key = "${file(var.ssh_key_path)}" +} + +resource "ibm_compute_vm_instance" "virtualguest" { + count = "${var.vm_count}" + hostname = "ng-vm${count.index+1}" + domain = "terraform.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "${var.datacenter}" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false + post_install_script_uri = "https://raw.githubusercontent.com/hkantare/test/master/nginx.sh" +} + +resource "ibm_lb_vpx" "citrix_vpx" { + datacenter = "${var.datacenter}" + speed = 10 + version = "10.1" + plan = "Standard" + ip_count = 2 +} + +resource "ibm_lb_vpx_vip" "citrix_vpx_vip" { + name = "test_load_balancer_vip" + nad_controller_id = "${ibm_lb_vpx.citrix_vpx.id}" + load_balancing_method = "lc" + source_port = "${var.port}" + type = "HTTP" + virtual_ip_address = "${ibm_lb_vpx.citrix_vpx.vip_pool[0]}" +} + +resource "ibm_lb_vpx_service" "citrix_vpx_service" { + count = "${var.vm_count}" # one service entry per virtual guest + name = "ng-service${count.index+1}" + vip_id = "${ibm_lb_vpx_vip.citrix_vpx_vip.id}" + destination_ip_address = "${element(ibm_compute_vm_instance.virtualguest.*.ipv4_address, count.index)}" + destination_port = "${var.port}" + weight = 55 + connection_limit = 5000 + health_check = "HTTP" +} diff --git a/examples/ibm-lb-vpx/outputs.tf b/examples/ibm-lb-vpx/outputs.tf new file mode 100644 index 0000000000..7a317af599 --- /dev/null +++ b/examples/ibm-lb-vpx/outputs.tf @@ -0,0 +1,4 @@ +#ip_address - cluster address +output "cluster_address" { + value = "http://${ibm_lb_vpx_vip.citrix_vpx_vip.virtual_ip_address}" +} diff --git a/examples/ibm-lb-vpx/variables.tf b/examples/ibm-lb-vpx/variables.tf new file mode 100644 index 0000000000..16c0a5a5c7 --- /dev/null +++ b/examples/ibm-lb-vpx/variables.tf @@ -0,0 +1,19 @@ +variable "ssh_key_path" { + default = "~/.ssh/id_rsa.pub" +} + +variable "ssh_key_label" { + default = "ssh_key_cluster" +} + +variable vm_count { + default = 2 +} + +variable port { + default = 80 +} + +variable datacenter { + default = "dal09" +} diff --git a/examples/ibm-network-vlan/README.md b/examples/ibm-network-vlan/README.md new file mode 100644 index 0000000000..65e8857d5d --- /dev/null +++ b/examples/ibm-network-vlan/README.md @@ -0,0 +1,19 @@ +### IBM Cloud VLAN Resource Example + +This example launches public and private VLAN resources and attaches them to a virtual guest.
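+ +At its core the example declares a VLAN resource; a minimal sketch with the defaults from variables.tf inlined looks like this: + +``` +resource "ibm_network_vlan" "test_vlan_public" { + name = "test_vlan_public" + datacenter = "dal06" + type = "PUBLIC" + subnet_size = 8 +} +```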
+To run, configure your IBM Cloud provider. +### Get up and running + +* Planning phase + + terraform plan + + +* Apply phase + + terraform apply + + +* Destroy + + terraform destroy \ No newline at end of file diff --git a/examples/ibm-network-vlan/main.tf b/examples/ibm-network-vlan/main.tf new file mode 100644 index 0000000000..2e56832d2c --- /dev/null +++ b/examples/ibm-network-vlan/main.tf @@ -0,0 +1,46 @@ +provider "ibm" { +} + +# Create a public vlan +resource "ibm_network_vlan" "test_vlan_public" { + name = "${var.vlan_name_public}" + datacenter = "${var.datacenter}" + type = "PUBLIC" + subnet_size = 8 +} + +# Create a private vlan +resource "ibm_network_vlan" "test_vlan_private" { + name = "${var.vlan_name_private}" + datacenter = "${var.datacenter}" + type = "PRIVATE" + subnet_size = 8 +} + +# Create a new ssh key +resource "ibm_compute_ssh_key" "ssh_key" { + label = "${var.ssh_label}" + notes = "for public vlan test" + public_key = "${file(var.ssh_public_key)}" +} + +# Create a new virtual guest using image "CENTOS_7_64" +resource "ibm_compute_vm_instance" "vm1" { + hostname = "${var.vm_hostname}" + os_reference_code = "${var.vm_os_reference_code}" + domain = "${var.vm_domain}" + datacenter = "${var.datacenter}" + network_speed = "${var.vm_network_speed}" + hourly_billing = true + private_network_only = false + cores = "${var.vm_cores}" + memory = "${var.vm_memory}" + disks = "${var.vm_disks}" + user_metadata = "{\"value\":\"newvalue\"}" + dedicated_acct_host_only = true + local_disk = false + ssh_key_ids = ["${ibm_compute_ssh_key.ssh_key.id}"] + public_vlan_id = "${ibm_network_vlan.test_vlan_public.id}" + private_vlan_id = "${ibm_network_vlan.test_vlan_private.id}" + +} diff --git a/examples/ibm-network-vlan/variables.tf b/examples/ibm-network-vlan/variables.tf new file mode 100644 index 0000000000..49b99f8a93 --- /dev/null +++ b/examples/ibm-network-vlan/variables.tf @@ -0,0 +1,40 @@ +variable "ssh_label" { + default = "ssh_key_vlan" +} + +variable "ssh_public_key" { + default = "~/.ssh/id_rsa.pub" +} + +variable "vm_hostname" { + default = "vlan-provider" +} +variable "vm_os_reference_code" { + default = "CENTOS_7_64" +} +variable "datacenter" { + default = "dal06" +} +variable "vm_domain" { + default = "terraformuat.ibm.com" +} +variable "vm_network_speed" { + default = 10 +} +variable "vm_cores" { + default = 1 +} +variable "vm_memory" { + default = 1024 +} +variable "vm_disks" { + default = [25, 10, 20] +} + +variable "vlan_name_public" { + default = "test_vlan_public" +} + +variable "vlan_name_private" { + default = "test_vlan_private" +} diff --git a/examples/ibm-service-instance/README.md b/examples/ibm-service-instance/README.md new file mode 100644 index 0000000000..73bacb27bb --- /dev/null +++ b/examples/ibm-service-instance/README.md @@ -0,0 +1,27 @@ +# Cloud Foundry Service example + +This example shows how to create a Cloud Foundry service instance and bind a service key. + +This creates a service instance of the specified service offering type and plan.
It also creates a key for the service instance. + +To run, configure your IBM Cloud provider. + +Running the example + +For the planning phase + +``` +terraform plan +``` + +For the apply phase + +``` +terraform apply +``` + +To remove the stack + +``` + terraform destroy +``` diff --git a/examples/ibm-service-instance/main.tf b/examples/ibm-service-instance/main.tf new file mode 100644 index 0000000000..d6b06440d8 --- /dev/null +++ b/examples/ibm-service-instance/main.tf @@ -0,0 +1,19 @@ +provider "ibm" {} + +data "ibm_space" "spacedata" { + space = "${var.space}" + org = "${var.org}" +} + +resource "ibm_service_instance" "service-instance" { + name = "${var.service_instance_name}" + space_guid = "${data.ibm_space.spacedata.id}" + service = "${var.service}" + plan = "${var.plan}" + tags = ["cluster-service", "cluster-bind"] +} + +resource "ibm_service_key" "serviceKey" { + name = "${var.service_key_name}" + service_instance_guid = "${ibm_service_instance.service-instance.id}" +} diff --git a/examples/ibm-service-instance/outputs.tf b/examples/ibm-service-instance/outputs.tf new file mode 100644 index 0000000000..0942e601e7 --- /dev/null +++ b/examples/ibm-service-instance/outputs.tf @@ -0,0 +1,4 @@ +#service instance guid +output "guid" { + value = "${ibm_service_instance.service-instance.id}" +} diff --git a/examples/ibm-service-instance/variables.tf b/examples/ibm-service-instance/variables.tf new file mode 100644 index 0000000000..a0d6bca5c5 --- /dev/null +++ b/examples/ibm-service-instance/variables.tf @@ -0,0 +1,23 @@ +variable "space" { + default = "dev" +} + +variable "org" { + default = "myorg" +} + +variable "service" { + default = "cloudantNoSQLDB" +} + +variable "plan" { + default = "Lite" +} + +variable "service_instance_name" { + default = "mycloudantdb" +} + +variable "service_key_name" { + default = "mycloudantdbkey" +} diff --git a/ibm/config.go b/ibm/config.go new file mode 100644 index 0000000000..b3c8617fc5 --- /dev/null +++ b/ibm/config.go @@ -0,0 +1,187 @@ +package ibm + +import ( + "errors" + "fmt" + "log" + "os" + "time" + + slsession "github.com/softlayer/softlayer-go/session" + + bluemix "github.com/IBM-Bluemix/bluemix-go" + "github.com/IBM-Bluemix/bluemix-go/api/account/accountv2" + "github.com/IBM-Bluemix/bluemix-go/api/container/containerv1" + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" + bxsession "github.com/IBM-Bluemix/bluemix-go/session" +) + +//SoftlayerRestEndpoint is the rest endpoint of SoftLayer +const SoftlayerRestEndpoint = "https://api.softlayer.com/rest/v3" + +//BluemixRegion ... +var BluemixRegion string + +var ( + errEmptySoftLayerCredentials = errors.New("softlayer_username and softlayer_api_key must be provided. Please see the documentation on how to configure them") + errEmptyBluemixCredentials = errors.New("bluemix_api_key must be provided. Please see the documentation on how to configure it") +) + +//Config stores user-provided input +type Config struct { + //BluemixAPIKey is the Bluemix api key + BluemixAPIKey string + //Bluemix region + Region string + //Bluemix API timeout + BluemixTimeout time.Duration + + //Softlayer end point url + SoftLayerEndpointURL string + + //Softlayer API timeout + SoftLayerTimeout time.Duration + + // Softlayer User Name + SoftLayerUserName string + + // Softlayer API Key + SoftLayerAPIKey string + + //Retry Count for API calls + //Unexposed in the schema at this point as they are used only during session creation for a few calls + //When the sdk implements it we can expose them for expected behaviour + //https://github.com/softlayer/softlayer-go/issues/41 + RetryCount int + //Constant Retry Delay for API calls + RetryDelay time.Duration +} + +//Session stores the information required for communication with the SoftLayer and Bluemix API +type Session struct { + // SoftLayerSession is the SoftLayer session used to connect to the SoftLayer API + SoftLayerSession *slsession.Session + + // BluemixSession is the Bluemix session used to connect to the Bluemix API + BluemixSession *bxsession.Session +} + +// ClientSession ... +type ClientSession interface { + SoftLayerSession() *slsession.Session + BluemixSession() (*bxsession.Session, error) + ContainerAPI() (containerv1.ContainerServiceAPI, error) + MccpAPI() (mccpv2.MccpServiceAPI, error) + BluemixAcccountAPI() (accountv2.AccountServiceAPI, error) +} + +type clientSession struct { + session *Session + + csConfigErr error + csServiceAPI containerv1.ContainerServiceAPI + + cfConfigErr error + cfServiceAPI mccpv2.MccpServiceAPI + + accountConfigErr error + bmxAccountServiceAPI accountv2.AccountServiceAPI +} + +// SoftLayerSession provides the SoftLayer session +func (sess clientSession) SoftLayerSession() *slsession.Session { + return sess.session.SoftLayerSession +} + +// MccpAPI provides Multi Cloud Controller Proxy APIs ... +func (sess clientSession) MccpAPI() (mccpv2.MccpServiceAPI, error) { + return sess.cfServiceAPI, sess.cfConfigErr +} + +// BluemixAcccountAPI ... +func (sess clientSession) BluemixAcccountAPI() (accountv2.AccountServiceAPI, error) { + return sess.bmxAccountServiceAPI, sess.accountConfigErr +} + +// ContainerAPI provides Container Service APIs ...
+func (sess clientSession) ContainerAPI() (containerv1.ContainerServiceAPI, error) { + return sess.csServiceAPI, sess.csConfigErr +} + +// BluemixSession provides the Bluemix session +func (sess clientSession) BluemixSession() (*bxsession.Session, error) { + return sess.session.BluemixSession, sess.cfConfigErr +} + +// ClientSession configures and returns a fully initialized ClientSession +func (c *Config) ClientSession() (interface{}, error) { + sess, err := newSession(c) + if err != nil { + return nil, err + } + session := clientSession{ + session: sess, + } + if sess.BluemixSession == nil { + //Can be nil only if bluemix_api_key is not provided + log.Println("Skipping Bluemix Clients configuration") + session.csConfigErr = errEmptyBluemixCredentials + session.cfConfigErr = errEmptyBluemixCredentials + session.accountConfigErr = errEmptyBluemixCredentials + return session, nil + } + + BluemixRegion = sess.BluemixSession.Config.Region + cfAPI, err := mccpv2.New(sess.BluemixSession) + if err != nil { + session.cfConfigErr = fmt.Errorf("Error occurred while configuring MCCP service: %q", err) + } + session.cfServiceAPI = cfAPI + + accAPI, err := accountv2.New(sess.BluemixSession) + if err != nil { + session.accountConfigErr = fmt.Errorf("Error occurred while configuring Account Service: %q", err) + } + session.bmxAccountServiceAPI = accAPI + + clusterAPI, err := containerv1.New(sess.BluemixSession) + if err != nil { + session.csConfigErr = fmt.Errorf("Error occurred while configuring Container Service for K8s cluster: %q", err) + } + session.csServiceAPI = clusterAPI + return session, nil +} + +func newSession(c *Config) (*Session, error) { + ibmSession := &Session{} + + log.Println("Configuring SoftLayer Session ") + softlayerSession := &slsession.Session{ + Endpoint: c.SoftLayerEndpointURL, + Timeout: c.SoftLayerTimeout, + UserName: c.SoftLayerUserName, + APIKey: c.SoftLayerAPIKey, + Debug: os.Getenv("TF_LOG") != "", + } + ibmSession.SoftLayerSession = softlayerSession + + if c.BluemixAPIKey != "" { + log.Println("Configuring Bluemix Session") + var sess *bxsession.Session + bmxConfig := &bluemix.Config{ + BluemixAPIKey: c.BluemixAPIKey, + Debug: os.Getenv("TF_LOG") != "", + HTTPTimeout: c.BluemixTimeout, + Region: c.Region, + RetryDelay: &c.RetryDelay, + MaxRetries: &c.RetryCount, + } + sess, err := bxsession.New(bmxConfig) + if err != nil { + return nil, err + } + ibmSession.BluemixSession = sess + } + + return ibmSession, nil +} diff --git a/ibm/data_source_ibm_account.go b/ibm/data_source_ibm_account.go new file mode 100644 index 0000000000..2a05186fa5 --- /dev/null +++ b/ibm/data_source_ibm_account.go @@ -0,0 +1,39 @@ +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceIBMAccount() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMAccountRead, + + Schema: map[string]*schema.Schema{ + "org_guid": { + Description: "The guid of the org", + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceIBMAccountRead(d *schema.ResourceData, meta interface{}) error { + bmxSess, err := meta.(ClientSession).BluemixSession() + if err != nil { + return err + } + accClient, err := meta.(ClientSession).BluemixAcccountAPI() + if err != nil { + return err + } + orgGUID := d.Get("org_guid").(string) + account, err := accClient.Accounts().FindByOrg(orgGUID, bmxSess.Config.Region) + if err != nil { + return fmt.Errorf("Error retrieving organization: %s", err) + } + d.SetId(account.GUID) + return nil +} diff --git a/ibm/data_source_ibm_account_test.go b/ibm/data_source_ibm_account_test.go new file mode 100644 index 0000000000..91eea7224a --- /dev/null +++ b/ibm/data_source_ibm_account_test.go @@ -0,0 +1,39 @@ +package ibm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMAccountDataSource_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMAccountDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.ibm_org.testacc_ds_org", "org", cfOrganization), + resource.TestCheckResourceAttrSet( + "data.ibm_account.testacc_acc", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMAccountDataSourceConfig() string { + return fmt.Sprintf(` + +data "ibm_org" "testacc_ds_org" { + org = "%s" +} + +data "ibm_account" "testacc_acc" { + org_guid = "${data.ibm_org.testacc_ds_org.id}" +}`, cfOrganization) + +} diff --git a/ibm/data_source_ibm_app.go b/ibm/data_source_ibm_app.go new file mode 100644 index 0000000000..d79a9624b7 --- /dev/null +++ b/ibm/data_source_ibm_app.go @@ -0,0 +1,114 @@ +package ibm + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceIBMApp() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMAppRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the app", + }, + "space_guid": { + Description: "The guid of the space to which the app belongs", + Type: schema.TypeString, + Required: true, + }, + "memory": { + Description: "The amount of memory each instance should have. In megabytes.", + Type: schema.TypeInt, + Computed: true, + }, + "instances": { + Description: "The number of instances", + Type: schema.TypeInt, + Computed: true, + }, + "disk_quota": { + Description: "The maximum amount of disk available to an instance of an app. In megabytes.", + Type: schema.TypeInt, + Computed: true, + }, + "buildpack": { + Description: "Buildpack to build the app. 3 options: a) Blank means autodetection; b) A Git URL pointing to a buildpack; c) Name of an installed buildpack.", + Type: schema.TypeString, + Computed: true, + }, + "environment_json": { + Description: "Key/value pairs of all the environment variables to run in your app. Does not include any system or service variables.", + Type: schema.TypeMap, + Computed: true, + }, + "route_guid": { + Description: "The route guids which are bound to the application.", + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Computed: true, + }, + "service_instance_guid": { + Description: "The service instance guids that are bound to this application.", + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "package_state": { + Description: "The state of the application package, for example STAGED or PENDING", + Type: schema.TypeString, + Computed: true, + }, + "state": { + Description: "The state of the application", + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMAppRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + appAPI := cfClient.Apps() + name := d.Get("name").(string) + spaceGUID := d.Get("space_guid").(string) + + app, err := appAPI.FindByName(spaceGUID, name) + if err != nil { + return err + } + d.SetId(app.GUID) + d.Set("memory", app.Memory) + d.Set("disk_quota", app.DiskQuota) + if app.BuildPack != nil { + d.Set("buildpack", app.BuildPack) + } + d.Set("environment_json", app.EnvironmentJSON) + d.Set("package_state", app.PackageState) + d.Set("state", app.State) + d.Set("instances", app.Instances) + + route, err := appAPI.ListRoutes(app.GUID) + if err != nil { + return err + } + if len(route) > 0 { + d.Set("route_guid", flattenRoute(route)) + } + svcBindings, err := appAPI.ListServiceBindings(app.GUID) + if err != nil { + return err + } + if len(svcBindings) > 0 { + d.Set("service_instance_guid", flattenServiceBindings(svcBindings)) + } + return nil +} diff --git a/ibm/data_source_ibm_app_domain_private.go b/ibm/data_source_ibm_app_domain_private.go new file mode 100644 index 0000000000..7683826b77 --- /dev/null +++ b/ibm/data_source_ibm_app_domain_private.go @@ -0,0 +1,36 @@ +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceIBMAppDomainPrivate() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMAppDomainPrivateRead, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "The name of the private domain", + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceIBMAppDomainPrivateRead(d *schema.ResourceData, meta interface{}) error { + cfAPI, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + domainName := d.Get("name").(string) + prdomain, err := cfAPI.PrivateDomains().FindByName(domainName) + if err != nil { + return fmt.Errorf("Error retrieving domain: %s", err) + } + d.SetId(prdomain.GUID) + return nil + +} diff --git a/ibm/data_source_ibm_app_domain_private_test.go b/ibm/data_source_ibm_app_domain_private_test.go new file mode 100644 index 0000000000..1e2861ee3a --- /dev/null +++ b/ibm/data_source_ibm_app_domain_private_test.go @@ -0,0 +1,45 @@ +package ibm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMAppDomainPrivateDataSource_basic(t *testing.T) { + name := fmt.Sprintf("terraform%d.com", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{
resource.TestStep{ + Config: testAccCheckIBMAppDomainPrivateDataSourceConfig(name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet( + "data.ibm_app_domain_private.testacc_domain", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMAppDomainPrivateDataSourceConfig(name string) string { + return fmt.Sprintf(` + + data "ibm_org" "orgdata" { + org = "%s" + } + + resource "ibm_app_domain_private" "domain" { + name = "%s" + org_guid = "${data.ibm_org.orgdata.id}" + } + + data "ibm_app_domain_private" "testacc_domain" { + name = "${ibm_app_domain_private.domain.name}" + }`, cfOrganization, name) + +} diff --git a/ibm/data_source_ibm_app_domain_shared.go b/ibm/data_source_ibm_app_domain_shared.go new file mode 100644 index 0000000000..73e886cc7a --- /dev/null +++ b/ibm/data_source_ibm_app_domain_shared.go @@ -0,0 +1,37 @@ +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceIBMAppDomainShared() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMAppDomainSharedRead, + + Schema: map[string]*schema.Schema{ + "name": { + Description: "The name of the shared domain", + Type: schema.TypeString, + Required: true, + ValidateFunc: validateDomainName, + }, + }, + } +} + +func dataSourceIBMAppDomainSharedRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + domainName := d.Get("name").(string) + shdomain, err := cfClient.SharedDomains().FindByName(domainName) + if err != nil { + return fmt.Errorf("Error retrieving shared domain: %s", err) + } + d.SetId(shdomain.GUID) + return nil + +} diff --git a/ibm/data_source_ibm_app_domain_shared_test.go b/ibm/data_source_ibm_app_domain_shared_test.go new file mode 100644 index 0000000000..3547405f63 --- /dev/null +++ b/ibm/data_source_ibm_app_domain_shared_test.go @@ -0,0 +1,33 @@ +package ibm + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMAppDomainSharedDataSource_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMAppDomainSharedDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet( + "data.ibm_app_domain_shared.testacc_domain", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMAppDomainSharedDataSourceConfig() string { + return ` + + data "ibm_app_domain_shared" "testacc_domain" { + name = "mybluemix.net" + }` + +} diff --git a/ibm/data_source_ibm_app_route.go b/ibm/data_source_ibm_app_route.go new file mode 100644 index 0000000000..db5069fc7d --- /dev/null +++ b/ibm/data_source_ibm_app_route.go @@ -0,0 +1,87 @@ +package ibm + +import ( + "fmt" + + v2 "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" + "github.com/IBM-Bluemix/bluemix-go/helpers" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceIBMAppRoute() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMAppRouteRead, + + Schema: map[string]*schema.Schema{ + "space_guid": { + Description: "The guid of the space", + Type: schema.TypeString, + Required: true, + }, + "domain_guid": { + Description: "The guid of the domain", + Type: schema.TypeString, + Required: true, + }, + "host": { + Description: "The host of the route", + Type: schema.TypeString, + Optional: true, + }, + "path": { + Type: schema.TypeString,
Optional: true, + Description: "The path of the route", + ValidateFunc: validateRoutePath, + }, + "port": { + Type: schema.TypeInt, + Optional: true, + Description: "The port of the route", + ValidateFunc: validateRoutePort, + }, + }, + } +} + +func dataSourceIBMAppRouteRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + spaceAPI := cfClient.Spaces() + spaceGUID := d.Get("space_guid").(string) + domainGUID := d.Get("domain_guid").(string) + + params := v2.RouteFilter{ + DomainGUID: domainGUID, + } + + if host, ok := d.GetOk("host"); ok { + params.Host = helpers.String(host.(string)) + } + + if port, ok := d.GetOk("port"); ok { + params.Port = helpers.Int(port.(int)) + } + + if path, ok := d.GetOk("path"); ok { + params.Path = helpers.String(path.(string)) + } + route, err := spaceAPI.ListRoutes(spaceGUID, params) + if err != nil { + return fmt.Errorf("Error retrieving route: %s", err) + } + if len(route) == 0 { + return fmt.Errorf("No route satisfies the given parameters") + } + + if len(route) > 1 { + return fmt.Errorf("More than one route satisfies the given parameters") + } + + d.SetId(route[0].GUID) + return nil + +} diff --git a/ibm/data_source_ibm_app_route_test.go b/ibm/data_source_ibm_app_route_test.go new file mode 100644 index 0000000000..ebf1d1d743 --- /dev/null +++ b/ibm/data_source_ibm_app_route_test.go @@ -0,0 +1,55 @@ +package ibm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMAppRouteDataSource_basic(t *testing.T) { + host := fmt.Sprintf("terraform_%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMAppRouteDataSourceConfig(host), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet( + "data.ibm_app_route.testacc_route", "id"), + ), + }, + }, + }) +} + +func testAccCheckIBMAppRouteDataSourceConfig(host string) string { + return fmt.Sprintf(` + data "ibm_space" "spacedata" { + org = "%s" + space = "%s" + } + + data "ibm_app_domain_shared" "domain" { + name = "mybluemix.net" + } + + resource "ibm_app_route" "route" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.spacedata.id}" + host = "%s" + path = "/app" + } + + data "ibm_app_route" "testacc_route" { + domain_guid = "${ibm_app_route.route.domain_guid}" + space_guid = "${ibm_app_route.route.space_guid}" + host = "${ibm_app_route.route.host}" + path = "${ibm_app_route.route.path}" + } + `, cfOrganization, cfSpace, host) + +} diff --git a/ibm/data_source_ibm_app_test.go b/ibm/data_source_ibm_app_test.go new file mode 100644 index 0000000000..8b817123a4 --- /dev/null +++ b/ibm/data_source_ibm_app_test.go @@ -0,0 +1,95 @@ +package ibm + +import ( + "fmt" + "testing" + + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMAppDataSource_Basic(t *testing.T) { + var conf mccpv2.AppFields + appName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + routeHostName := fmt.Sprintf("terraform-route-host-%d", acctest.RandInt()) + svcName := fmt.Sprintf("tfsvc-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders,
CheckDestroy: testAccCheckIBMAppDestroy,
+		Steps: []resource.TestStep{
+
+			resource.TestStep{
+				Config: testAccCheckIBMAppDataSourceBasic(routeHostName, svcName, appName),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					testAccCheckIBMAppExists("ibm_app.app", &conf),
+					resource.TestCheckResourceAttr("ibm_app.app", "name", appName),
+					resource.TestCheckResourceAttrSet("data.ibm_app.ds", "id"),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "name", appName),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "buildpack", "sdk-for-nodejs"),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "environment_json.%", "1"),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "environment_json.test", "test1"),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "state", "STARTED"),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "package_state", "STAGED"),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "route_guid.#", "1"),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "service_instance_guid.#", "1"),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "memory", "128"),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "instances", "1"),
+					resource.TestCheckResourceAttr("data.ibm_app.ds", "disk_quota", "512"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckIBMAppDataSourceBasic(routeHost, serviceInstanceName, appName string) (config string) {
+	config = fmt.Sprintf(`
+data "ibm_space" "space" {
+  org   = "%s"
+  space = "%s"
+}
+
+data "ibm_app_domain_shared" "domain" {
+  name = "mybluemix.net"
+}
+
+resource "ibm_app_route" "route" {
+  domain_guid = "${data.ibm_app_domain_shared.domain.id}"
+  space_guid  = "${data.ibm_space.space.id}"
+  host        = "%s"
+}
+
+resource "ibm_service_instance" "service" {
+  name       = "%s"
+  space_guid = "${data.ibm_space.space.id}"
+  service    = "cleardb"
+  plan       = "cb5"
+  tags       = ["cluster-service"]
+}
+
+resource "ibm_app" "app" {
+  name                  = "%s"
+  space_guid            = "${data.ibm_space.space.id}"
+  app_path              = "test-fixtures/app1.zip"
+  wait_time_minutes     = 20
+  buildpack             = "sdk-for-nodejs"
+  instances             = 1
+  route_guid            = ["${ibm_app_route.route.id}"]
+  service_instance_guid = ["${ibm_service_instance.service.id}"]
+  disk_quota            = 512
+  memory                = 128
+
+  environment_json = {
+    "test" = "test1"
+  }
+}
+
+data "ibm_app" "ds" {
+  name       = "${ibm_app.app.name}"
+  space_guid = "${data.ibm_space.space.id}"
+}
+`, cfOrganization, cfSpace, routeHost, serviceInstanceName, appName)
+	return
+}
diff --git a/ibm/data_source_ibm_compute_image_template.go b/ibm/data_source_ibm_compute_image_template.go
new file mode 100644
index 0000000000..ebacf09289
--- /dev/null
+++ b/ibm/data_source_ibm_compute_image_template.go
@@ -0,0 +1,71 @@
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/services"
+)
+
+func dataSourceIBMComputeImageTemplate() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMComputeImageTemplateRead,
+
+		// TODO: if needed, add properties for visibility, image type,
+		// notes, size, and the accounts the image is shared with
+		Schema: map[string]*schema.Schema{
+			"id": {
+				Description: "The internal id of the image template",
+				Type:        schema.TypeInt,
+				Computed:    true,
+			},
+
+			"name": {
+				Description: "The name of this image template",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMComputeImageTemplateRead(d *schema.ResourceData, meta interface{}) error {
+	sess := 
meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + name := d.Get("name").(string) + + imageTemplates, err := service. + Mask("id,name"). + GetBlockDeviceTemplateGroups() + if err != nil { + return fmt.Errorf("Error looking up image template [%s]: %s", name, err) + } + + for _, imageTemplate := range imageTemplates { + if imageTemplate.Name != nil && *imageTemplate.Name == name { + d.SetId(fmt.Sprintf("%d", *imageTemplate.Id)) + return nil + } + } + + // Image not found among private nor shared images in the account. + // Looking up in the public images + templateService := services.GetVirtualGuestBlockDeviceTemplateGroupService(sess) + pubImageTemplates, err := templateService. + Mask("id,name"). + Filter(filter.Path("name").Eq(name).Build()). + GetPublicImages() + if err != nil { + return fmt.Errorf("Error looking up image template [%s] among the public images: %s", name, err) + } + + if len(pubImageTemplates) > 0 { + imageTemplate := pubImageTemplates[0] + d.SetId(fmt.Sprintf("%d", *imageTemplate.Id)) + return nil + } + + return fmt.Errorf("Could not find image template with name [%s]", name) +} diff --git a/ibm/data_source_ibm_compute_image_template_test.go b/ibm/data_source_ibm_compute_image_template_test.go new file mode 100644 index 0000000000..5a5b8e8906 --- /dev/null +++ b/ibm/data_source_ibm_compute_image_template_test.go @@ -0,0 +1,61 @@ +package ibm + +import ( + "regexp" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMComputeImageTemplateDataSource_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // Tests looking up private or shared images + { + Config: testAccCheckIBMComputeImageTemplateDataSourceConfig_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "data.ibm_compute_image_template.tfacc_img_tmpl", + "name", + "jumpbox", + ), + resource.TestMatchResourceAttr( + "data.ibm_compute_image_template.tfacc_img_tmpl", + "id", + regexp.MustCompile("^[0-9]+$"), + ), + ), + }, + // Tests looking up a public image + { + Config: testAccCheckIBMComputeImageTemplateDataSourceConfig_basic2, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "data.ibm_compute_image_template.tfacc_img_tmpl", + "name", + "RightImage_Ubuntu_12.04_amd64_v13.5", + ), + resource.TestMatchResourceAttr( + "data.ibm_compute_image_template.tfacc_img_tmpl", + "id", + regexp.MustCompile("^[0-9]+$"), + ), + ), + }, + }, + }) +} + +const testAccCheckIBMComputeImageTemplateDataSourceConfig_basic = ` +data "ibm_compute_image_template" "tfacc_img_tmpl" { + name = "jumpbox" +} +` + +const testAccCheckIBMComputeImageTemplateDataSourceConfig_basic2 = ` +data "ibm_compute_image_template" "tfacc_img_tmpl" { + name = "RightImage_Ubuntu_12.04_amd64_v13.5" +} +` diff --git a/ibm/data_source_ibm_compute_ssh_key.go b/ibm/data_source_ibm_compute_ssh_key.go new file mode 100644 index 0000000000..8bf3b69763 --- /dev/null +++ b/ibm/data_source_ibm_compute_ssh_key.go @@ -0,0 +1,110 @@ +package ibm + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMComputeSSHKey() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMComputeSSHKeyRead, + + Schema: map[string]*schema.Schema{ + 
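+			// Lookup is by label; the remaining attributes are computed from
+			// the matching key record. A sketch of a typical lookup in HCL
+			// (the label value here is hypothetical):
+			//
+			//   data "ibm_compute_ssh_key" "key" {
+			//     label       = "my-deploy-key"
+			//     most_recent = true
+			//   }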
"label": &schema.Schema{ + Description: "The label associated with the ssh key", + Type: schema.TypeString, + Required: true, + }, + + "public_key": &schema.Schema{ + Description: "The public ssh key", + Type: schema.TypeString, + Computed: true, + }, + + "fingerprint": &schema.Schema{ + Description: "A sequence of bytes to authenticate or lookup a longer ssh key", + Type: schema.TypeString, + Computed: true, + }, + + "notes": &schema.Schema{ + Description: "A small note about a ssh key to use at your discretion", + Type: schema.TypeString, + Computed: true, + }, + + "most_recent": &schema.Schema{ + Description: "If true and multiple entries are found, the most recently created key is used. " + + "If false, an error is returned", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func dataSourceIBMComputeSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + label := d.Get("label").(string) + mostRecent := d.Get("most_recent").(bool) + + keys, err := service. + Filter(filter.Build(filter.Path("sshKeys.label").Eq(label))). + Mask("id,label,key,fingerprint,notes,createDate"). + GetSshKeys() + + if err != nil { + return fmt.Errorf("Error retrieving SSH key: %s", err) + } + if len(keys) == 0 { + return fmt.Errorf("No ssh key found with name [%s]", label) + } + + var key datatypes.Security_Ssh_Key + if len(keys) > 1 { + if mostRecent { + key = mostRecentSSHKey(keys) + } else { + return fmt.Errorf( + "More than one ssh key found with label matching [%s]. "+ + "Either set 'most_recent' to true in your "+ + "configuration to force the most recent ssh key "+ + "to be used, or ensure that the label is unique", label) + } + } else { + key = keys[0] + } + + d.SetId(fmt.Sprintf("%d", *key.Id)) + d.Set("name", label) + d.Set("public_key", strings.TrimSpace(*key.Key)) + d.Set("fingerprint", key.Fingerprint) + d.Set("notes", key.Notes) + return nil +} + +type sshKeys []datatypes.Security_Ssh_Key + +func (k sshKeys) Len() int { return len(k) } + +func (k sshKeys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } + +func (k sshKeys) Less(i, j int) bool { + return k[i].CreateDate.Before(k[j].CreateDate.Time) +} + +func mostRecentSSHKey(keys sshKeys) datatypes.Security_Ssh_Key { + sortedKeys := keys + sort.Sort(sortedKeys) + return sortedKeys[len(sortedKeys)-1] +} diff --git a/ibm/data_source_ibm_compute_ssh_key_test.go b/ibm/data_source_ibm_compute_ssh_key_test.go new file mode 100644 index 0000000000..d57192c7c7 --- /dev/null +++ b/ibm/data_source_ibm_compute_ssh_key_test.go @@ -0,0 +1,47 @@ +package ibm + +import ( + "fmt" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMComputeSSHKeyDataSource_basic(t *testing.T) { + label := fmt.Sprintf("ssh_key_test_ds_label_%d", acctest.RandInt()) + notes := fmt.Sprintf("ssh_key_test_ds_notes_%d", acctest.RandInt()) + + publicKey := strings.TrimSpace(` +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR +`) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: 
testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMComputeSSHKeyDataSourceConfig(label, notes, publicKey), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.ibm_compute_ssh_key.testacc_ds_ssh_key", "public_key", publicKey), + resource.TestCheckResourceAttr("data.ibm_compute_ssh_key.testacc_ds_ssh_key", "notes", notes), + resource.TestMatchResourceAttr("data.ibm_compute_ssh_key.testacc_ds_ssh_key", "fingerprint", regexp.MustCompile("^[0-9a-f]{2}:")), + ), + }, + }, + }) +} + +func testAccCheckIBMComputeSSHKeyDataSourceConfig(label, notes, publicKey string) string { + return fmt.Sprintf(` +resource "ibm_compute_ssh_key" "testacc_ds_ssh_key" { + label = "%s" + notes = "%s" + public_key = "%s" +} +data "ibm_compute_ssh_key" "testacc_ds_ssh_key" { + label = "${ibm_compute_ssh_key.testacc_ds_ssh_key.label}" +}`, label, notes, publicKey) +} diff --git a/ibm/data_source_ibm_compute_vm_instance.go b/ibm/data_source_ibm_compute_vm_instance.go new file mode 100644 index 0000000000..9c91cb2712 --- /dev/null +++ b/ibm/data_source_ibm_compute_vm_instance.go @@ -0,0 +1,138 @@ +package ibm + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMComputeVmInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMComputeVmInstanceRead, + + Schema: map[string]*schema.Schema{ + + "hostname": &schema.Schema{ + Description: "The hostname of the virtual guest", + Type: schema.TypeString, + Required: true, + }, + + "domain": &schema.Schema{ + Description: "The domain of the virtual guest", + Type: schema.TypeString, + Required: true, + }, + + "datacenter": &schema.Schema{ + Description: "Datacenter in which the virtual guest is deployed", + Type: schema.TypeString, + Computed: true, + }, + + "cores": &schema.Schema{ + Description: "Number of cpu cores", + Type: schema.TypeInt, + Computed: true, + }, + "status": &schema.Schema{ + Description: "The VSI status", + Type: schema.TypeString, + Computed: true, + }, + "last_known_power_state": &schema.Schema{ + Description: "The last known power state of a virtual guest in the event the guest is turned off outside of IMS or has gone offline.", + Type: schema.TypeString, + Computed: true, + }, + "power_state": &schema.Schema{ + Description: "The current power state of a virtual guest.", + Type: schema.TypeString, + Computed: true, + }, + "most_recent": &schema.Schema{ + Description: "If true and multiple entries are found, the most recently created virtual guest is used. " + + "If false, an error is returned", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func dataSourceIBMComputeVmInstanceRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + hostname := d.Get("hostname").(string) + domain := d.Get("domain").(string) + mostRecent := d.Get("most_recent").(bool) + + vgs, err := service. 
+ Filter(filter.Build(filter.Path("virtualGuests.hostname").Eq(hostname), + filter.Path("virtualGuests.domain").Eq(domain))).Mask( + "hostname,domain,startCpus,datacenter[id,name,longName],statusId,status,id,powerState,lastKnownPowerState,createDate", + ).GetVirtualGuests() + + if err != nil { + return fmt.Errorf("Error retrieving virtual guest details for host %s: %s", hostname, err) + } + if len(vgs) == 0 { + return fmt.Errorf("No virtual guest with hostname %s and domain %s", hostname, domain) + } + var vg datatypes.Virtual_Guest + + if len(vgs) > 1 { + if mostRecent { + vg = mostRecentVirtualGuest(vgs) + } else { + return fmt.Errorf( + "More than one virtual guest found with host matching [%s] and domain "+ + "matching [%s]. Set 'most_recent' to true in your configuration to force the most recent virtual guest "+ + "to be used", hostname, domain) + } + } else { + vg = vgs[0] + } + + d.SetId(fmt.Sprintf("%d", *vg.Id)) + d.Set("hostname", vg.Hostname) + d.Set("domain", vg.Domain) + + if vg.Datacenter != nil { + d.Set("datacenter", *vg.Datacenter.Name) + } + d.Set("cores", *vg.StartCpus) + if vg.Status != nil { + d.Set("status", vg.Status.KeyName) + } + if vg.PowerState != nil { + d.Set("power_state", vg.PowerState.KeyName) + } + if vg.LastKnownPowerState != nil { + d.Set("last_known_power_state", vg.LastKnownPowerState.KeyName) + } + + return nil +} + +type virtualGuests []datatypes.Virtual_Guest + +func (k virtualGuests) Len() int { return len(k) } + +func (k virtualGuests) Swap(i, j int) { k[i], k[j] = k[j], k[i] } + +func (k virtualGuests) Less(i, j int) bool { + return k[i].CreateDate.Before(k[j].CreateDate.Time) +} + +func mostRecentVirtualGuest(keys virtualGuests) datatypes.Virtual_Guest { + sortedKeys := keys + sort.Sort(sortedKeys) + return sortedKeys[len(sortedKeys)-1] +} diff --git a/ibm/data_source_ibm_compute_vm_instance_test.go b/ibm/data_source_ibm_compute_vm_instance_test.go new file mode 100644 index 0000000000..711ff2ea2b --- /dev/null +++ b/ibm/data_source_ibm_compute_vm_instance_test.go @@ -0,0 +1,51 @@ +package ibm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMComputeVmInstanceDataSource_basic(t *testing.T) { + hostname := acctest.RandString(16) + domain := "ds.terraform.ibm.com" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMComputeVmInstanceDataSourceConfigBasic(hostname, domain), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.ibm_compute_vm_instance.tf-vg-ds-acc-test", "power_state", "RUNNING"), + resource.TestCheckResourceAttr("data.ibm_compute_vm_instance.tf-vg-ds-acc-test", "status", "ACTIVE"), + ), + }, + }, + }) +} + +func testAccCheckIBMComputeVmInstanceDataSourceConfigBasic(hostname, domain string) string { + return fmt.Sprintf(` + resource "ibm_compute_vm_instance" "tf-vg-acc-test" { + hostname = "%s" + domain = "%s" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25, 10, 20] + tags = ["data-source-test"] + dedicated_acct_host_only = true + local_disk = false +} +data "ibm_compute_vm_instance" "tf-vg-ds-acc-test" { + hostname = "${ibm_compute_vm_instance.tf-vg-acc-test.hostname}" + domain = "${ibm_compute_vm_instance.tf-vg-acc-test.domain}" +}`, 
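+		// The two %s verbs in the config above receive hostname and domain,
+		// in that order.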
hostname, domain) +} diff --git a/ibm/data_source_ibm_container_cluster.go b/ibm/data_source_ibm_container_cluster.go new file mode 100644 index 0000000000..69ad09bc56 --- /dev/null +++ b/ibm/data_source_ibm_container_cluster.go @@ -0,0 +1,115 @@ +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceIBMContainerCluster() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerClusterRead, + + Schema: map[string]*schema.Schema{ + "cluster_name_id": { + Description: "Name or id of the cluster", + Type: schema.TypeString, + Required: true, + }, + "worker_count": { + Description: "Number of workers", + Type: schema.TypeInt, + Computed: true, + }, + "workers": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "bounded_services": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service_name": { + Type: schema.TypeString, + Computed: true, + }, + "service_id": { + Type: schema.TypeString, + Computed: true, + }, + "service_key_name": { + Type: schema.TypeString, + Computed: true, + }, + "namespace": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "org_guid": { + Description: "The bluemix organization guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + }, + "space_guid": { + Description: "The bluemix space guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + }, + "account_guid": { + Description: "The bluemix account guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + csAPI := csClient.Clusters() + wrkAPI := csClient.Workers() + + targetEnv := getClusterTargetHeader(d) + name := d.Get("cluster_name_id").(string) + + clusterFields, err := csAPI.Find(name, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving cluster: %s", err) + } + workerFields, err := wrkAPI.List(name, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving workers for cluster: %s", err) + } + workers := make([]string, len(workerFields)) + for i, worker := range workerFields { + workers[i] = worker.ID + } + servicesBoundToCluster, err := csAPI.ListServicesBoundToCluster(name, "", targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving services bound to cluster: %s", err) + } + boundedServices := make([]map[string]interface{}, 0) + for _, service := range servicesBoundToCluster { + boundedService := make(map[string]interface{}) + boundedService["service_name"] = service.ServiceName + boundedService["service_id"] = service.ServiceID + boundedService["service_key_name"] = service.ServiceKeyName + boundedService["namespace"] = service.Namespace + boundedServices = append(boundedServices, boundedService) + } + + d.SetId(clusterFields.ID) + d.Set("worker_count", clusterFields.WorkerCount) + d.Set("workers", workers) + d.Set("bounded_services", boundedServices) + + return nil +} diff --git a/ibm/data_source_ibm_container_cluster_config.go b/ibm/data_source_ibm_container_cluster_config.go new file mode 100644 index 0000000000..cde3cf49d8 --- /dev/null +++ b/ibm/data_source_ibm_container_cluster_config.go @@ -0,0 +1,106 @@ +package ibm + +import ( + "fmt" + "log" + "path/filepath" + + v1 
"github.com/IBM-Bluemix/bluemix-go/api/container/containerv1" + "github.com/IBM-Bluemix/bluemix-go/helpers" + "github.com/hashicorp/terraform/helper/schema" + homedir "github.com/mitchellh/go-homedir" +) + +func dataSourceIBMContainerClusterConfig() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerClusterConfigRead, + + Schema: map[string]*schema.Schema{ + + "org_guid": { + Description: "The bluemix organization guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + }, + "space_guid": { + Description: "The bluemix space guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + }, + "account_guid": { + Description: "The bluemix account guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + }, + "cluster_name_id": { + Description: "The name/id of the cluster", + Type: schema.TypeString, + Required: true, + }, + "config_dir": { + Description: "The directory where the cluster config to be downloaded. Default is home directory ", + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "download": { + Description: "If set to false will not download the config, otherwise they are downloaded each time but onto the same path for a given cluster name/id", + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "admin": { + Description: "If set to true will download the config for admin", + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "config_file_path": { + Description: "The absolute path to the kubernetes config yml file ", + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceIBMContainerClusterConfigRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + csAPI := csClient.Clusters() + name := d.Get("cluster_name_id").(string) + download := d.Get("download").(bool) + admin := d.Get("admin").(bool) + configDir := d.Get("config_dir").(string) + + if len(configDir) == 0 { + configDir, err = homedir.Dir() + if err != nil { + return fmt.Errorf("Error fetching homedir: %s", err) + } + } + var configPath string + if !download { + log.Println("Skipping download of the cluster config", "Going to check if it already exists") + expectedDir := v1.ComputeClusterConfigDir(configDir, name, admin) + configPath = filepath.Join(expectedDir, "config.yml") + if !helpers.FileExists(configPath) { + return fmt.Errorf(`Couldn't find the cluster config at expected path %s. 
Please set "download" to true to download the new config`, configPath) + } + + } else { + targetEnv := getClusterTargetHeader(d) + var err error + configPath, err = csAPI.GetClusterConfig(name, configDir, admin, targetEnv) + if err != nil { + return fmt.Errorf("Error downloading the cluster config [%s]: %s", name, err) + } + } + + d.SetId(name) + d.Set("config_dir", configDir) + d.Set("config_file_path", configPath) + return nil +} diff --git a/ibm/data_source_ibm_container_cluster_config_test.go b/ibm/data_source_ibm_container_cluster_config_test.go new file mode 100644 index 0000000000..00a0df2dda --- /dev/null +++ b/ibm/data_source_ibm_container_cluster_config_test.go @@ -0,0 +1,73 @@ +package ibm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/mitchellh/go-homedir" +) + +func TestAccIBMContainerClusterConfigDataSource_basic(t *testing.T) { + homeDir, err := homedir.Dir() + clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + if err != nil { + t.Fatalf("Error fetching homedir: %s", err) + } + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMContainerClusterDataSourceConfig(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.ibm_container_cluster_config.testacc_ds_cluster", "config_dir", homeDir), + resource.TestCheckResourceAttrSet( + "data.ibm_container_cluster_config.testacc_ds_cluster", "config_file_path"), + ), + }, + }, + }) +} + +func testAccCheckIBMContainerClusterDataSourceConfig(clustername string) string { + return fmt.Sprintf(` +data "ibm_org" "testacc_ds_org" { + org = "%s" +} + +data "ibm_space" "testacc_ds_space" { + org = "%s" + space = "%s" +} + +data "ibm_account" "testacc_acc" { + org_guid = "${data.ibm_org.testacc_ds_org.id}" +} + + +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "dal10" + org_guid = "${data.ibm_org.testacc_ds_org.id}" + space_guid = "${data.ibm_space.testacc_ds_space.id}" + account_guid = "${data.ibm_account.testacc_acc.id}" + + workers = [{ + name = "worker1" + + action = "add" + }] + machine_type = "free" + isolation = "public" + public_vlan_id = "vlan" + private_vlan_id = "vlan" +} +data "ibm_container_cluster_config" "testacc_ds_cluster" { + cluster_name_id = "${ibm_container_cluster.testacc_cluster.id}" + org_guid = "${data.ibm_org.testacc_ds_org.id}" + space_guid = "${data.ibm_space.testacc_ds_space.id}" + account_guid = "${data.ibm_account.testacc_acc.id}" +}`, cfOrganization, cfOrganization, cfSpace, clustername) +} diff --git a/ibm/data_source_ibm_container_cluster_test.go b/ibm/data_source_ibm_container_cluster_test.go new file mode 100644 index 0000000000..c2139a1a56 --- /dev/null +++ b/ibm/data_source_ibm_container_cluster_test.go @@ -0,0 +1,84 @@ +package ibm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMContainerClusterDataSource_basic(t *testing.T) { + clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + serviceName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + serviceKeyName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + 
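+				// A single end-to-end step: create the cluster, bind a freshly
+				// created service instance to it, then read the cluster back
+				// through the data source and assert on the worker and
+				// bound-service counts.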
Config: testAccCheckIBMContainerClusterDataSource(clusterName, serviceName, serviceKeyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.ibm_container_cluster.testacc_ds_cluster", "worker_count", "1"), + resource.TestCheckResourceAttr("data.ibm_container_cluster.testacc_ds_cluster", "bounded_services.#", "1"), + ), + }, + }, + }) +} + +func testAccCheckIBMContainerClusterDataSource(clusterName, serviceName, serviceKeyName string) string { + return fmt.Sprintf(` +data "ibm_org" "testacc_ds_org" { + org = "%s" +} +data "ibm_space" "testacc_ds_space" { + org = "%s" + space = "%s" +} +data "ibm_account" "testacc_acc" { + org_guid = "${data.ibm_org.testacc_ds_org.id}" +} +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "dal10" + org_guid = "${data.ibm_org.testacc_ds_org.id}" + space_guid = "${data.ibm_space.testacc_ds_space.id}" + account_guid = "${data.ibm_account.testacc_acc.id}" + workers = [{ + name = "worker1" + action = "add" + }] + machine_type = "free" + isolation = "public" + public_vlan_id = "vlan" + private_vlan_id = "vlan" +} +resource "ibm_service_instance" "service" { + name = "%s" + space_guid = "${data.ibm_space.testacc_ds_space.id}" + service = "cloudantNoSQLDB" + plan = "Lite" + tags = ["cluster-service", "cluster-bind"] +} +resource "ibm_service_key" "serviceKey" { + name = "%s" + service_instance_guid = "${ibm_service_instance.service.id}" +} +resource "ibm_container_bind_service" "bind_service" { + cluster_name_id = "${ibm_container_cluster.testacc_cluster.name}" + service_instance_space_guid = "${data.ibm_space.testacc_ds_space.id}" + service_instance_name_id = "${ibm_service_instance.service.id}" + namespace_id = "default" + org_guid = "${data.ibm_org.testacc_ds_org.id}" + space_guid = "${data.ibm_space.testacc_ds_space.id}" + account_guid = "${data.ibm_account.testacc_acc.id}" +} +data "ibm_container_cluster" "testacc_ds_cluster" { + org_guid = "${data.ibm_org.testacc_ds_org.id}" + space_guid = "${data.ibm_space.testacc_ds_space.id}" + account_guid = "${data.ibm_account.testacc_acc.id}" + cluster_name_id = "${ibm_container_cluster.testacc_cluster.id}" +} +`, cfOrganization, cfOrganization, cfSpace, clusterName, serviceName, serviceKeyName) +} diff --git a/ibm/data_source_ibm_container_cluster_worker.go b/ibm/data_source_ibm_container_cluster_worker.go new file mode 100644 index 0000000000..163f986b3e --- /dev/null +++ b/ibm/data_source_ibm_container_cluster_worker.go @@ -0,0 +1,88 @@ +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceIBMContainerClusterWorker() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMContainerClusterWorkerRead, + + Schema: map[string]*schema.Schema{ + "worker_id": { + Description: "ID of the worker", + Type: schema.TypeString, + Required: true, + }, + "state": { + Description: "State of the worker", + Type: schema.TypeString, + Computed: true, + }, + "status": { + Description: "Status of the worker", + Type: schema.TypeString, + Computed: true, + }, + "private_vlan": { + Type: schema.TypeString, + Computed: true, + }, + "public_vlan": { + Type: schema.TypeString, + Computed: true, + }, + "private_ip": { + Type: schema.TypeString, + Computed: true, + }, + "public_ip": { + Type: schema.TypeString, + Computed: true, + }, + "org_guid": { + Description: "The bluemix organization guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + }, + "space_guid": { + Description: "The bluemix space 
guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + }, + "account_guid": { + Description: "The bluemix account guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceIBMContainerClusterWorkerRead(d *schema.ResourceData, meta interface{}) error { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + + wrkAPI := csClient.Workers() + workerID := d.Get("worker_id").(string) + targetEnv := getClusterTargetHeader(d) + + workerFields, err := wrkAPI.Get(workerID, targetEnv) + if err != nil { + return fmt.Errorf("Error retrieving worker: %s", err) + } + + d.SetId(workerFields.ID) + d.Set("state", workerFields.State) + d.Set("status", workerFields.Status) + d.Set("private_vlan", workerFields.PrivateVlan) + d.Set("public_vlan", workerFields.PublicVlan) + d.Set("private_ip", workerFields.PrivateIP) + d.Set("public_ip", workerFields.PublicIP) + + return nil +} diff --git a/ibm/data_source_ibm_container_cluster_worker_test.go b/ibm/data_source_ibm_container_cluster_worker_test.go new file mode 100644 index 0000000000..25f239c945 --- /dev/null +++ b/ibm/data_source_ibm_container_cluster_worker_test.go @@ -0,0 +1,71 @@ +package ibm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMContainerClusterWorkerDataSource_basic(t *testing.T) { + clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMContainerClusterWorkerDataSourceConfig(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.ibm_container_cluster_worker.testacc_ds_worker", "state", "normal"), + ), + }, + }, + }) +} + +func testAccCheckIBMContainerClusterWorkerDataSourceConfig(clusterName string) string { + return fmt.Sprintf(` +data "ibm_org" "org" { + org = "%s" +} + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_account" "acc" { + org_guid = "${data.ibm_org.org.id}" +} + +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "dal10" + workers = [{ + name = "worker1" + action = "add" + },] + machine_type = "free" + isolation = "public" + public_vlan_id = "vlan" + private_vlan_id = "vlan" + + org_guid = "${data.ibm_org.org.id}" + space_guid = "${data.ibm_space.space.id}" + account_guid = "${data.ibm_account.acc.id}" +} +data "ibm_container_cluster" "testacc_ds_cluster" { + org_guid = "${data.ibm_org.org.id}" + space_guid = "${data.ibm_space.space.id}" + account_guid = "${data.ibm_account.acc.id}" + cluster_name_id = "${ibm_container_cluster.testacc_cluster.id}" +} +data "ibm_container_cluster_worker" "testacc_ds_worker" { + org_guid = "${data.ibm_org.org.id}" + space_guid = "${data.ibm_space.space.id}" + account_guid = "${data.ibm_account.acc.id}" + worker_id = "${data.ibm_container_cluster.testacc_ds_cluster.workers[0]}" +} +`, cfOrganization, cfOrganization, cfSpace, clusterName) +} diff --git a/ibm/data_source_ibm_dns_domain.go b/ibm/data_source_ibm_dns_domain.go new file mode 100644 index 0000000000..3024acf052 --- /dev/null +++ b/ibm/data_source_ibm_dns_domain.go @@ -0,0 +1,52 @@ +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/filter" + 
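+	// filter builds SoftLayer object filters; services exposes the
+	// SoftLayer_Account service used below to list the account's domains.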
"github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMDNSDomain() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMDNSDomainRead, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Description: "A domain record's internal identifier", + Type: schema.TypeInt, + Computed: true, + }, + + "name": &schema.Schema{ + Description: "The name of the domain", + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceIBMDNSDomainRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + name := d.Get("name").(string) + + names, err := service. + Filter(filter.Build(filter.Path("domains.name").Eq(name))). + Mask("id,name"). + GetDomains() + + if err != nil { + return fmt.Errorf("Error retrieving domain: %s", err) + } + + if len(names) == 0 { + return fmt.Errorf("No domain found with name [%s]", name) + } + + d.SetId(fmt.Sprintf("%d", *names[0].Id)) + return nil +} diff --git a/ibm/data_source_ibm_dns_domain_test.go b/ibm/data_source_ibm_dns_domain_test.go new file mode 100644 index 0000000000..833c99885e --- /dev/null +++ b/ibm/data_source_ibm_dns_domain_test.go @@ -0,0 +1,39 @@ +package ibm + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMDNSDomainDataSource_Basic(t *testing.T) { + + var domainName = acctest.RandString(16) + ".com" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf(testAccCheckIBMDNSDomainDataSourceConfig_basic, domainName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.ibm_dns_domain.domain_id", "name", domainName), + resource.TestMatchResourceAttr("data.ibm_dns_domain.domain_id", "id", regexp.MustCompile("^[0-9]+$")), + ), + }, + }, + }) +} + +// The datasource to apply +const testAccCheckIBMDNSDomainDataSourceConfig_basic = ` +resource "ibm_dns_domain" "ds_domain_test" { + name = "%s" +} +data "ibm_dns_domain" "domain_id" { + name = "${ibm_dns_domain.ds_domain_test.name}" +} +` diff --git a/ibm/data_source_ibm_network_vlan.go b/ibm/data_source_ibm_network_vlan.go new file mode 100644 index 0000000000..7007f6abf3 --- /dev/null +++ b/ibm/data_source_ibm_network_vlan.go @@ -0,0 +1,132 @@ +package ibm + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" +) + +func dataSourceIBMNetworkVlan() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMNetworkVlanRead, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "number": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "router_hostname": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "subnets": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceIBMNetworkVlanRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetAccountService(sess) + + name := d.Get("name").(string) + number := 
d.Get("number").(int) + routerHostname := d.Get("router_hostname").(string) + var vlan *datatypes.Network_Vlan + var err error + + if number != 0 && routerHostname != "" { + // Got vlan number and router, get vlan, and compute name + vlan, err = getVlan(number, routerHostname, meta) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%d", *vlan.Id)) + if vlan.Name != nil { + d.Set("name", *vlan.Name) + } + } else if name != "" { + // Got name, get vlan, and compute router hostname and vlan number + networkVlans, err := service. + Mask("id,vlanNumber,name,primaryRouter[hostname],primarySubnets[networkIdentifier,cidr]"). + Filter(filter.Path("networkVlans.name").Eq(name).Build()). + GetNetworkVlans() + if err != nil { + return fmt.Errorf("Error obtaining VLAN id: %s", err) + } else if len(networkVlans) == 0 { + return fmt.Errorf("No VLAN was found with the name '%s'", name) + } + + vlan = &networkVlans[0] + d.SetId(fmt.Sprintf("%d", *vlan.Id)) + d.Set("number", *vlan.VlanNumber) + + if vlan.PrimaryRouter != nil && vlan.PrimaryRouter.Hostname != nil { + d.Set("router_hostname", *vlan.PrimaryRouter.Hostname) + } + } else { + return errors.New("Missing required properties. Need a VLAN name, or the VLAN's number and router hostname.") + } + + // Get subnets in cidr format for display + if len(vlan.PrimarySubnets) > 0 { + subnets := make([]string, len(vlan.PrimarySubnets)) + for i, subnet := range vlan.PrimarySubnets { + subnets[i] = fmt.Sprintf("%s/%d", *subnet.NetworkIdentifier, *subnet.Cidr) + } + + d.Set("subnets", subnets) + } + + return nil +} + +func getVlan(vlanNumber int, primaryRouterHostname string, meta interface{}) (*datatypes.Network_Vlan, error) { + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + + networkVlans, err := service. + Mask("id,name,primarySubnets[networkIdentifier,cidr]"). + Filter( + filter.Build( + filter.Path("networkVlans.primaryRouter.hostname").Eq(primaryRouterHostname), + filter.Path("networkVlans.vlanNumber").Eq(vlanNumber), + ), + ). 
+ GetNetworkVlans() + + if err != nil { + return &datatypes.Network_Vlan{}, fmt.Errorf("Error looking up Vlan: %s", err) + } + + if len(networkVlans) < 1 { + return &datatypes.Network_Vlan{}, fmt.Errorf( + "Unable to locate a vlan matching the provided router hostname and vlan number: %s/%d", + primaryRouterHostname, + vlanNumber) + } + + return &networkVlans[0], nil +} diff --git a/ibm/data_source_ibm_network_vlan_test.go b/ibm/data_source_ibm_network_vlan_test.go new file mode 100644 index 0000000000..36539d8e4c --- /dev/null +++ b/ibm/data_source_ibm_network_vlan_test.go @@ -0,0 +1,47 @@ +package ibm + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMNetworkVlanDataSource_Basic(t *testing.T) { + + name := fmt.Sprintf("terraformuat_vlan_%s", acctest.RandString(2)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMNetworkVlanDataSourceConfig(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMResources("data.ibm_network_vlan.tfacc_vlan", "number", + "ibm_network_vlan.test_vlan_private", "vlan_number"), + //resource.TestCheckResourceAttr("data.ibm_network_vlan.tfacc_vlan", "number", number), + resource.TestCheckResourceAttr("data.ibm_network_vlan.tfacc_vlan", "name", name), + resource.TestMatchResourceAttr("data.ibm_network_vlan.tfacc_vlan", "id", regexp.MustCompile("^[0-9]+$")), + ), + }, + }, + }) +} + +func testAccCheckIBMNetworkVlanDataSourceConfig(name string) string { + return fmt.Sprintf(` + resource "ibm_network_vlan" "test_vlan_private" { + name = "%s" + datacenter = "dal06" + type = "PRIVATE" + subnet_size = 8 + +} +data "ibm_network_vlan" "tfacc_vlan" { + number = "${ibm_network_vlan.test_vlan_private.vlan_number}" + name = "${ibm_network_vlan.test_vlan_private.name}" +}`, name) +} diff --git a/ibm/data_source_ibm_org.go b/ibm/data_source_ibm_org.go new file mode 100644 index 0000000000..ae6f55d836 --- /dev/null +++ b/ibm/data_source_ibm_org.go @@ -0,0 +1,38 @@ +package ibm + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceIBMOrg() *schema.Resource { + return &schema.Resource{ + Read: dataSourceIBMOrgRead, + + Schema: map[string]*schema.Schema{ + "org": { + Description: "Org name, for example myorg@domain", + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func dataSourceIBMOrgRead(d *schema.ResourceData, meta interface{}) error { + cfAPI, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + orgAPI := cfAPI.Organizations() + org := d.Get("org").(string) + orgFields, err := orgAPI.FindByName(org, BluemixRegion) + if err != nil { + return fmt.Errorf("Error retrieving organisation: %s", err) + } + + d.SetId(orgFields.GUID) + + return nil +} diff --git a/ibm/data_source_ibm_org_test.go b/ibm/data_source_ibm_org_test.go new file mode 100644 index 0000000000..287e5bef0b --- /dev/null +++ b/ibm/data_source_ibm_org_test.go @@ -0,0 +1,33 @@ +package ibm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMOrgDataSource_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMOrgDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + 
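+					// A single check: the configured org name should round-trip
+					// through the data source unchanged.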
resource.TestCheckResourceAttr("data.ibm_org.testacc_ds_org", "org", cfOrganization),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckIBMOrgDataSourceConfig() string {
+	return fmt.Sprintf(`
+
+data "ibm_org" "testacc_ds_org" {
+	org = "%s"
+}`, cfOrganization)
+
+}
diff --git a/ibm/data_source_ibm_service_instance.go b/ibm/data_source_ibm_service_instance.go
new file mode 100644
index 0000000000..7033f27705
--- /dev/null
+++ b/ibm/data_source_ibm_service_instance.go
@@ -0,0 +1,57 @@
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceIBMServiceInstance() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMServiceInstanceRead,
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Description: "Service instance name, for example: cleardb",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+
+			"credentials": {
+				Description: "Credentials associated with the key",
+				Type:        schema.TypeMap,
+				Computed:    true,
+			},
+
+			"service_plan_guid": {
+				Description: "The unique identifier of the service offering plan type",
+				Type:        schema.TypeString,
+				Computed:    true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMServiceInstanceRead(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	siAPI := cfClient.ServiceInstances()
+	name := d.Get("name").(string)
+	inst, err := siAPI.FindByName(name)
+	if err != nil {
+		return err
+	}
+
+	serviceInstance, err := siAPI.Get(inst.GUID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving service: %s", err)
+	}
+
+	d.SetId(serviceInstance.Metadata.GUID)
+	d.Set("credentials", serviceInstance.Entity.Credentials)
+	d.Set("service_plan_guid", serviceInstance.Entity.ServicePlanGUID)
+
+	return nil
+}
diff --git a/ibm/data_source_ibm_service_instance_test.go b/ibm/data_source_ibm_service_instance_test.go
new file mode 100644
index 0000000000..a04fadcefa
--- /dev/null
+++ b/ibm/data_source_ibm_service_instance_test.go
@@ -0,0 +1,48 @@
+package ibm
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccIBMServiceInstanceDataSource_basic(t *testing.T) {
+	serviceName := fmt.Sprintf("terraform_%d", acctest.RandInt())
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccCheckIBMServiceInstanceDataSourceConfig(serviceName),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("data.ibm_service_instance.testacc_ds_service_instance", "name", serviceName),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckIBMServiceInstanceDataSourceConfig(serviceName string) string {
+	return fmt.Sprintf(`
+	data "ibm_space" "spacedata" {
+		org   = "%s"
+		space = "%s"
+	}
+
+	resource "ibm_service_instance" "service" {
+		name       = "%s"
+		space_guid = "${data.ibm_space.spacedata.id}"
+		service    = "cleardb"
+		plan       = "cb5"
+		tags       = ["cluster-service", "cluster-bind"]
+	}
+
+
+	data "ibm_service_instance" "testacc_ds_service_instance" {
+		name = "${ibm_service_instance.service.name}"
+}`, cfOrganization, cfSpace, serviceName)
+
+}
diff --git a/ibm/data_source_ibm_service_key.go b/ibm/data_source_ibm_service_key.go
new file mode 100644
index 0000000000..02a86e3ed2
--- /dev/null
+++ b/ibm/data_source_ibm_service_key.go
@@ -0,0 +1,59 @@
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceIBMServiceKey() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMServiceKeyRead,
+
+		Schema: map[string]*schema.Schema{
+			"credentials": {
+				Description: "Credentials associated with the key",
+				Type:        schema.TypeMap,
+				Computed:    true,
+			},
+
+			"name": {
+				Description: "The name of the service key",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+			"service_instance_name": {
+				Description: "Service instance name, for example: cleardbinstance",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMServiceKeyRead(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	siAPI := cfClient.ServiceInstances()
+	skAPI := cfClient.ServiceKeys()
+	serviceInstanceName := d.Get("service_instance_name").(string)
+	name := d.Get("name").(string)
+	inst, err := siAPI.FindByName(serviceInstanceName)
+	if err != nil {
+		return err
+	}
+	serviceInstance, err := siAPI.Get(inst.GUID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving service: %s", err)
+	}
+	serviceKey, err := skAPI.FindByName(serviceInstance.Metadata.GUID, name)
+	if err != nil {
+		return fmt.Errorf("Error retrieving service key: %s", err)
+	}
+	d.SetId(serviceKey.GUID)
+	d.Set("credentials", serviceKey.Credentials)
+
+	return nil
+}
diff --git a/ibm/data_source_ibm_service_key_test.go b/ibm/data_source_ibm_service_key_test.go
new file mode 100644
index 0000000000..923dbd2f7f
--- /dev/null
+++ b/ibm/data_source_ibm_service_key_test.go
@@ -0,0 +1,54 @@
+package ibm
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccIBMServiceKeyDataSource_basic(t *testing.T) {
+	serviceName := fmt.Sprintf("terraform_%d", acctest.RandInt())
+	serviceKey := fmt.Sprintf("terraform_%d", acctest.RandInt())
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccCheckIBMServiceKeyDataSourceConfig(serviceName, serviceKey),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("data.ibm_service_key.testacc_ds_service_key", "name", serviceKey),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckIBMServiceKeyDataSourceConfig(serviceName, serviceKey string) string {
+	return fmt.Sprintf(`
+	data "ibm_space" "spacedata" {
+		org   = "%s"
+		space = "%s"
+	}
+
+	resource "ibm_service_instance" "service" {
+		name       = "%s"
+		space_guid = "${data.ibm_space.spacedata.id}"
+		service    = "cleardb"
+		plan       = "cb5"
+		tags       = ["cluster-service", "cluster-bind"]
+	}
+
+	resource "ibm_service_key" "servicekey" {
+		name                  = "%s"
+		service_instance_guid = "${ibm_service_instance.service.id}"
+	}
+
+	data "ibm_service_key" "testacc_ds_service_key" {
+		name                  = "${ibm_service_key.servicekey.name}"
+		service_instance_name = "${ibm_service_instance.service.name}"
+}`, cfOrganization, cfSpace, serviceName, serviceKey)
+
+}
diff --git a/ibm/data_source_ibm_service_plan.go b/ibm/data_source_ibm_service_plan.go
new file mode 100644
index 0000000000..5c633ade47
--- /dev/null
+++ b/ibm/data_source_ibm_service_plan.go
@@ -0,0 +1,50 @@
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceIBMServicePlan() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMServicePlanRead,
+
+		Schema: map[string]*schema.Schema{
+			"service": {
+				Description: "Service name, for example: cleardb",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+
+			"plan": {
+				Description: "The plan type, for example: shared",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+		},
+	}
+}
+
+func dataSourceIBMServicePlanRead(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	soffAPI := cfClient.ServiceOfferings()
+	spAPI := cfClient.ServicePlans()
+
+	service := d.Get("service").(string)
+	plan := d.Get("plan").(string)
+	serviceOff, err := soffAPI.FindByLabel(service)
+	if err != nil {
+		return fmt.Errorf("Error retrieving service offering: %s", err)
+	}
+	servicePlan, err := spAPI.FindPlanInServiceOffering(serviceOff.GUID, plan)
+	if err != nil {
+		return fmt.Errorf("Error retrieving plan: %s", err)
+	}
+
+	d.SetId(servicePlan.GUID)
+	return nil
+}
diff --git a/ibm/data_source_ibm_service_plan_test.go b/ibm/data_source_ibm_service_plan_test.go
new file mode 100644
index 0000000000..7b1b64bf34
--- /dev/null
+++ b/ibm/data_source_ibm_service_plan_test.go
@@ -0,0 +1,36 @@
+package ibm
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccIBMServicePlanDataSource_basic(t *testing.T) {
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccCheckIBMServicePlanDataSourceConfig(),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("data.ibm_service_plan.testacc_ds_service_plan", "service", "cleardb"),
+					resource.TestCheckResourceAttr("data.ibm_service_plan.testacc_ds_service_plan", "plan", "cb5"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckIBMServicePlanDataSourceConfig() string {
+	return fmt.Sprintf(`
+
+data "ibm_service_plan" "testacc_ds_service_plan" {
+	service = "cleardb"
+	plan    = "cb5"
+}`,
+	)
+
+}
diff --git a/ibm/data_source_ibm_space.go b/ibm/data_source_ibm_space.go
new file mode 100644
index 0000000000..7a12930da7
--- /dev/null
+++ b/ibm/data_source_ibm_space.go
@@ -0,0 +1,89 @@
+package ibm
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceIBMSpace() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceIBMSpaceRead,
+
+		Schema: map[string]*schema.Schema{
+			"space": {
+				Description: "Space name, for example: dev",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+			"org": {
+				Description: "The org this space belongs to",
+				Type:        schema.TypeString,
+				Required:    true,
+			},
+			"auditors": {
+				Type:        schema.TypeSet,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "The IBMID of the users who have the auditor role in this space, for example: user@example.com",
+			},
+			"managers": {
+				Type:        schema.TypeSet,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "The IBMID of the users who have the manager role in this space, for example: user@example.com",
+			},
+			"developers": {
+				Type:        schema.TypeSet,
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+				Description: "The IBMID of the users who have the developer role in this space, for example: user@example.com",
+			},
+		},
+	}
+}
+
+func dataSourceIBMSpaceRead(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	orgAPI := cfClient.Organizations()
+	spaceAPI := cfClient.Spaces()
+
+	space := d.Get("space").(string)
+	org := d.Get("org").(string)
+
+	orgFields, err := orgAPI.FindByName(org, 
BluemixRegion) + if err != nil { + return fmt.Errorf("Error retrieving org: %s", err) + } + spaceFields, err := spaceAPI.FindByNameInOrg(orgFields.GUID, space, BluemixRegion) + if err != nil { + return fmt.Errorf("Error retrieving space: %s", err) + } + + spaceGUID := spaceFields.GUID + d.SetId(spaceGUID) + + auditors, err := spaceAPI.ListAuditors(spaceGUID) + if err != nil { + return fmt.Errorf("Error retrieving auditors in the space: %s", err) + } + + managers, err := spaceAPI.ListManagers(spaceGUID) + if err != nil { + return fmt.Errorf("Error retrieving managers in the space: %s", err) + } + + developers, err := spaceAPI.ListDevelopers(spaceGUID) + if err != nil { + return fmt.Errorf("Error retrieving developers in space: %s", err) + } + + d.Set("auditors", flattenSpaceRoleUsers(auditors)) + d.Set("managers", flattenSpaceRoleUsers(managers)) + d.Set("developers", flattenSpaceRoleUsers(developers)) + + return nil +} diff --git a/ibm/data_source_ibm_space_test.go b/ibm/data_source_ibm_space_test.go new file mode 100644 index 0000000000..9e15d10c9f --- /dev/null +++ b/ibm/data_source_ibm_space_test.go @@ -0,0 +1,34 @@ +package ibm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMSpaceDataSource_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMSpaceDataSourceConfig(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.ibm_space.testacc_ds_space", "org", cfOrganization), + resource.TestCheckResourceAttr("data.ibm_space.testacc_ds_space", "space", cfSpace), + ), + }, + }, + }) +} + +func testAccCheckIBMSpaceDataSourceConfig() string { + return fmt.Sprintf(` +data "ibm_space" "testacc_ds_space" { + org = "%s" + space = "%s" +}`, cfOrganization, cfSpace) + +} diff --git a/ibm/provider.go b/ibm/provider.go new file mode 100644 index 0000000000..d7b0122bd2 --- /dev/null +++ b/ibm/provider.go @@ -0,0 +1,136 @@ +package ibm + +import ( + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a terraform.ResourceProvider. 
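+//
+// A minimal provider block, sketched in HCL (the values are placeholders;
+// in practice they are usually supplied through the BM_*/SL_* environment
+// variables consulted by the DefaultFuncs below):
+//
+//	provider "ibm" {
+//	  bluemix_api_key    = "..."
+//	  softlayer_username = "..."
+//	  softlayer_api_key  = "..."
+//	  region             = "us-south"
+//	}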
+func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "bluemix_api_key": { + Type: schema.TypeString, + Optional: true, + Description: "The Bluemix API Key", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"BM_API_KEY", "BLUEMIX_API_KEY"}, ""), + }, + "bluemix_timeout": { + Type: schema.TypeInt, + Optional: true, + Description: "The timeout (in seconds) to set for any Bluemix API calls made.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"BM_TIMEOUT", "BLUEMIX_TIMEOUT"}, 60), + }, + "region": { + Type: schema.TypeString, + Optional: true, + Description: "The Bluemix Region (for example 'us-south').", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"BM_REGION", "BLUEMIX_REGION"}, "us-south"), + }, + "softlayer_api_key": { + Type: schema.TypeString, + Optional: true, + Description: "The SoftLayer API Key", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"SL_API_KEY", "SOFTLAYER_API_KEY"}, ""), + }, + "softlayer_username": { + Type: schema.TypeString, + Optional: true, + Description: "The SoftLayer user name", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"SL_USERNAME", "SOFTLAYER_USERNAME"}, ""), + }, + "softlayer_timeout": { + Type: schema.TypeInt, + Optional: true, + Description: "The timeout (in seconds) to set for any SoftLayer API calls made.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"SL_TIMEOUT", "SOFTLAYER_TIMEOUT"}, 60), + }, + }, + + DataSourcesMap: map[string]*schema.Resource{ + "ibm_account": dataSourceIBMAccount(), + "ibm_app": dataSourceIBMApp(), + "ibm_app_domain_private": dataSourceIBMAppDomainPrivate(), + "ibm_app_domain_shared": dataSourceIBMAppDomainShared(), + "ibm_app_route": dataSourceIBMAppRoute(), + "ibm_compute_image_template": dataSourceIBMComputeImageTemplate(), + "ibm_compute_ssh_key": dataSourceIBMComputeSSHKey(), + "ibm_compute_vm_instance": dataSourceIBMComputeVmInstance(), + "ibm_container_cluster": dataSourceIBMContainerCluster(), + "ibm_container_cluster_config": dataSourceIBMContainerClusterConfig(), + "ibm_container_cluster_worker": dataSourceIBMContainerClusterWorker(), + "ibm_dns_domain": dataSourceIBMDNSDomain(), + "ibm_network_vlan": dataSourceIBMNetworkVlan(), + "ibm_org": dataSourceIBMOrg(), + "ibm_service_instance": dataSourceIBMServiceInstance(), + "ibm_service_key": dataSourceIBMServiceKey(), + "ibm_service_plan": dataSourceIBMServicePlan(), + "ibm_space": dataSourceIBMSpace(), + }, + + ResourcesMap: map[string]*schema.Resource{ + + "ibm_app": resourceIBMApp(), + "ibm_app_domain_private": resourceIBMAppDomainPrivate(), + "ibm_app_domain_shared": resourceIBMAppDomainShared(), + "ibm_app_route": resourceIBMAppRoute(), + "ibm_compute_autoscale_group": resourceIBMComputeAutoScaleGroup(), + "ibm_compute_autoscale_policy": resourceIBMComputeAutoScalePolicy(), + "ibm_compute_bare_metal": resourceIBMComputeBareMetal(), + "ibm_compute_monitor": resourceIBMComputeMonitor(), + "ibm_compute_provisioning_hook": resourceIBMComputeProvisioningHook(), + "ibm_compute_ssh_key": resourceIBMComputeSSHKey(), + "ibm_compute_ssl_certificate": resourceIBMComputeSSLCertificate(), + "ibm_compute_user": resourceIBMComputeUser(), + "ibm_compute_vm_instance": resourceIBMComputeVmInstance(), + "ibm_container_cluster": resourceIBMContainerCluster(), + "ibm_container_bind_service": resourceIBMContainerBindService(), + "ibm_dns_domain": resourceIBMDNSDomain(), + "ibm_dns_record": resourceIBMDNSRecord(), + "ibm_firewall": resourceIBMFirewall(), + "ibm_firewall_policy": 
resourceIBMFirewallPolicy(),
+			"ibm_lb":                       resourceIBMLb(),
+			"ibm_lb_service":               resourceIBMLbService(),
+			"ibm_lb_service_group":         resourceIBMLbServiceGroup(),
+			"ibm_lb_vpx":                   resourceIBMLbVpx(),
+			"ibm_lb_vpx_ha":                resourceIBMLbVpxHa(),
+			"ibm_lb_vpx_service":           resourceIBMLbVpxService(),
+			"ibm_lb_vpx_vip":               resourceIBMLbVpxVip(),
+			"ibm_network_public_ip":        resourceIBMNetworkPublicIp(),
+			"ibm_network_vlan":             resourceIBMNetworkVlan(),
+			"ibm_object_storage_account":   resourceIBMObjectStorageAccount(),
+			"ibm_service_instance":         resourceIBMServiceInstance(),
+			"ibm_service_key":              resourceIBMServiceKey(),
+			"ibm_space":                    resourceIBMSpace(),
+			"ibm_storage_block":            resourceIBMStorageBlock(),
+			"ibm_storage_file":             resourceIBMStorageFile(),
+		},
+
+		ConfigureFunc: providerConfigure,
+	}
+}
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+	bluemixAPIKey := d.Get("bluemix_api_key").(string)
+	softlayerUsername := d.Get("softlayer_username").(string)
+	softlayerAPIKey := d.Get("softlayer_api_key").(string)
+	softlayerTimeout := d.Get("softlayer_timeout").(int)
+	bluemixTimeout := d.Get("bluemix_timeout").(int)
+	region := d.Get("region").(string)
+
+	config := Config{
+		BluemixAPIKey:        bluemixAPIKey,
+		Region:               region,
+		BluemixTimeout:       time.Duration(bluemixTimeout) * time.Second,
+		SoftLayerTimeout:     time.Duration(softlayerTimeout) * time.Second,
+		SoftLayerUserName:    softlayerUsername,
+		SoftLayerAPIKey:      softlayerAPIKey,
+		RetryCount:           3,
+		RetryDelay:           30 * time.Millisecond,
+		SoftLayerEndpointURL: SoftlayerRestEndpoint,
+	}
+
+	return config.ClientSession()
+} diff --git a/ibm/provider_test.go b/ibm/provider_test.go new file mode 100644 index 0000000000..d8106b6a95 --- /dev/null +++ b/ibm/provider_test.go @@ -0,0 +1,67 @@ +package ibm
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+var cfOrganization string
+var cfSpace string
+var ibmid1 string
+var ibmid2 string
+
+func init() {
+	cfOrganization = os.Getenv("IBM_ORG")
+	if cfOrganization == "" {
+		fmt.Println("[WARN] Set the environment variable IBM_ORG for testing the ibm_org resource. Some tests for that resource will fail if this is not set correctly")
+	}
+	cfSpace = os.Getenv("IBM_SPACE")
+	if cfSpace == "" {
+		fmt.Println("[WARN] Set the environment variable IBM_SPACE for testing the ibm_space resource. Some tests for that resource will fail if this is not set correctly")
+	}
+	ibmid1 = os.Getenv("IBM_ID1")
+	if ibmid1 == "" {
+		fmt.Println("[WARN] Set the environment variable IBM_ID1 for testing the ibm_space resource. Some tests for that resource will fail if this is not set correctly")
+	}
+
+	ibmid2 = os.Getenv("IBM_ID2")
+	if ibmid2 == "" {
+		fmt.Println("[WARN] Set the environment variable IBM_ID2 for testing the ibm_space resource. Some tests for that resource will fail if this is not set correctly")
+	}
+}
+
+var testAccProviders map[string]terraform.ResourceProvider
+var testAccProvider *schema.Provider
+
+func init() {
+	testAccProvider = Provider().(*schema.Provider)
+	testAccProviders = map[string]terraform.ResourceProvider{
+		"ibm": testAccProvider,
+	}
+}
+
+func TestProvider(t *testing.T) {
+	if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestProvider_impl(t *testing.T) {
+	var _ terraform.ResourceProvider = Provider()
+}
+
+func testAccPreCheck(t *testing.T) {
+	if v := os.Getenv("BM_API_KEY"); v == "" {
+		t.Fatal("BM_API_KEY must be set for acceptance tests")
+	}
+	if v :=
os.Getenv("SL_API_KEY"); v == "" { + t.Fatal("SL_API_KEY must be set for acceptance tests") + } + if v := os.Getenv("SL_USERNAME"); v == "" { + t.Fatal("SL_USERNAME must be set for acceptance tests") + } +} diff --git a/ibm/resource_ibm_app.go b/ibm/resource_ibm_app.go new file mode 100644 index 0000000000..d006e684be --- /dev/null +++ b/ibm/resource_ibm_app.go @@ -0,0 +1,532 @@ +package ibm + +import ( + "fmt" + "log" + "time" + + v2 "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/helpers" + "github.com/hashicorp/terraform/helper/schema" + homedir "github.com/mitchellh/go-homedir" +) + +func resourceIBMApp() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMAppCreate, + Read: resourceIBMAppRead, + Update: resourceIBMAppUpdate, + Delete: resourceIBMAppDelete, + Exists: resourceIBMAppExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name for the app", + }, + "memory": { + Description: "The amount of memory each instance should have. In megabytes.", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "instances": { + Description: "The number of instances", + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "disk_quota": { + Description: "The maximum amount of disk available to an instance of an app. In megabytes.", + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + "space_guid": { + Description: "Define space guid to which app belongs", + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "buildpack": { + Description: "Buildpack to build the app. 3 options: a) Blank means autodetection; b) A Git Url pointing to a buildpack; c) Name of an installed buildpack.", + Type: schema.TypeString, + Optional: true, + }, + "environment_json": { + Description: "Key/value pairs of all the environment variables to run in your app. Does not include any system or service variables.", + Type: schema.TypeMap, + Optional: true, + }, + "route_guid": { + Description: "Define the route guids which should be bound to the application.", + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "service_instance_guid": { + Description: "Define the service instance guids that should be bound to this application.", + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "app_path": { + Description: "Define the path of the zip file of the application.", + Type: schema.TypeString, + Required: true, + }, + "app_version": { + Description: "Version of the application", + Type: schema.TypeString, + Optional: true, + }, + "command": { + Description: "The initial command for the app", + Type: schema.TypeString, + Optional: true, + }, + "wait_time_minutes": { + Description: "Define timeout to wait for the app instances to start/update/restage etc. 
For example, if memory is updated, the instances are automatically destroyed and new ones are spun up by the Cloud controller.",
+				Type:     schema.TypeInt,
+				Optional: true,
+				Default:  20,
+			},
+		},
+	}
+}
+
+func resourceIBMAppCreate(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	appAPI := cfClient.Apps()
+	name := d.Get("name").(string)
+	spaceGUID := d.Get("space_guid").(string)
+
+	appCreatePayload := v2.AppRequest{
+		Name:      helpers.String(name),
+		SpaceGUID: helpers.String(spaceGUID),
+	}
+
+	if memory, ok := d.GetOk("memory"); ok {
+		appCreatePayload.Memory = memory.(int)
+	}
+
+	if instances, ok := d.GetOk("instances"); ok {
+		appCreatePayload.Instances = instances.(int)
+	}
+
+	if diskQuota, ok := d.GetOk("disk_quota"); ok {
+		appCreatePayload.DiskQuota = diskQuota.(int)
+	}
+
+	if buildpack, ok := d.GetOk("buildpack"); ok {
+		appCreatePayload.BuildPack = helpers.String(buildpack.(string))
+	}
+
+	if environmentJSON, ok := d.GetOk("environment_json"); ok {
+		appCreatePayload.EnvironmentJSON = helpers.Map(environmentJSON.(map[string]interface{}))
+
+	}
+
+	if command, ok := d.GetOk("command"); ok {
+		appCreatePayload.Command = helpers.String(command.(string))
+	}
+
+	_, err = appAPI.FindByName(spaceGUID, name)
+	if err == nil {
+		return fmt.Errorf("%s already exists in the given space %s", name, spaceGUID)
+	}
+
+	log.Println("[INFO] Creating Cloud Foundry Application")
+	app, err := appAPI.Create(appCreatePayload)
+	if err != nil {
+		return fmt.Errorf("Error creating app: %s", err)
+	}
+
+	appGUID := app.Metadata.GUID
+	log.Println("[INFO] Cloud Foundry Application created successfully")
+
+	d.SetId(appGUID)
+
+	if v, ok := d.Get("route_guid").(*schema.Set); ok && v.Len() > 0 {
+		log.Println("[INFO] Binding the routes to the Cloud Foundry application")
+		for _, routeID := range v.List() {
+			_, err := appAPI.BindRoute(appGUID, routeID.(string))
+			if err != nil {
+				return fmt.Errorf("Error binding route %s to app: %s", routeID.(string), err)
+			}
+		}
+	}
+	if v, ok := d.Get("service_instance_guid").(*schema.Set); ok && v.Len() > 0 {
+		sbAPI := cfClient.ServiceBindings()
+		for _, svcID := range v.List() {
+			req := v2.ServiceBindingRequest{
+				ServiceInstanceGUID: svcID.(string),
+				AppGUID:             appGUID,
+			}
+			_, err := sbAPI.Create(req)
+			if err != nil {
+				return fmt.Errorf("Error binding service instance %s to app: %s", svcID.(string), err)
+			}
+		}
+	}
+	log.Println("[INFO] Uploading the app bits to the Cloud Foundry application")
+	applicationZip, err := processAppZipPath(d.Get("app_path").(string))
+	if err != nil {
+		return err
+	}
+
+	_, err = appAPI.Upload(appGUID, applicationZip)
+	if err != nil {
+		return fmt.Errorf("Error uploading app bits: %s", err)
+	}
+
+	err = restartApp(appGUID, d, meta)
+	if err != nil {
+		return err
+	}
+	log.Printf("[INFO] Application: %s has started successfully", name)
+	return resourceIBMAppRead(d, meta)
+}
+
+func resourceIBMAppRead(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	appAPI := cfClient.Apps()
+	appGUID := d.Id()
+
+	appData, err := appAPI.Get(appGUID)
+	if err != nil {
+		return fmt.Errorf("Error retrieving app details %s : %s", appGUID, err)
+	}
+
+	d.SetId(appData.Metadata.GUID)
+	d.Set("name", appData.Entity.Name)
+	d.Set("memory", appData.Entity.Memory)
+	d.Set("instances", appData.Entity.Instances)
+	d.Set("space_guid", appData.Entity.SpaceGUID)
+	d.Set("disk_quota",
appData.Entity.DiskQuota)
+	d.Set("buildpack", appData.Entity.BuildPack)
+	d.Set("environment_json", appData.Entity.EnvironmentJSON)
+	d.Set("command", appData.Entity.Command)
+
+	route, err := appAPI.ListRoutes(appGUID)
+	if err != nil {
+		return err
+	}
+	if len(route) > 0 {
+		d.Set("route_guid", flattenRoute(route))
+	}
+
+	svcBindings, err := appAPI.ListServiceBindings(appGUID)
+	if err != nil {
+		return err
+	}
+	if len(svcBindings) > 0 {
+		d.Set("service_instance_guid", flattenServiceBindings(svcBindings))
+	}
+
+	return nil
+
+}
+
+func resourceIBMAppUpdate(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	appAPI := cfClient.Apps()
+	appGUID := d.Id()
+
+	appUpdatePayload := v2.AppRequest{}
+	restartRequired := false
+	restageRequired := false
+
+	waitTimeout := time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute
+
+	if d.HasChange("name") {
+		appUpdatePayload.Name = helpers.String(d.Get("name").(string))
+	}
+
+	if d.HasChange("memory") {
+		appUpdatePayload.Memory = d.Get("memory").(int)
+	}
+
+	if d.HasChange("instances") {
+		appUpdatePayload.Instances = d.Get("instances").(int)
+	}
+
+	if d.HasChange("disk_quota") {
+		appUpdatePayload.DiskQuota = d.Get("disk_quota").(int)
+	}
+
+	if d.HasChange("buildpack") {
+		appUpdatePayload.BuildPack = helpers.String(d.Get("buildpack").(string))
+		restageRequired = true
+	}
+
+	if d.HasChange("command") {
+		appUpdatePayload.Command = helpers.String(d.Get("command").(string))
+		restartRequired = true
+	}
+
+	if d.HasChange("environment_json") {
+		appUpdatePayload.EnvironmentJSON = helpers.Map(d.Get("environment_json").(map[string]interface{}))
+		restageRequired = true
+	}
+	log.Println("[INFO] Updating the Cloud Foundry application")
+
+	_, err = appAPI.Update(appGUID, appUpdatePayload)
+	if err != nil {
+		return fmt.Errorf("Error updating application: %s", err)
+	}
+	//TODO find the digest of the zip and avoid the upload if it is the same
+	if d.HasChange("app_path") || d.HasChange("app_version") {
+		appZipLoc, err := processAppZipPath(d.Get("app_path").(string))
+		if err != nil {
+			return err
+		}
+		log.Println("[DEBUG] Uploading application bits")
+		_, err = appAPI.Upload(appGUID, appZipLoc)
+		if err != nil {
+			return fmt.Errorf("Error uploading app: %s", err)
+		}
+		restartRequired = true
+	}
+
+	err = updateRouteGUID(appGUID, appAPI, d)
+	if err != nil {
+		return err
+	}
+
+	restage, err := updateServiceInstanceGUID(appGUID, d, meta)
+	if err != nil {
+		return err
+	}
+	if restage {
+		restageRequired = true
+	}
+
+	/*Wait in case any previous staging is still in progress
+	log.Println("[INFO] Waiting to see whether any previous staging is in progress")
+	state, err := appAPI.WaitForAppStatus(v2.AppStagedState, appGUID, waitTimeout)
+	if waitTimeout != 0 && (err != nil || state == v2.AppPendingState) {
+		return fmt.Errorf("The application is still in %s from the last operation. Please try again after some time by increasing the timeout value %q", state, err)
+	}*/
+
+	//If both a restage and a restart are required, only the restage is needed, as it starts everything over
+	if restageRequired {
+		log.Println("[INFO] Restaging the application")
+		err := restageApp(appGUID, d, meta)
+		if err != nil {
+			return err
+		}
+	} else if restartRequired {
+		err := restartApp(appGUID, d, meta)
+		if err != nil {
+			return err
+		}
+	} else {
+		//If only memory/disk etc. are updated, the cloud controller destroys the current instances
+		//and spins up new ones, so wait until they come back up
+		state, err := appAPI.WaitForInstanceStatus(v2.AppRunningState, appGUID, waitTimeout)
+		if waitTimeout != 0 && (err != nil || state != v2.AppRunningState) {
+			return fmt.Errorf("Not all application instances are %s; current status is %s, %q", v2.AppRunningState, state, err)
+		}
+	}
+
+	return resourceIBMAppRead(d, meta)
+}
+
+func resourceIBMAppDelete(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	appAPI := cfClient.Apps()
+	id := d.Id()
+
+	err = appAPI.Delete(id, false, true)
+	if err != nil {
+		return fmt.Errorf("Error deleting app: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMAppExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return false, err
+	}
+	appAPI := cfClient.Apps()
+	id := d.Id()
+
+	app, err := appAPI.Get(id)
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+
+	return app.Metadata.GUID == id, nil
+}
+
+func updateRouteGUID(appGUID string, appAPI v2.Apps, d *schema.ResourceData) (err error) {
+	if d.HasChange("route_guid") {
+		ors, nrs := d.GetChange("route_guid")
+		or := ors.(*schema.Set)
+		nr := nrs.(*schema.Set)
+
+		remove := expandStringList(or.Difference(nr).List())
+		add := expandStringList(nr.Difference(or).List())
+
+		if len(add) > 0 {
+			for i := range add {
+				_, err = appAPI.BindRoute(appGUID, add[i])
+				if err != nil {
+					return fmt.Errorf("Error while binding route %q to application %s: %q", add[i], appGUID, err)
+				}
+			}
+		}
+		if len(remove) > 0 {
+			for i := range remove {
+				err = appAPI.UnBindRoute(appGUID, remove[i])
+				if err != nil {
+					return fmt.Errorf("Error while un-binding route %q from application %s: %q", remove[i], appGUID, err)
+				}
+			}
+		}
+	}
+	return
+}
+
+func updateServiceInstanceGUID(appGUID string, d *schema.ResourceData, meta interface{}) (restageRequired bool, err error) {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return false, err
+	}
+	appAPI := cfClient.Apps()
+	sbAPI := cfClient.ServiceBindings()
+	if d.HasChange("service_instance_guid") {
+		oss, nss := d.GetChange("service_instance_guid")
+		os := oss.(*schema.Set)
+		ns := nss.(*schema.Set)
+		remove := expandStringList(os.Difference(ns).List())
+		add := expandStringList(ns.Difference(os).List())
+
+		if len(add) > 0 {
+			for i := range add {
+				sbPayload := v2.ServiceBindingRequest{
+					ServiceInstanceGUID: add[i],
+					AppGUID:             appGUID,
+				}
+				_, err = sbAPI.Create(sbPayload)
+				if err != nil {
+					err = fmt.Errorf("Error while binding service instance %s to application %s: %q", add[i], appGUID, err)
+					return
+				}
+				restageRequired = true
+			}
+		}
+		if len(remove) > 0 {
+			var appFilters, svcFilters string
+			var bindings []v2.ServiceBinding
+			appFilters, err = new(v2.Filter).Name("app_guid").Eq(appGUID).Build()
+			if err != nil {
+				return
+			}
+			svcFilters, err = new(v2.Filter).Name("service_instance_guid").In(remove...).Build()
+			if err != nil {
+				return
+			}
+			bindings, err = sbAPI.List(appFilters, svcFilters)
+			if err != nil {
+				return
+			}
+			sbIds := make([]string, len(bindings))
+			for i, sb := range bindings {
+				sbIds[i] = sb.GUID
+			}
+			err = appAPI.DeleteServiceBindings(appGUID, sbIds...)
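+			// The bindings collected above were filtered on both app_guid and
+			// service_instance_guid, so only the bindings being removed are
+			// deleted; the service instances themselves are left intact.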
+			if err != nil {
+				err = fmt.Errorf("Error while un-binding service instances %s from application %s: %q", remove, appGUID, err)
+				return
+			}
+		}
+	}
+	return
+}
+func restartApp(appGUID string, d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	appAPI := cfClient.Apps()
+
+	appUpdatePayload := v2.AppRequest{
+		State: helpers.String(v2.AppStoppedState),
+	}
+	log.Println("[INFO] Stopping Application")
+	_, err = appAPI.Update(appGUID, appUpdatePayload)
+	if err != nil {
+		return fmt.Errorf("Error updating application status to %s %s", v2.AppStoppedState, err)
+	}
+	waitTimeout := time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute
+	log.Println("[INFO] Starting Application")
+	status, err := appAPI.Start(appGUID, waitTimeout)
+	if err != nil {
+		return fmt.Errorf("Error while starting application : %s", err)
+	}
+	if waitTimeout != 0 {
+		return checkAppStatus(status)
+	}
+	return nil
+}
+
+func restageApp(appGUID string, d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	appAPI := cfClient.Apps()
+
+	log.Println("[INFO] Restaging Application")
+	waitTimeout := time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute
+	status, err := appAPI.Restage(appGUID, waitTimeout)
+	if err != nil {
+		return fmt.Errorf("Error while restaging application : %s", err)
+	}
+	if waitTimeout != 0 {
+		return checkAppStatus(status)
+	}
+	return nil
+}
+
+func checkAppStatus(status *v2.AppState) error {
+	if status.PackageState != v2.AppStagedState {
+		return fmt.Errorf("Application couldn't be staged; current status is %s", status.PackageState)
+	}
+	if status.InstanceState != v2.AppRunningState {
+		return fmt.Errorf("Not all application instances are %s; current status is %s", v2.AppRunningState, status.InstanceState)
+	}
+	return nil
+}
+
+func processAppZipPath(path string) (string, error) {
+	applicationZip, err := homedir.Expand(path)
+	if err != nil {
+		return path, fmt.Errorf("home directory in the given path %s couldn't be expanded", path)
+	}
+	if !helpers.FileExists(applicationZip) {
+		return path, fmt.Errorf("The given app path: %s doesn't exist", path)
+	}
+	return applicationZip, nil
+} diff --git a/ibm/resource_ibm_app_domain_private.go b/ibm/resource_ibm_app_domain_private.go new file mode 100644 index 0000000000..9245d93888 --- /dev/null +++ b/ibm/resource_ibm_app_domain_private.go @@ -0,0 +1,115 @@ +package ibm
+
+import (
+	"fmt"
+
+	v2 "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2"
+
+	"github.com/IBM-Bluemix/bluemix-go/bmxerror"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceIBMAppDomainPrivate() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMAppDomainPrivateCreate,
+		Read:     resourceIBMAppDomainPrivateRead,
+		Delete:   resourceIBMAppDomainPrivateDelete,
+		Exists:   resourceIBMAppDomainPrivateExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"name": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				Description:  "The name of the domain",
+				ValidateFunc: validateDomainName,
+			},
+
+			"org_guid": {
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+				Description: "The organization that owns the domain.",
+			},
+		},
+	}
+}
+
+func resourceIBMAppDomainPrivateCreate(d *schema.ResourceData, meta interface{}) error {
+	cfClient, err := meta.(ClientSession).MccpAPI()
+	if err != nil {
+		return err
+	}
+	name := d.Get("name").(string)
+	orgGUID := d.Get("org_guid").(string)
+
+	params
:= v2.PrivateDomainRequest{ + Name: name, + OrgGUID: orgGUID, + } + + prdomain, err := cfClient.PrivateDomains().Create(params) + if err != nil { + return fmt.Errorf("Error creating private domain: %s", err) + } + + d.SetId(prdomain.Metadata.GUID) + + return resourceIBMAppDomainPrivateRead(d, meta) +} + +func resourceIBMAppDomainPrivateRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + prdomainGUID := d.Id() + + prdomain, err := cfClient.PrivateDomains().Get(prdomainGUID) + if err != nil { + return fmt.Errorf("Error retrieving private domain: %s", err) + } + d.Set("name", prdomain.Entity.Name) + d.Set("org_guid", prdomain.Entity.OwningOrganizationGUID) + + return nil +} + +func resourceIBMAppDomainPrivateDelete(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + + prdomainGUID := d.Id() + + err = cfClient.PrivateDomains().Delete(prdomainGUID, true) + if err != nil { + return fmt.Errorf("Error deleting private domain: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceIBMAppDomainPrivateExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return false, err + } + prdomainGUID := d.Id() + + prdomain, err := cfClient.PrivateDomains().Get(prdomainGUID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return prdomain.Metadata.GUID == prdomainGUID, nil +} diff --git a/ibm/resource_ibm_app_domain_private_test.go b/ibm/resource_ibm_app_domain_private_test.go new file mode 100644 index 0000000000..b216a871cb --- /dev/null +++ b/ibm/resource_ibm_app_domain_private_test.go @@ -0,0 +1,116 @@ +package ibm + +import ( + "fmt" + "testing" + + "strings" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" +) + +func TestAccIBMAppDomainPrivate_Basic(t *testing.T) { + var conf mccpv2.PrivateDomainFields + name := fmt.Sprintf("terraform%d.com", acctest.RandInt()) + updateName := fmt.Sprintf("terraformnew%d.com", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMAppDomainPrivate_basic(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMAppDomainPrivateExists("ibm_app_domain_private.domain", &conf), + resource.TestCheckResourceAttr("ibm_app_domain_private.domain", "name", name), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMAppDomainPrivate_updateName(updateName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_app_domain_private.domain", "name", updateName), + ), + }, + }, + }) +} + +func testAccCheckIBMAppDomainPrivateExists(n string, obj *mccpv2.PrivateDomainFields) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + privateDomainGUID := rs.Primary.ID + + prdomain, err := 
cfClient.PrivateDomains().Get(privateDomainGUID) + if err != nil { + return err + } + + *obj = *prdomain + return nil + } +} + +func testAccCheckIBMAppDomainPrivateDestroy(s *terraform.State) error { + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_app_domain_private" { + continue + } + + privateDomainGUID := rs.Primary.ID + + // Try to find the private domain + _, err := cfClient.PrivateDomains().Get(privateDomainGUID) + + if err != nil && !strings.Contains(err.Error(), "404") { + return fmt.Errorf("Error waiting for CF private domain (%s) to be destroyed: %s", rs.Primary.ID, err) + } + } + + return nil +} + +func testAccCheckIBMAppDomainPrivate_basic(name string) string { + return fmt.Sprintf(` + + data "ibm_org" "orgdata" { + org = "%s" + } + + resource "ibm_app_domain_private" "domain" { + name = "%s" + org_guid = "${data.ibm_org.orgdata.id}" + } + `, cfOrganization, name) +} + +func testAccCheckIBMAppDomainPrivate_updateName(updateName string) string { + return fmt.Sprintf(` + + data "ibm_org" "orgdata" { + org = "%s" + } + + resource "ibm_app_domain_private" "domain" { + name = "%s" + org_guid = "${data.ibm_org.orgdata.id}" + } + `, cfOrganization, updateName) +} diff --git a/ibm/resource_ibm_app_domain_shared.go b/ibm/resource_ibm_app_domain_shared.go new file mode 100644 index 0000000000..0be546b11a --- /dev/null +++ b/ibm/resource_ibm_app_domain_shared.go @@ -0,0 +1,115 @@ +package ibm + +import ( + "fmt" + + v2 "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceIBMAppDomainShared() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMAppDomainSharedCreate, + Read: resourceIBMAppDomainSharedRead, + Delete: resourceIBMAppDomainSharedDelete, + Exists: resourceIBMAppDomainSharedExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the domain", + ValidateFunc: validateDomainName, + }, + + "router_group_guid": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The guid of the router group.", + }, + }, + } +} + +func resourceIBMAppDomainSharedCreate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + name := d.Get("name").(string) + routerGroupGUID := d.Get("router_group_guid").(string) + + params := v2.SharedDomainRequest{ + Name: name, + RouterGroupGUID: routerGroupGUID, + } + + shdomain, err := cfClient.SharedDomains().Create(params) + if err != nil { + return fmt.Errorf("Error creating shared domain: %s", err) + } + + d.SetId(shdomain.Metadata.GUID) + + return resourceIBMAppDomainSharedRead(d, meta) +} + +func resourceIBMAppDomainSharedRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + shdomainGUID := d.Id() + + shdomain, err := cfClient.SharedDomains().Get(shdomainGUID) + if err != nil { + return fmt.Errorf("Error retrieving shared domain: %s", err) + } + d.Set("name", shdomain.Entity.Name) + d.Set("router_group_guid", shdomain.Entity.RouterGroupGUID) + + return nil +} + +func resourceIBMAppDomainSharedDelete(d *schema.ResourceData, meta interface{}) error { + cfClient, err := 
meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + + shdomainGUID := d.Id() + + err = cfClient.SharedDomains().Delete(shdomainGUID, true) + if err != nil { + return fmt.Errorf("Error deleting shared domain: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceIBMAppDomainSharedExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return false, err + } + shdomainGUID := d.Id() + + shdomain, err := cfClient.SharedDomains().Get(shdomainGUID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return shdomain.Metadata.GUID == shdomainGUID, nil +} diff --git a/ibm/resource_ibm_app_domain_shared_test.go b/ibm/resource_ibm_app_domain_shared_test.go new file mode 100644 index 0000000000..97d47260f5 --- /dev/null +++ b/ibm/resource_ibm_app_domain_shared_test.go @@ -0,0 +1,90 @@ +package ibm + +import ( + "fmt" + "testing" + + "strings" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" +) + +func TestAccIBMAppDomainShared_Basic(t *testing.T) { + var conf mccpv2.SharedDomainFields + name := fmt.Sprintf("terraform%d.com", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMAppDomainShared_basic(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMAppDomainSharedExists("ibm_app_domain_shared.domain", &conf), + resource.TestCheckResourceAttr("ibm_app_domain_shared.domain", "name", name), + ), + }, + }, + }) +} + +func testAccCheckIBMAppDomainSharedExists(n string, obj *mccpv2.SharedDomainFields) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + sharedDomainGUID := rs.Primary.ID + + shdomain, err := cfClient.SharedDomains().Get(sharedDomainGUID) + if err != nil { + return err + } + + *obj = *shdomain + return nil + } +} + +func testAccCheckIBMAppDomainSharedDestroy(s *terraform.State) error { + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_app_domain_shared" { + continue + } + + sharedDomainGUID := rs.Primary.ID + + // Try to find the shared domain + _, err := cfClient.SharedDomains().Get(sharedDomainGUID) + + if err != nil && !strings.Contains(err.Error(), "404") { + return fmt.Errorf("Error waiting for CF shared domain (%s) to be destroyed: %s", rs.Primary.ID, err) + } + } + + return nil +} + +func testAccCheckIBMAppDomainShared_basic(name string) string { + return fmt.Sprintf(` + + resource "ibm_app_domain_shared" "domain" { + name = "%s" + } + `, name) +} diff --git a/ibm/resource_ibm_app_route.go b/ibm/resource_ibm_app_route.go new file mode 100644 index 0000000000..75f16e1781 --- /dev/null +++ b/ibm/resource_ibm_app_route.go @@ -0,0 +1,181 @@ +package ibm + +import ( + "fmt" + + v2 "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" + 
"github.com/IBM-Bluemix/bluemix-go/helpers" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceIBMAppRoute() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMAppRouteCreate, + Read: resourceIBMAppRouteRead, + Update: resourceIBMAppRouteUpdate, + Delete: resourceIBMAppRouteDelete, + Exists: resourceIBMAppRouteExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Optional: true, + Description: "The host portion of the route. Required for shared-domains.", + }, + + "space_guid": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The guid of the associated space", + }, + + "domain_guid": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The guid of the associated domain", + }, + + "port": { + Description: "The port of the route. Supported for domains of TCP router groups only.", + Optional: true, + Type: schema.TypeInt, + ValidateFunc: validateRoutePort, + }, + + "path": { + Description: "The path for a route as raw text.Paths must be between 2 and 128 characters.Paths must start with a forward slash '/'.Paths must not contain a '?'", + Optional: true, + Type: schema.TypeString, + ValidateFunc: validateRoutePath, + }, + }, + } +} + +func resourceIBMAppRouteCreate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + + spaceGUID := d.Get("space_guid").(string) + domainGUID := d.Get("domain_guid").(string) + + params := v2.RouteRequest{ + SpaceGUID: spaceGUID, + DomainGUID: domainGUID, + } + + if host, ok := d.GetOk("host"); ok { + params.Host = host.(string) + } + + if port, ok := d.GetOk("port"); ok { + params.Port = helpers.Int(port.(int)) + } + + if path, ok := d.GetOk("path"); ok { + params.Path = path.(string) + } + + route, err := cfClient.Routes().Create(params) + if err != nil { + return fmt.Errorf("Error creating route: %s", err) + } + + d.SetId(route.Metadata.GUID) + + return resourceIBMAppRouteRead(d, meta) +} + +func resourceIBMAppRouteRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + routeGUID := d.Id() + + route, err := cfClient.Routes().Get(routeGUID) + if err != nil { + return fmt.Errorf("Error retrieving route: %s", err) + } + + d.Set("host", route.Entity.Host) + d.Set("space_guid", route.Entity.SpaceGUID) + d.Set("domain_guid", route.Entity.DomainGUID) + if route.Entity.Port != nil { + d.Set("port", route.Entity.Port) + } + d.Set("path", route.Entity.Path) + + return nil +} + +func resourceIBMAppRouteUpdate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + + routeGUID := d.Id() + params := v2.RouteUpdateRequest{} + + if d.HasChange("host") { + params.Host = helpers.String(d.Get("host").(string)) + } + + if d.HasChange("port") { + params.Port = helpers.Int(d.Get("port").(int)) + } + + if d.HasChange("path") { + params.Path = helpers.String(d.Get("path").(string)) + } + + _, err = cfClient.Routes().Update(routeGUID, params) + if err != nil { + return fmt.Errorf("Error updating route: %s", err) + } + return resourceIBMAppRouteRead(d, meta) +} + +func resourceIBMAppRouteDelete(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } 
+ routeGUID := d.Id() + + err = cfClient.Routes().Delete(routeGUID, true) + if err != nil { + return fmt.Errorf("Error deleting route: %s", err) + } + + d.SetId("") + + return nil +} +func resourceIBMAppRouteExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return false, err + } + routeGUID := d.Id() + + route, err := cfClient.Routes().Get(routeGUID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return route.Metadata.GUID == routeGUID, nil +} diff --git a/ibm/resource_ibm_app_route_test.go b/ibm/resource_ibm_app_route_test.go new file mode 100644 index 0000000000..b721a1e726 --- /dev/null +++ b/ibm/resource_ibm_app_route_test.go @@ -0,0 +1,161 @@ +package ibm + +import ( + "fmt" + "testing" + + "strings" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" +) + +func TestAccIBMAppRoute_Basic(t *testing.T) { + var conf mccpv2.RouteFields + host := fmt.Sprintf("terraform_%d", acctest.RandInt()) + updateHost := fmt.Sprintf("terraform_%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMAppRouteDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMAppRoute_basic(host), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMAppRouteExists("ibm_app_route.route", &conf), + resource.TestCheckResourceAttr("ibm_app_route.route", "host", host), + resource.TestCheckResourceAttr("ibm_app_route.route", "path", "/app"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMAppRoute_updatePath(host), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMAppRouteExists("ibm_app_route.route", &conf), + resource.TestCheckResourceAttr("ibm_app_route.route", "host", host), + resource.TestCheckResourceAttr("ibm_app_route.route", "path", "/app1"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMAppRoute_updateHost(updateHost), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_app_route.route", "host", updateHost), + resource.TestCheckResourceAttr("ibm_app_route.route", "path", ""), + ), + }, + }, + }) +} + +func testAccCheckIBMAppRouteDestroy(s *terraform.State) error { + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_app_route" { + continue + } + + routeGuid := rs.Primary.ID + + // Try to find the key + _, err := cfClient.Routes().Get(routeGuid) + + if err != nil && !strings.Contains(err.Error(), "404") { + return fmt.Errorf("Error waiting for CF route (%s) to be destroyed: %s", rs.Primary.ID, err) + } + } + + return nil +} + +func testAccCheckIBMAppRouteExists(n string, obj *mccpv2.RouteFields) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + routeGuid := rs.Primary.ID + + route, err := cfClient.Routes().Get(routeGuid) + if err != nil { + return err + } + + 
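+		// Copy the fetched route fields into the caller-supplied struct so
+		// later check functions in the same test step can inspect them.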
*obj = *route + return nil + } +} + +func testAccCheckIBMAppRoute_basic(host string) string { + return fmt.Sprintf(` + + data "ibm_space" "spacedata" { + org = "%s" + space = "%s" + } + + data "ibm_app_domain_shared" "domain" { + name = "mybluemix.net" + } + + resource "ibm_app_route" "route" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.spacedata.id}" + host = "%s" + path = "/app" + } + `, cfOrganization, cfSpace, host) +} + +func testAccCheckIBMAppRoute_updatePath(host string) string { + return fmt.Sprintf(` + + data "ibm_space" "spacedata" { + org = "%s" + space = "%s" + } + + data "ibm_app_domain_shared" "domain" { + name = "mybluemix.net" + } + + resource "ibm_app_route" "route" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.spacedata.id}" + host = "%s" + path = "/app1" + } + `, cfOrganization, cfSpace, host) +} + +func testAccCheckIBMAppRoute_updateHost(updateHost string) string { + return fmt.Sprintf(` + + data "ibm_space" "spacedata" { + org = "%s" + space = "%s" + } + + data "ibm_app_domain_shared" "domain" { + name = "mybluemix.net" + } + + resource "ibm_app_route" "route" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.spacedata.id}" + host = "%s" + } + `, cfOrganization, cfSpace, updateHost) +} diff --git a/ibm/resource_ibm_app_test.go b/ibm/resource_ibm_app_test.go new file mode 100644 index 0000000000..38ccf00659 --- /dev/null +++ b/ibm/resource_ibm_app_test.go @@ -0,0 +1,554 @@ +package ibm + +import ( + "fmt" + "regexp" + "testing" + + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccIBMApp_Invalid_Application_Path(t *testing.T) { + name := fmt.Sprintf("terraform_%d", acctest.RandInt()) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMAppDestroy, + Steps: []resource.TestStep{ + + resource.TestStep{ + Config: testAccCheckIBMAppInvalidPath(name), + ExpectError: regexp.MustCompile(`The given app path: doesn't exist`), + }, + }, + }) +} + +func TestAccIBMApp_Basic(t *testing.T) { + var conf mccpv2.AppFields + name := fmt.Sprintf("terraform_%d", acctest.RandInt()) + updatedName := fmt.Sprintf("terraform_updated_%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMAppDestroy, + Steps: []resource.TestStep{ + + resource.TestStep{ + Config: testAccCheckIBMAppCreate(name), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMAppExists("ibm_app.app", &conf), + resource.TestCheckResourceAttr("ibm_app.app", "name", name), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMAppUpdate(updatedName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_app.app", "name", updatedName), + resource.TestCheckResourceAttr("ibm_app.app", "instances", "2"), + resource.TestCheckResourceAttr("ibm_app.app", "memory", "128"), + resource.TestCheckResourceAttr("ibm_app.app", "disk_quota", "512"), + resource.TestCheckResourceAttr("ibm_app.app", "environment_json.test", "test1"), + ), + }, + }, + }) +} + +func TestAccIBMApp_with_routes(t *testing.T) { + var conf mccpv2.AppFields + name := fmt.Sprintf("terraform_%d", acctest.RandInt()) + route1 := 
fmt.Sprintf("terraform-%d", acctest.RandInt()) + route2 := fmt.Sprintf("terraform-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMAppDestroy, + Steps: []resource.TestStep{ + + resource.TestStep{ + Config: testAccCheckIBMAppBindRoute(name, route1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMAppExists("ibm_app.app", &conf), + resource.TestCheckResourceAttr("ibm_app.app", "name", name), + resource.TestCheckResourceAttr("ibm_app.app", "instances", "1"), + resource.TestCheckResourceAttr("ibm_app.app", "memory", "128"), + resource.TestCheckResourceAttr("ibm_app.app", "disk_quota", "512"), + resource.TestCheckResourceAttr("ibm_app.app", "environment_json.test", "test1"), + resource.TestCheckResourceAttr("ibm_app.app", "route_guid.#", "1"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMAppAddMultipleRoute(name, route1, route2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMAppExists("ibm_app.app", &conf), + resource.TestCheckResourceAttr("ibm_app.app", "name", name), + resource.TestCheckResourceAttr("ibm_app.app", "instances", "1"), + resource.TestCheckResourceAttr("ibm_app.app", "memory", "128"), + resource.TestCheckResourceAttr("ibm_app.app", "disk_quota", "512"), + resource.TestCheckResourceAttr("ibm_app.app", "environment_json.test", "test1"), + resource.TestCheckResourceAttr("ibm_app.app", "route_guid.#", "2"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMAppUnBindRoute(name, route1, route2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMAppExists("ibm_app.app", &conf), + resource.TestCheckResourceAttr("ibm_app.app", "name", name), + resource.TestCheckResourceAttr("ibm_app.app", "instances", "1"), + resource.TestCheckResourceAttr("ibm_app.app", "memory", "128"), + resource.TestCheckResourceAttr("ibm_app.app", "disk_quota", "512"), + resource.TestCheckResourceAttr("ibm_app.app", "environment_json.test", "test1"), + resource.TestCheckResourceAttr("ibm_app.app", "route_guid.#", "1"), + ), + }, + }, + }) + +} + +func TestAccIBMApp_with_service_instances(t *testing.T) { + var conf mccpv2.AppFields + name := fmt.Sprintf("terraform_%d", acctest.RandInt()) + route := fmt.Sprintf("terraform-%d", acctest.RandInt()) + serviceName1 := fmt.Sprintf("terraform_%d", acctest.RandInt()) + serviceName2 := fmt.Sprintf("terraform_%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMAppDestroy, + Steps: []resource.TestStep{ + + resource.TestStep{ + Config: testAccCheckIBMAppBindService(name, route, serviceName1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMAppExists("ibm_app.app", &conf), + resource.TestCheckResourceAttr("ibm_app.app", "name", name), + resource.TestCheckResourceAttr("ibm_app.app", "instances", "1"), + resource.TestCheckResourceAttr("ibm_app.app", "memory", "128"), + resource.TestCheckResourceAttr("ibm_app.app", "disk_quota", "512"), + resource.TestCheckResourceAttr("ibm_app.app", "environment_json.test", "test1"), + resource.TestCheckResourceAttr("ibm_app.app", "route_guid.#", "1"), + resource.TestCheckResourceAttr("ibm_app.app", "service_instance_guid.#", "1"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMAppAddMultipleService(name, route, serviceName1, serviceName2), + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckIBMAppExists("ibm_app.app", &conf), + resource.TestCheckResourceAttr("ibm_app.app", "name", name), + resource.TestCheckResourceAttr("ibm_app.app", "instances", "1"), + resource.TestCheckResourceAttr("ibm_app.app", "memory", "128"), + resource.TestCheckResourceAttr("ibm_app.app", "disk_quota", "512"), + resource.TestCheckResourceAttr("ibm_app.app", "environment_json.test", "test1"), + resource.TestCheckResourceAttr("ibm_app.app", "route_guid.#", "1"), + resource.TestCheckResourceAttr("ibm_app.app", "service_instance_guid.#", "2"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMAppUnBindService(name, route, serviceName1, serviceName2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMAppExists("ibm_app.app", &conf), + resource.TestCheckResourceAttr("ibm_app.app", "name", name), + resource.TestCheckResourceAttr("ibm_app.app", "instances", "1"), + resource.TestCheckResourceAttr("ibm_app.app", "memory", "128"), + resource.TestCheckResourceAttr("ibm_app.app", "disk_quota", "512"), + resource.TestCheckResourceAttr("ibm_app.app", "environment_json.test", "test1"), + resource.TestCheckResourceAttr("ibm_app.app", "route_guid.#", "1"), + resource.TestCheckResourceAttr("ibm_app.app", "service_instance_guid.#", "1"), + ), + }, + }, + }) + +} + +func testAccCheckIBMAppDestroy(s *terraform.State) error { + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_app" { + continue + } + appGUID := rs.Primary.ID + + _, err := cfClient.Apps().Get(appGUID) + if err == nil { + return fmt.Errorf("App still exists: %s", rs.Primary.ID) + } + } + + return nil + +} + +func testAccCheckIBMAppExists(n string, obj *mccpv2.AppFields) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + appGUID := rs.Primary.ID + + app, err := cfClient.Apps().Get(appGUID) + if err != nil { + return err + } + + *obj = *app + return nil + } +} + +func testAccCheckIBMAppInvalidPath(name string) string { + return fmt.Sprintf(` + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +resource "ibm_app" "app" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + app_path = "" + wait_time_minutes = 90 + buildpack = "sdk-for-nodejs" +}`, cfOrganization, cfSpace, name) + +} + +func testAccCheckIBMAppCreate(name string) string { + return fmt.Sprintf(` + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +resource "ibm_app" "app" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + app_path = "test-fixtures/app1.zip" + wait_time_minutes = 90 + buildpack = "sdk-for-nodejs" +}`, cfOrganization, cfSpace, name) + +} + +func testAccCheckIBMAppUpdate(name string) string { + return fmt.Sprintf(` +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +resource "ibm_app" "app" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + app_path = "test-fixtures/app1.zip" + wait_time_minutes = 20 + buildpack = "sdk-for-nodejs" + disk_quota = 512 + memory = 128 + instances = 2 + + environment_json = { + "test" = "test1" + } +}`, cfOrganization, cfSpace, name) + +} + +func testAccCheckIBMAppBindRoute(name, route1 string) string { + return fmt.Sprintf(` + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_app_domain_shared" "domain" { 
+ name = "mybluemix.net" +} + +resource "ibm_app_route" "route" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.space.id}" + host = "%s" +} + +resource "ibm_app" "app" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + app_path = "test-fixtures/app1.zip" + wait_time_minutes = 20 + buildpack = "sdk-for-nodejs" + instances = 1 + route_guid = ["${ibm_app_route.route.id}"] + disk_quota = 512 + memory = 128 + + environment_json = { + "test" = "test1" + } +}`, cfOrganization, cfSpace, route1, name) + +} + +func testAccCheckIBMAppAddMultipleRoute(name, route1, route2 string) string { + return fmt.Sprintf(` + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_app_domain_shared" "domain" { + name = "mybluemix.net" +} + +resource "ibm_app_route" "route" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.space.id}" + host = "%s" +} + +resource "ibm_app_route" "route1" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.space.id}" + host = "%s" +} + +resource "ibm_app" "app" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + app_path = "test-fixtures/app1.zip" + wait_time_minutes = 20 + buildpack = "sdk-for-nodejs" + instances = 1 + route_guid = ["${ibm_app_route.route.id}", "${ibm_app_route.route1.id}"] + disk_quota = 512 + memory = 128 + disk_quota = 512 + + environment_json = { + "test" = "test1" + } +}`, cfOrganization, cfSpace, route1, route2, name) + +} + +func testAccCheckIBMAppUnBindRoute(name, route1, route2 string) string { + return fmt.Sprintf(` + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_app_domain_shared" "domain" { + name = "mybluemix.net" +} + +resource "ibm_app_route" "route" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.space.id}" + host = "%s" +} + +resource "ibm_app_route" "route1" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.space.id}" + host = "%s" +} + +resource "ibm_app" "app" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + app_path = "test-fixtures/app1.zip" + wait_time_minutes = 20 + buildpack = "sdk-for-nodejs" + instances = 1 + route_guid = ["${ibm_app_route.route.id}"] + disk_quota = 512 + memory = 128 + instances = 1 + disk_quota = 512 + + environment_json = { + "test" = "test1" + } +}`, cfOrganization, cfSpace, route1, route2, name) + +} + +func testAccCheckIBMAppBindService(name, route1, serviceName string) string { + return fmt.Sprintf(` + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_app_domain_shared" "domain" { + name = "mybluemix.net" +} + +resource "ibm_app_route" "route" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.space.id}" + host = "%s" +} + +resource "ibm_service_instance" "service" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + service = "cleardb" + plan = "cb5" + tags = ["cluster-service", "cluster-bind"] +} + +resource "ibm_app" "app" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + app_path = "test-fixtures/app1.zip" + wait_time_minutes = 20 + buildpack = "sdk-for-nodejs" + instances = 1 + route_guid = ["${ibm_app_route.route.id}"] + service_instance_guid = ["${ibm_service_instance.service.id}"] + disk_quota = 512 + memory = 128 + instances = 1 + + environment_json = { + "test" = "test1" + } +}`, cfOrganization, cfSpace, route1, serviceName, name) + +} 
+ +func testAccCheckIBMAppAddMultipleService(name, route, serviceName1, serviceName2 string) string { + return fmt.Sprintf(` + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_app_domain_shared" "domain" { + name = "mybluemix.net" +} + +resource "ibm_app_route" "route" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.space.id}" + host = "%s" +} + +resource "ibm_service_instance" "service" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + service = "cleardb" + plan = "cb5" + tags = ["cluster-service", "cluster-bind"] +} + +resource "ibm_service_instance" "service1" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + service = "cloudantNoSQLDB" + plan = "Lite" + tags = ["cluster-service"] +} + +resource "ibm_app" "app" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + app_path = "test-fixtures/app1.zip" + wait_time_minutes = 20 + buildpack = "sdk-for-nodejs" + instances = 1 + route_guid = ["${ibm_app_route.route.id}"] + service_instance_guid = ["${ibm_service_instance.service.id}", "${ibm_service_instance.service1.id}"] + disk_quota = 512 + memory = 128 + instances = 1 + disk_quota = 512 + + environment_json = { + "test" = "test1" + } +}`, cfOrganization, cfSpace, route, serviceName1, serviceName2, name) + +} + +func testAccCheckIBMAppUnBindService(name, route1, serviceName1, serviceName2 string) string { + return fmt.Sprintf(` + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_app_domain_shared" "domain" { + name = "mybluemix.net" +} + +resource "ibm_app_route" "route" { + domain_guid = "${data.ibm_app_domain_shared.domain.id}" + space_guid = "${data.ibm_space.space.id}" + host = "%s" +} + +resource "ibm_service_instance" "service" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + service = "cleardb" + plan = "cb5" + tags = ["cluster-service", "cluster-bind"] +} + +resource "ibm_service_instance" "service1" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + service = "cloudantNoSQLDB" + plan = "Lite" + tags = ["cluster-service"] +} + +resource "ibm_app" "app" { + name = "%s" + space_guid = "${data.ibm_space.space.id}" + app_path = "test-fixtures/app1.zip" + wait_time_minutes = 20 + buildpack = "sdk-for-nodejs" + instances = 1 + route_guid = ["${ibm_app_route.route.id}"] + service_instance_guid = ["${ibm_service_instance.service.id}"] + disk_quota = 512 + memory = 128 + instances = 1 + disk_quota = 512 + + environment_json = { + "test" = "test1" + } +}`, cfOrganization, cfSpace, route1, serviceName1, serviceName2, name) + +} diff --git a/ibm/resource_ibm_compute_autoscale_group.go b/ibm/resource_ibm_compute_autoscale_group.go new file mode 100644 index 0000000000..df7bc71304 --- /dev/null +++ b/ibm/resource_ibm_compute_autoscale_group.go @@ -0,0 +1,688 @@ +package ibm + +import ( + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const HEALTH_CHECK_TYPE_HTTP_CUSTOM = "HTTP-CUSTOM" + +var IBMComputeAutoScaleGroupObjectMask = []string{ + "id", + "name", + "minimumMemberCount", + "maximumMemberCount", + "cooldown", + "status[keyName]", + "regionalGroup[id,name]", + "terminationPolicy[keyName]", + 
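+	// The entries below use SoftLayer's nested object-mask syntax,
+	// relation[child,child], so related records come back in a single call.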
"virtualGuestMemberTemplate[blockDeviceTemplateGroup,primaryNetworkComponent[networkVlan[id]],primaryBackendNetworkComponent[networkVlan[id]]]", + "loadBalancers[id,port,virtualServerId,healthCheck[id]]", + "networkVlans[id,networkVlanId,networkVlan[vlanNumber,primaryRouter[hostname]]]", + "loadBalancers[healthCheck[healthCheckTypeId,type[keyname],attributes[value,type[id,keyname]]]]", +} + +func resourceIBMComputeAutoScaleGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeAutoScaleGroupCreate, + Read: resourceIBMComputeAutoScaleGroupRead, + Update: resourceIBMComputeAutoScaleGroupUpdate, + Delete: resourceIBMComputeAutoScaleGroupDelete, + Exists: resourceIBMComputeAutoScaleGroupExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + + "name": { + Type: schema.TypeString, + Required: true, + }, + + "regional_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "minimum_member_count": { + Type: schema.TypeInt, + Required: true, + }, + + "maximum_member_count": { + Type: schema.TypeInt, + Required: true, + }, + + "cooldown": { + Type: schema.TypeInt, + Required: true, + }, + + "termination_policy": { + Type: schema.TypeString, + Required: true, + }, + + "virtual_server_id": { + Type: schema.TypeInt, + Optional: true, + }, + + "port": { + Type: schema.TypeInt, + Optional: true, + }, + + "health_check": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: false, + }, + + // Conditionally-required fields, based on value of "type" + "custom_method": { + Type: schema.TypeString, + Optional: true, + // TODO: Must be GET or HEAD + }, + + "custom_request": { + Type: schema.TypeString, + Optional: true, + }, + + "custom_response": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + // This has to be a TypeList, because TypeMap does not handle non-primitive + // members properly. + "virtual_guest_member_template": { + Type: schema.TypeList, + Required: true, + Elem: getModifiedVirtualGuestResource(), + }, + + "network_vlan_ids": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + }, + } +} + +// Returns a modified version of the virtual guest resource, with all members set to ForceNew = false. +// Otherwise a modified template parameter unnecessarily forces scale group drop/create +func getModifiedVirtualGuestResource() *schema.Resource { + + r := resourceIBMComputeVmInstance() + // wait_time_minutes is only used in virtual_guest resource. 
+ delete(r.Schema, "wait_time_minutes") + + for _, elem := range r.Schema { + elem.ForceNew = false + } + + return r +} + +// Helper method to parse healthcheck data in the resource schema format to the SoftLayer datatypes +func buildHealthCheckFromResourceData(d map[string]interface{}) (datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, error) { + healthCheckOpts := datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{ + Type: &datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type{ + Keyname: sl.String(d["type"].(string)), + }, + } + + if *healthCheckOpts.Type.Keyname == HEALTH_CHECK_TYPE_HTTP_CUSTOM { + // Validate and apply type-specific fields + healthCheckMethod, ok := d["custom_method"] + if !ok { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{}, errors.New("\"custom_method\" is required when HTTP-CUSTOM healthcheck is specified") + } + + healthCheckRequest, ok := d["custom_request"] + if !ok { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{}, errors.New("\"custom_request\" is required when HTTP-CUSTOM healthcheck is specified") + } + + healthCheckResponse, ok := d["custom_response"] + if !ok { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{}, errors.New("\"custom_response\" is required when HTTP-CUSTOM healthcheck is specified") + } + + // HTTP-CUSTOM values are represented as an array of SoftLayer_Health_Check_Attributes + healthCheckOpts.Attributes = []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute{ + { + Type: &datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type{ + Keyname: sl.String("HTTP_CUSTOM_TYPE"), + }, + Value: sl.String(healthCheckMethod.(string)), + }, + { + Type: &datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type{ + Keyname: sl.String("LOCATION"), + }, + Value: sl.String(healthCheckRequest.(string)), + }, + { + Type: &datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type{ + Keyname: sl.String("EXPECTED_RESPONSE"), + }, + Value: sl.String(healthCheckResponse.(string)), + }, + } + } + + return healthCheckOpts, nil +} + +// Helper method to parse network vlan information in the resource schema format to the SoftLayer datatypes +func buildScaleVlansFromResourceData(v interface{}, meta interface{}) ([]datatypes.Scale_Network_Vlan, error) { + vlanIds := v.([]interface{}) + scaleNetworkVlans := make([]datatypes.Scale_Network_Vlan, 0, len(vlanIds)) + + for _, iVlanId := range vlanIds { + vlanId := iVlanId.(int) + scaleNetworkVlans = append( + scaleNetworkVlans, + datatypes.Scale_Network_Vlan{NetworkVlanId: &vlanId}, + ) + } + + return scaleNetworkVlans, nil +} + +func getVirtualGuestTemplate(vGuestTemplateList []interface{}, meta interface{}) (datatypes.Virtual_Guest, error) { + if len(vGuestTemplateList) != 1 { + return datatypes.Virtual_Guest{}, + errors.New("Only one virtual_guest_member_template can be provided") + } + + // Retrieve the map of virtual_guest_member_template attributes + vGuestMap := vGuestTemplateList[0].(map[string]interface{}) + + // Create an empty ResourceData instance for a IBM_Compute_VM_Instance resource + vGuestResourceData := resourceIBMComputeVmInstance().Data(nil) + + // For each item in the map, call Set on the ResourceData. 
This handles
+	// validation and yields a completed ResourceData object
+	for k, v := range vGuestMap {
+		log.Printf("[DEBUG] virtual_guest_member_template attribute %s: %#v", k, v)
+		err := vGuestResourceData.Set(k, v)
+		if err != nil {
+			return datatypes.Virtual_Guest{},
+				fmt.Errorf("Error while parsing virtual_guest_member_template values: %s", err)
+		}
+	}
+
+	// Get the virtual guest creation template from the completed resource data object
+	return getVirtualGuestTemplateFromResourceData(vGuestResourceData, meta)
+}
+
+func resourceIBMComputeAutoScaleGroupCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetScaleGroupService(sess)
+
+	virtualGuestTemplateOpts, err := getVirtualGuestTemplate(d.Get("virtual_guest_member_template").([]interface{}), meta)
+	if err != nil {
+		return fmt.Errorf("Error while parsing virtual_guest_member_template values: %s", err)
+	}
+
+	scaleNetworkVlans, err := buildScaleVlansFromResourceData(d.Get("network_vlan_ids").(*schema.Set).List(), meta)
+	if err != nil {
+		return fmt.Errorf("Error while parsing network vlan values: %s", err)
+	}
+
+	locationGroupRegionalId, err := getLocationGroupRegionalId(sess, d.Get("regional_group").(string))
+	if err != nil {
+		return err
+	}
+
+	// Build up our creation options
+	opts := datatypes.Scale_Group{
+		Name:                       sl.String(d.Get("name").(string)),
+		Cooldown:                   sl.Int(d.Get("cooldown").(int)),
+		MinimumMemberCount:         sl.Int(d.Get("minimum_member_count").(int)),
+		MaximumMemberCount:         sl.Int(d.Get("maximum_member_count").(int)),
+		SuspendedFlag:              sl.Bool(false),
+		VirtualGuestMemberTemplate: &virtualGuestTemplateOpts,
+		NetworkVlans:               scaleNetworkVlans,
+		RegionalGroupId:            &locationGroupRegionalId,
+	}
+
+	opts.TerminationPolicy = &datatypes.Scale_Termination_Policy{
+		KeyName: sl.String(d.Get("termination_policy").(string)),
+	}
+
+	opts.LoadBalancers, err = buildLoadBalancers(d)
+	if err != nil {
+		return fmt.Errorf("Error creating Scale Group: %s", err)
+	}
+
+	res, err := service.CreateObject(&opts)
+	if err != nil {
+		return fmt.Errorf("Error creating Scale Group: %s", err)
+	}
+
+	d.SetId(strconv.Itoa(*res.Id))
+	log.Printf("[INFO] Scale Group ID: %d", *res.Id)
+
+	// Give the new group a moment to register before polling its status.
+	time.Sleep(60 * time.Second)
+
+	// wait for scale group to become active
+	_, err = waitForActiveStatus(d, meta)
+
+	if err != nil {
+		return fmt.Errorf("Error waiting for scale group (%s) to become active: %s", d.Id(), err)
+	}
+
+	return resourceIBMComputeAutoScaleGroupRead(d, meta)
+}
+
+func buildLoadBalancers(d *schema.ResourceData, ids ...int) ([]datatypes.Scale_LoadBalancer, error) {
+	isLoadBalancerEmpty := true
+	loadBalancers := []datatypes.Scale_LoadBalancer{{}}
+
+	if virtualServerId, ok := d.GetOk("virtual_server_id"); ok {
+		isLoadBalancerEmpty = false
+		loadBalancers[0].VirtualServerId = sl.Int(virtualServerId.(int))
+		if len(ids) > 0 {
+			loadBalancers[0].Id = sl.Int(ids[0])
+		}
+	}
+
+	if healthCheck, ok := d.GetOk("health_check"); ok {
+		isLoadBalancerEmpty = false
+		healthCheckOpts, err := buildHealthCheckFromResourceData(healthCheck.(map[string]interface{}))
+		if err != nil {
+			return []datatypes.Scale_LoadBalancer{}, fmt.Errorf("Error while parsing health check options: %s", err)
+		}
+		loadBalancers[0].HealthCheck = &healthCheckOpts
+	}
+
+	if port, ok := d.GetOk("port"); ok {
+		isLoadBalancerEmpty = false
+		loadBalancers[0].Port = sl.Int(port.(int))
+	}
+
+	if isLoadBalancerEmpty {
+		return []datatypes.Scale_LoadBalancer{}, nil
+	}
+	return loadBalancers, nil
+}
+
+func
resourceIBMComputeAutoScaleGroupRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetScaleGroupService(sess) + + groupId, _ := strconv.Atoi(d.Id()) + + slGroupObj, err := service.Id(groupId).Mask(strings.Join(IBMComputeAutoScaleGroupObjectMask, ",")).GetObject() + if err != nil { + // If the scale group is somehow already destroyed, mark as successfully gone + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving autoscale Group: %s", err) + } + + d.Set("id", slGroupObj.Id) + d.Set("name", slGroupObj.Name) + if slGroupObj.RegionalGroup != nil && slGroupObj.RegionalGroup.Name != nil { + d.Set("regional_group", slGroupObj.RegionalGroup.Name) + } + d.Set("minimum_member_count", slGroupObj.MinimumMemberCount) + d.Set("maximum_member_count", slGroupObj.MaximumMemberCount) + d.Set("cooldown", slGroupObj.Cooldown) + d.Set("status", slGroupObj.Status.KeyName) + d.Set("termination_policy", slGroupObj.TerminationPolicy.KeyName) + if len(slGroupObj.LoadBalancers) > 0 { + d.Set("virtual_server_id", slGroupObj.LoadBalancers[0].VirtualServerId) + d.Set("port", slGroupObj.LoadBalancers[0].Port) + + // Health Check + healthCheckObj := slGroupObj.LoadBalancers[0].HealthCheck + currentHealthCheck := d.Get("health_check").(map[string]interface{}) + + currentHealthCheck["type"] = *healthCheckObj.Type.Keyname + + if *healthCheckObj.Type.Keyname == HEALTH_CHECK_TYPE_HTTP_CUSTOM { + for _, elem := range healthCheckObj.Attributes { + switch *elem.Type.Keyname { + case "HTTP_CUSTOM_TYPE": + currentHealthCheck["custom_method"] = *elem.Value + case "LOCATION": + currentHealthCheck["custom_request"] = *elem.Value + case "EXPECTED_RESPONSE": + currentHealthCheck["custom_response"] = *elem.Value + } + } + } + + d.Set("health_check", currentHealthCheck) + } + + // Network Vlans + vlanIds := make([]int, len(slGroupObj.NetworkVlans)) + for i, vlan := range slGroupObj.NetworkVlans { + vlanIds[i] = *vlan.NetworkVlanId + } + d.Set("network_vlan_ids", vlanIds) + + virtualGuestTemplate := populateMemberTemplateResourceData(*slGroupObj.VirtualGuestMemberTemplate) + d.Set("virtual_guest_member_template", virtualGuestTemplate) + + return nil +} + +func populateMemberTemplateResourceData(template datatypes.Virtual_Guest) []map[string]interface{} { + + d := make(map[string]interface{}) + + d["hostname"] = *template.Hostname + d["domain"] = *template.Domain + d["datacenter"] = *template.Datacenter.Name + d["network_speed"] = *template.NetworkComponents[0].MaxSpeed + d["cores"] = *template.StartCpus + d["memory"] = *template.MaxMemory + d["private_network_only"] = *template.PrivateNetworkOnlyFlag + d["hourly_billing"] = *template.HourlyBillingFlag + d["local_disk"] = *template.LocalDiskFlag + + // Guard against nil values for optional fields in virtual_guest resource + d["dedicated_acct_host_only"] = sl.Get(template.DedicatedAccountHostOnlyFlag) + d["os_reference_code"] = sl.Get(template.OperatingSystemReferenceCode) + d["post_install_script_uri"] = sl.Get(template.PostInstallScriptUri) + + if template.PrimaryNetworkComponent != nil && template.PrimaryNetworkComponent.NetworkVlan != nil { + d["public_vlan_id"] = sl.Get(template.PrimaryNetworkComponent.NetworkVlan.Id) + } + + if template.PrimaryBackendNetworkComponent != nil && template.PrimaryBackendNetworkComponent.NetworkVlan != nil { + d["private_vlan_id"] = sl.Get(template.PrimaryBackendNetworkComponent.NetworkVlan.Id) + } 
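+
+	// image_id is only present when the template boots from a block device
+	// template group (an image) rather than an os_reference_code.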
+	if template.BlockDeviceTemplateGroup != nil {
+		d["image_id"] = sl.Get(template.BlockDeviceTemplateGroup.GlobalIdentifier)
+	}
+
+	if len(template.UserData) > 0 {
+		d["user_metadata"] = *template.UserData[0].Value
+	}
+
+	sshKeys := make([]interface{}, 0, len(template.SshKeys))
+	for _, elem := range template.SshKeys {
+		sshKeys = append(sshKeys, *elem.Id)
+	}
+	d["ssh_key_ids"] = sshKeys
+
+	disks := make([]interface{}, 0, len(template.BlockDevices))
+	for _, elem := range template.BlockDevices {
+		disks = append(disks, *elem.DiskImage.Capacity)
+	}
+	d["disks"] = disks
+
+	return []map[string]interface{}{d}
+}
+
+func resourceIBMComputeAutoScaleGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	scaleGroupService := services.GetScaleGroupService(sess)
+	scaleNetworkVlanService := services.GetScaleNetworkVlanService(sess)
+	scaleLoadBalancerService := services.GetScaleLoadBalancerService(sess)
+
+	groupId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID. Must be an integer: %s", err)
+	}
+
+	// Fetch the complete object from SoftLayer, update it with the current values from the
+	// configuration, and send the whole thing back to SoftLayer (effectively, a PUT)
+	groupObj, err := scaleGroupService.Id(groupId).Mask(strings.Join(IBMComputeAutoScaleGroupObjectMask, ",")).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving autoscale_group resource: %s", err)
+	}
+
+	groupObj.Name = sl.String(d.Get("name").(string))
+	groupObj.MinimumMemberCount = sl.Int(d.Get("minimum_member_count").(int))
+	groupObj.MaximumMemberCount = sl.Int(d.Get("maximum_member_count").(int))
+	groupObj.Cooldown = sl.Int(d.Get("cooldown").(int))
+	groupObj.TerminationPolicy.KeyName = sl.String(d.Get("termination_policy").(string))
+
+	currentLoadBalancers := groupObj.LoadBalancers
+	if len(currentLoadBalancers) > 0 {
+		groupObj.LoadBalancers, err = buildLoadBalancers(d, *currentLoadBalancers[0].Id)
+	} else {
+		groupObj.LoadBalancers, err = buildLoadBalancers(d)
+	}
+	if err != nil {
+		return fmt.Errorf("Error updating Scale Group: %s", err)
+	}
+
+	if d.HasChange("network_vlan_ids") {
+		// Vlans require special handling:
+		//
+		// 1. Delete all of the group's existing scale_network_vlans
+		// 2. Pass the full updated list of vlans to Scale_Group.editObject,
+		//    which recreates the associations from the new list.
+
+		_, newValue := d.GetChange("network_vlan_ids")
+		newIds := newValue.(*schema.Set).List()
+
+		// Delete all existing vlans
+		oldScaleVlans, err := scaleGroupService.
+			Id(groupId).
+			GetNetworkVlans()
+		if err != nil {
+			return fmt.Errorf("Could not retrieve current vlans for scale group (%d): %s", groupId, err)
+		}
+
+		for _, oldScaleVlan := range oldScaleVlans {
+			_, err := scaleNetworkVlanService.Id(*oldScaleVlan.Id).DeleteObject()
+			if err != nil {
+				return fmt.Errorf("Error deleting scale network vlan %d: %s", *oldScaleVlan.Id, err)
+			}
+		}
+
+		// Parse the new list of vlans into the appropriate input structure
+		scaleVlans, err := buildScaleVlansFromResourceData(newIds, meta)
+
+		if err != nil {
+			return fmt.Errorf("Unable to parse network vlan options: %s", err)
+		}
+
+		groupObj.NetworkVlans = scaleVlans
+	}
+
+	if d.HasChange("virtual_guest_member_template") {
+		virtualGuestTemplateOpts, err := getVirtualGuestTemplate(d.Get("virtual_guest_member_template").([]interface{}), meta)
+		if err != nil {
+			return fmt.Errorf("Unable to parse virtual guest member template options: %s", err)
+		}
+
+		groupObj.VirtualGuestMemberTemplate = &virtualGuestTemplateOpts
+
+	}
+	_, err = scaleGroupService.Id(groupId).EditObject(&groupObj)
+	if err != nil {
+		return fmt.Errorf("Error received while editing autoscale_group: %s", err)
+	}
+
+	// wait for scale group to become active
+	_, err = waitForActiveStatus(d, meta)
+
+	if err != nil {
+		return fmt.Errorf("Error waiting for scale group (%s) to become active: %s", d.Id(), err)
+	}
+
+	// Delete the existing load balancer when the scale group had one but the
+	// updated configuration no longer specifies virtual_server_id, port, or health_check.
+	if len(currentLoadBalancers) > 0 && len(groupObj.LoadBalancers) <= 0 {
+		_, err = scaleLoadBalancerService.Id(*currentLoadBalancers[0].Id).DeleteObject()
+		if err != nil {
+			return fmt.Errorf("Error received while deleting loadbalancers: %s", err)
+		}
+	}
+
+	return nil
+}
+
+func resourceIBMComputeAutoScaleGroupDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	scaleGroupService := services.GetScaleGroupService(sess)
+
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error deleting scale group: %s", err)
+	}
+
+	log.Printf("[INFO] Deleting scale group: %d", id)
+	_, err = scaleGroupService.Id(id).ForceDeleteObject()
+	if err != nil {
+		return fmt.Errorf("Error deleting scale group: %s", err)
+	}
+
+	d.SetId("")
+
+	return nil
+}
+
+func waitForActiveStatus(d *schema.ResourceData, meta interface{}) (interface{}, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	scaleGroupService := services.GetScaleGroupService(sess)
+
+	log.Printf("[INFO] Waiting for scale group (%s) to become active", d.Id())
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return nil, fmt.Errorf("The scale group ID %s must be numeric", d.Id())
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"BUSY", "SCALING", "SUSPENDED"},
+		Target:  []string{"ACTIVE"},
+		Refresh: func() (interface{}, string, error) {
+			// get the status of the scale group
+			result, err := scaleGroupService.Id(id).Mask("status.keyName,minimumMemberCount," +
+				"virtualGuestMembers[virtualGuest[primaryBackendIpAddress,primaryIpAddress,privateNetworkOnlyFlag,fullyQualifiedDomainName]]").
+				GetObject()
+			if err != nil {
+				if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 {
+					return nil, "", fmt.Errorf("The scale group %d does not exist anymore: %s", id, err)
+				}
+
+				return result, "BUSY", nil // Retry
+			}
+
+			status := "BUSY"
+
+			// Return "BUSY" while member VMs are still waiting on IP addresses.
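+			// "BUSY" is in the Pending list above, so returning it makes the
+			// StateChangeConf poll again rather than fail.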
+ for _, scaleMemberVirtualGuest := range result.VirtualGuestMembers { + // Checking primary backend IP address. + if scaleMemberVirtualGuest.VirtualGuest.PrimaryBackendIpAddress == nil { + log.Printf("The member vm of scale group does not have private IP yet. Hostname : %s", + *scaleMemberVirtualGuest.VirtualGuest.FullyQualifiedDomainName) + return result, status, nil + } + + // Checking primary IP address. + if !(*scaleMemberVirtualGuest.VirtualGuest.PrivateNetworkOnlyFlag) && + scaleMemberVirtualGuest.VirtualGuest.PrimaryIpAddress == nil { + log.Printf("The member vm of scale group does not have IP yet. Hostname : %s", + *scaleMemberVirtualGuest.VirtualGuest.FullyQualifiedDomainName) + return result, status, nil + } + } + if result.Status.KeyName != nil { + status = *result.Status.KeyName + log.Printf("The status of scale group with id (%d) is (%s)", id, *result.Status.KeyName) + } else { + log.Printf("Could not get the status of scale group with id (%d). Retrying...", id) + } + + return result, status, nil + }, + Timeout: 120 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func resourceIBMComputeAutoScaleGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + scaleGroupService := services.GetScaleGroupService(sess) + + groupId, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := scaleGroupService.Id(groupId).Mask("id").GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return result.Id != nil && *result.Id == groupId, nil +} + +func getLocationGroupRegionalId(sess *session.Session, locationGroupRegionalName string) (int, error) { + locationGroupRegionals, err := services.GetLocationGroupRegionalService(sess). + Mask("id,name"). + // FIXME: Someday, filters may actually work in SoftLayer + //Filter(filter.Build( + // filter.Path("name").Eq(locationGroupRegionalName))). + //Limit(1). 
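+		// Until then, fetch every regional group and match on the requested
+		// name client-side below.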
+		GetAllObjects()
+
+	if err != nil {
+		return -1, err
+	}
+
+	if len(locationGroupRegionals) < 1 {
+		return -1, fmt.Errorf("No regional groups were returned when looking up %s", locationGroupRegionalName)
+	}
+
+	for _, locationGroupRegional := range locationGroupRegionals {
+		if *locationGroupRegional.Name == locationGroupRegionalName {
+			return *locationGroupRegional.Id, nil
+		}
+	}
+
+	return -1, fmt.Errorf("Invalid regional_group: %s", locationGroupRegionalName)
+}
diff --git a/ibm/resource_ibm_compute_autoscale_group_test.go b/ibm/resource_ibm_compute_autoscale_group_test.go
new file mode 100644
index 0000000000..419626c7a7
--- /dev/null
+++ b/ibm/resource_ibm_compute_autoscale_group_test.go
@@ -0,0 +1,287 @@
+package ibm
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/services"
+)
+
+func TestAccIBMComputeAutoScaleGroup_Basic(t *testing.T) {
+	var scalegroup datatypes.Scale_Group
+	groupname := fmt.Sprintf("terraformuat_%d", acctest.RandInt())
+	hostname := acctest.RandString(16)
+	updatedgroupname := fmt.Sprintf("terraformuat_%d", acctest.RandInt())
+	updatedhostname := acctest.RandString(16)
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckIBMComputeAutoScaleGroupDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccCheckIBMComputeAutoScaleGroupConfig_basic(groupname, hostname),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckIBMComputeAutoScaleGroupExists("ibm_compute_autoscale_group.sample-http-cluster", &scalegroup),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "name", groupname),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "regional_group", "na-usa-central-1"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "cooldown", "30"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "minimum_member_count", "1"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "maximum_member_count", "10"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "termination_policy", "CLOSEST_TO_NEXT_CHARGE"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "port", "8080"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "health_check.type", "HTTP"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.hostname", hostname),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.domain", "terraformuat.ibm.com"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.cores", "1"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.memory", "4096"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.network_speed", "1000"),
+					resource.TestCheckResourceAttr(
+						"ibm_compute_autoscale_group.sample-http-cluster",
"virtual_guest_member_template.0.hourly_billing", "true"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.os_reference_code", "DEBIAN_7_64"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.local_disk", "false"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.disks.0", "25"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.disks.1", "100"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.datacenter", "dal09"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.post_install_script_uri", ""), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.user_metadata", "#!/bin/bash"), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMComputeAutoScaleGroupConfig_updated(updatedgroupname, updatedhostname), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeAutoScaleGroupExists("ibm_compute_autoscale_group.sample-http-cluster", &scalegroup), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "name", updatedgroupname), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "regional_group", "na-usa-central-1"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "minimum_member_count", "2"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "maximum_member_count", "12"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "termination_policy", "NEWEST"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "cooldown", "35"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "port", "9090"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "health_check.type", "HTTP-CUSTOM"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.hostname", updatedhostname), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.domain", "terraformuat.ibm.com"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.cores", "2"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.memory", "8192"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.network_speed", "100"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.os_reference_code", "CENTOS_7_64"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.datacenter", "dal09"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_group.sample-http-cluster", "virtual_guest_member_template.0.post_install_script_uri", "https://www.google.com"), + ), + }, + }, + }) +} + +func 
testAccCheckIBMComputeAutoScaleGroupDestroy(s *terraform.State) error {
+	service := services.GetScaleGroupService(testAccProvider.Meta().(ClientSession).SoftLayerSession())
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "ibm_compute_autoscale_group" {
+			continue
+		}
+
+		scalegroupId, _ := strconv.Atoi(rs.Primary.ID)
+
+		// Try to find the scale group
+		_, err := service.Id(scalegroupId).GetObject()
+
+		if err != nil && !strings.Contains(err.Error(), "404") {
+			return fmt.Errorf("Error waiting for Auto Scale (%s) to be destroyed: %s", rs.Primary.ID, err)
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckIBMComputeAutoScaleGroupContainsNetworkVlan(scaleGroup *datatypes.Scale_Group, vlanNumber int, primaryRouterHostname string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		found := false
+
+		for _, scaleVlan := range scaleGroup.NetworkVlans {
+			vlan := *scaleVlan.NetworkVlan
+
+			if *vlan.VlanNumber == vlanNumber && *vlan.PrimaryRouter.Hostname == primaryRouterHostname {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			return fmt.Errorf(
+				"Vlan number %d with router hostname %s not found in scale group",
+				vlanNumber,
+				primaryRouterHostname)
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckIBMComputeAutoScaleGroupExists(n string, scalegroup *datatypes.Scale_Group) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return errors.New("No Record ID is set")
+		}
+
+		scalegroupId, _ := strconv.Atoi(rs.Primary.ID)
+
+		service := services.GetScaleGroupService(testAccProvider.Meta().(ClientSession).SoftLayerSession())
+		foundScaleGroup, err := service.Id(scalegroupId).Mask(strings.Join(IBMComputeAutoScaleGroupObjectMask, ",")).GetObject()
+
+		if err != nil {
+			return err
+		}
+
+		if strconv.Itoa(int(*foundScaleGroup.Id)) != rs.Primary.ID {
+			return fmt.Errorf("Record %s not found", rs.Primary.ID)
+		}
+
+		*scalegroup = foundScaleGroup
+
+		return nil
+	}
+}
+
+func testAccCheckIBMComputeAutoScaleGroupConfig_basic(groupname, hostname string) string {
+	return fmt.Sprintf(`
+resource "ibm_lb" "local_lb_01" {
+  connections = 250
+  datacenter = "dal09"
+  ha_enabled = false
+}
+
+resource "ibm_lb_service_group" "http_sg" {
+  load_balancer_id = "${ibm_lb.local_lb_01.id}"
+  allocation = 100
+  port = 80
+  routing_method = "ROUND_ROBIN"
+  routing_type = "HTTP"
+}
+
+resource "ibm_compute_autoscale_group" "sample-http-cluster" {
+  name = "%s"
+  regional_group = "na-usa-central-1"
+  cooldown = 30
+  minimum_member_count = 1
+  maximum_member_count = 10
+  termination_policy = "CLOSEST_TO_NEXT_CHARGE"
+  virtual_server_id = "${ibm_lb_service_group.http_sg.id}"
+  port = 8080
+  health_check = {
+    type = "HTTP"
+  }
+  virtual_guest_member_template = {
+    hostname = "%s"
+    domain = "terraformuat.ibm.com"
+    cores = 1
+    memory = 4096
+    network_speed = 1000
+    hourly_billing = true
+    os_reference_code = "DEBIAN_7_64"
+    local_disk = false
+    disks = [25,100]
+    datacenter = "dal09"
+    post_install_script_uri = ""
+    user_metadata = "#!/bin/bash"
+  }
+}`, groupname, hostname)
+}
+
+func testAccCheckIBMComputeAutoScaleGroupConfig_updated(updatedgroupname, updatedhostname string) string {
+	return fmt.Sprintf(`
+resource "ibm_lb" "local_lb_01" {
+  connections = 250
+  datacenter = "dal09"
+  ha_enabled = false
+}
+
+resource "ibm_lb_service_group" "http_sg" {
+  load_balancer_id = "${ibm_lb.local_lb_01.id}"
+  allocation = 100
+  port = 80
+  routing_method = "ROUND_ROBIN"
+ routing_type = "HTTP" +} +resource "ibm_compute_autoscale_group" "sample-http-cluster" { + name = "%s" + regional_group = "na-usa-central-1" + cooldown = 35 + minimum_member_count = 2 + maximum_member_count = 12 + termination_policy = "NEWEST" + virtual_server_id = "${ibm_lb_service_group.http_sg.id}" + port = 9090 + health_check = { + type = "HTTP-CUSTOM" + custom_method = "GET" + custom_request = "/healthcheck" + custom_response = 200 + } + virtual_guest_member_template = { + hostname = "%s" + domain = "terraformuat.ibm.com" + cores = 2 + memory = 8192 + network_speed = 100 + hourly_billing = true + os_reference_code = "CENTOS_7_64" + local_disk = false + disks = [25,100] + datacenter = "dal09" + post_install_script_uri = "https://www.google.com" + user_metadata = "#!/bin/bash" + } +}`, updatedgroupname, updatedhostname) +} diff --git a/ibm/resource_ibm_compute_autoscale_policy.go b/ibm/resource_ibm_compute_autoscale_policy.go new file mode 100644 index 0000000000..88e26b66c0 --- /dev/null +++ b/ibm/resource_ibm_compute_autoscale_policy.go @@ -0,0 +1,547 @@ +package ibm + +import ( + "bytes" + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + IBMComputeTimeFormat = string("2006-01-02T15:04:05-07:00") + IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_RESOURCE_USE = 1 + IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_REPEATING = 2 + IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_ONE_TIME = 3 +) + +var IBMComputeAutoScalePolicyObjectMask = []string{ + "cooldown", + "id", + "name", + "scaleActions", + "scaleGroupId", + "oneTimeTriggers", + "repeatingTriggers", + "resourceUseTriggers.watches", + "triggers", +} + +func resourceIBMComputeAutoScalePolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeAutoScalePolicyCreate, + Read: resourceIBMComputeAutoScalePolicyRead, + Update: resourceIBMComputeAutoScalePolicyUpdate, + Delete: resourceIBMComputeAutoScalePolicyDelete, + Exists: resourceIBMComputeAutoScalePolicyExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "scale_type": { + Type: schema.TypeString, + Required: true, + }, + "scale_amount": { + Type: schema.TypeInt, + Required: true, + }, + "cooldown": { + Type: schema.TypeInt, + Optional: true, + }, + "scale_group_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "triggers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + }, + + // Conditionally-required fields, based on value of "type" + "watches": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "metric": { + Type: schema.TypeString, + Required: true, + }, + "operator": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + "period": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: resourceIBMComputeAutoScalePolicyHandlerHash, + }, + + "date": { + Type: schema.TypeString, + Optional: 
true,
+						},
+
+						"schedule": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+					},
+				},
+				Set: resourceIBMComputeAutoScalePolicyTriggerHash,
+			},
+		},
+	}
+}
+
+func resourceIBMComputeAutoScalePolicyCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetScalePolicyService(sess)
+
+	var err error
+
+	// Build up creation options
+	opts := datatypes.Scale_Policy{
+		Name:         sl.String(d.Get("name").(string)),
+		ScaleGroupId: sl.Int(d.Get("scale_group_id").(int)),
+		Cooldown:     sl.Int(d.Get("cooldown").(int)),
+	}
+
+	if *opts.Cooldown <= 0 || *opts.Cooldown > 864000 {
+		return fmt.Errorf("Error creating Scale Policy: %s", "cooldown must be between 1 second and 10 days (864000 seconds)")
+	}
+
+	opts.ScaleActions = []datatypes.Scale_Policy_Action_Scale{{
+		Amount:    sl.Int(d.Get("scale_amount").(int)),
+		ScaleType: sl.String(d.Get("scale_type").(string)),
+	}}
+	opts.ScaleActions[0].TypeId = sl.Int(1)
+
+	if *opts.ScaleActions[0].Amount <= 0 {
+		return fmt.Errorf("Error creating Scale Policy: %s", "scale_amount should be greater than 0")
+	}
+	if *opts.ScaleActions[0].ScaleType != "ABSOLUTE" && *opts.ScaleActions[0].ScaleType != "RELATIVE" && *opts.ScaleActions[0].ScaleType != "PERCENT" {
+		return fmt.Errorf("Error creating Scale Policy: %s", "scale_type should be ABSOLUTE, RELATIVE, or PERCENT")
+	}
+
+	if _, ok := d.GetOk("triggers"); ok {
+		err = validateTriggerTypes(d)
+		if err != nil {
+			return fmt.Errorf("Error creating Scale Policy: %s", err)
+		}
+
+		opts.OneTimeTriggers, err = prepareOneTimeTriggers(d)
+		if err != nil {
+			return fmt.Errorf("Error creating Scale Policy: %s", err)
+		}
+
+		opts.RepeatingTriggers, err = prepareRepeatingTriggers(d)
+		if err != nil {
+			return fmt.Errorf("Error creating Scale Policy: %s", err)
+		}
+
+		opts.ResourceUseTriggers, err = prepareResourceUseTriggers(d)
+		if err != nil {
+			return fmt.Errorf("Error creating Scale Policy: %s", err)
+		}
+	}
+
+	res, err := service.CreateObject(&opts)
+	if err != nil {
+		return fmt.Errorf("Error creating Scale Policy: %s", err)
+	}
+
+	d.SetId(strconv.Itoa(*res.Id))
+	log.Printf("[INFO] Scale Policy ID: %d", *res.Id)
+
+	return resourceIBMComputeAutoScalePolicyRead(d, meta)
+}
+
+func resourceIBMComputeAutoScalePolicyRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetScalePolicyService(sess)
+
+	scalePolicyId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid scale policy ID, must be an integer: %s", err)
+	}
+
+	log.Printf("[INFO] Reading Scale Policy: %d", scalePolicyId)
+	scalePolicy, err := service.Id(scalePolicyId).Mask(strings.Join(IBMComputeAutoScalePolicyObjectMask, ";")).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving Scale Policy: %s", err)
+	}
+
+	d.Set("name", scalePolicy.Name)
+	d.Set("cooldown", scalePolicy.Cooldown)
+	d.Set("scale_group_id", scalePolicy.ScaleGroupId)
+	d.Set("scale_type", scalePolicy.ScaleActions[0].ScaleType)
+	d.Set("scale_amount", scalePolicy.ScaleActions[0].Amount)
+	triggers := make([]map[string]interface{}, 0)
+	triggers = append(triggers, readOneTimeTriggers(scalePolicy.OneTimeTriggers)...)
+	triggers = append(triggers, readRepeatingTriggers(scalePolicy.RepeatingTriggers)...)
+	triggers = append(triggers, readResourceUseTriggers(scalePolicy.ResourceUseTriggers)...)
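+	// All three trigger variants are flattened into the single "triggers" set;
+	// the "type" key written by each read helper distinguishes them on read.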
+ + d.Set("triggers", triggers) + + return nil +} + +func resourceIBMComputeAutoScalePolicyUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + scalePolicyService := services.GetScalePolicyService(sess) + scalePolicyTriggerService := services.GetScalePolicyTriggerService(sess) + + scalePolicyId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid scale policy ID, must be an integer: %s", err) + } + + scalePolicy, err := scalePolicyService.Id(scalePolicyId).Mask(strings.Join(IBMComputeAutoScalePolicyObjectMask, ";")).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving scalePolicy: %s", err) + } + + var template datatypes.Scale_Policy + + template.Id = sl.Int(scalePolicyId) + + if d.HasChange("name") { + template.Name = sl.String(d.Get("name").(string)) + } + + if d.HasChange("scale_type") || d.HasChange("scale_amount") { + template.ScaleActions = make([]datatypes.Scale_Policy_Action_Scale, 1) + template.ScaleActions[0].Id = scalePolicy.ScaleActions[0].Id + template.ScaleActions[0].TypeId = sl.Int(1) + } + + if d.HasChange("scale_type") { + template.ScaleActions[0].ScaleType = sl.String(d.Get("scale_type").(string)) + if *template.ScaleActions[0].ScaleType != "ABSOLUTE" && *template.ScaleActions[0].ScaleType != "RELATIVE" && *template.ScaleActions[0].ScaleType != "PERCENT" { + return fmt.Errorf("Error retrieving scalePolicy: %s", "scale_type should be ABSOLUTE, RELATIVE, or PERCENT.") + } + } + + if d.HasChange("scale_amount") { + template.ScaleActions[0].Amount = sl.Int(d.Get("scale_amount").(int)) + if *template.ScaleActions[0].Amount <= 0 { + return fmt.Errorf("Error retrieving scalePolicy: %s", "scale_amount should be greater than 0.") + } + } + + if d.HasChange("cooldown") { + template.Cooldown = sl.Int(d.Get("cooldown").(int)) + if *template.Cooldown <= 0 || *template.Cooldown > 864000 { + return fmt.Errorf("Error retrieving scalePolicy: %s", "cooldown must be between 0 seconds and 10 days.") + } + } + + if _, ok := d.GetOk("triggers"); ok { + template.OneTimeTriggers, err = prepareOneTimeTriggers(d) + if err != nil { + return fmt.Errorf("Error retrieving scalePolicy: %s", err) + } + template.RepeatingTriggers, err = prepareRepeatingTriggers(d) + if err != nil { + return fmt.Errorf("Error retrieving scalePolicy: %s", err) + } + template.ResourceUseTriggers, err = prepareResourceUseTriggers(d) + if err != nil { + return fmt.Errorf("Error retrieving scalePolicy: %s", err) + } + } + + for _, triggerList := range scalePolicy.Triggers { + log.Printf("[INFO] DELETE TRIGGERS %d", *triggerList.Id) + scalePolicyTriggerService.Id(*triggerList.Id).DeleteObject() + } + + time.Sleep(60) + log.Printf("[INFO] Updating scale policy: %d", scalePolicyId) + _, err = scalePolicyService.Id(scalePolicyId).EditObject(&template) + + if err != nil { + return fmt.Errorf("Error updating scalie policy: %s", err) + } + + return nil +} + +func resourceIBMComputeAutoScalePolicyDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetScalePolicyService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting scale policy: %s", err) + } + + log.Printf("[INFO] Deleting scale policy: %d", id) + _, err = service.Id(id).DeleteObject() + if err != nil { + return fmt.Errorf("Error deleting scale policy: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceIBMComputeAutoScalePolicyExists(d 
*schema.ResourceData, meta interface{}) (bool, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetScalePolicyService(sess)
+
+	policyId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+	result, err := service.Id(policyId).Mask("id").GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok {
+			if apiErr.StatusCode == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+	return result.Id != nil && *result.Id == policyId, nil
+}
+
+func validateTriggerTypes(d *schema.ResourceData) error {
+	triggerLists := d.Get("triggers").(*schema.Set).List()
+	for _, triggerList := range triggerLists {
+		trigger := triggerList.(map[string]interface{})
+		triggerType := trigger["type"].(string)
+		if triggerType != "ONE_TIME" && triggerType != "REPEATING" && triggerType != "RESOURCE_USE" {
+			return fmt.Errorf("Invalid trigger type: %s", triggerType)
+		}
+	}
+	return nil
+}
+
+func prepareOneTimeTriggers(d *schema.ResourceData) ([]datatypes.Scale_Policy_Trigger_OneTime, error) {
+	triggerLists := d.Get("triggers").(*schema.Set).List()
+	triggers := make([]datatypes.Scale_Policy_Trigger_OneTime, 0)
+
+	// One-time trigger dates are sent in the portal's time zone, modeled here
+	// as a fixed UTC-5 offset.
+	portalTimeZone := time.FixedZone("PortalTimeZone", -5*60*60)
+
+	for _, triggerList := range triggerLists {
+		trigger := triggerList.(map[string]interface{})
+
+		if trigger["type"].(string) == "ONE_TIME" {
+			var oneTimeTrigger datatypes.Scale_Policy_Trigger_OneTime
+			oneTimeTrigger.TypeId = sl.Int(IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_ONE_TIME)
+			timeStampString := trigger["date"].(string)
+
+			// The date in a terraform configuration must be expressed in UTC
+			isUTC := strings.HasSuffix(timeStampString, "+00:00")
+			if !isUTC {
+				return nil, errors.New("The time zone must be UTC (+00:00)")
+			}
+
+			timeStamp, err := time.Parse(IBMComputeTimeFormat, timeStampString)
+			if err != nil {
+				return nil, err
+			}
+			oneTimeTrigger.Date = &datatypes.Time{Time: timeStamp.In(portalTimeZone)}
+			triggers = append(triggers, oneTimeTrigger)
+		}
+	}
+	return triggers, nil
+}
+
+func prepareRepeatingTriggers(d *schema.ResourceData) ([]datatypes.Scale_Policy_Trigger_Repeating, error) {
+	triggerLists := d.Get("triggers").(*schema.Set).List()
+	triggers := make([]datatypes.Scale_Policy_Trigger_Repeating, 0)
+	for _, triggerList := range triggerLists {
+		trigger := triggerList.(map[string]interface{})
+
+		if trigger["type"].(string) == "REPEATING" {
+			var repeatingTrigger datatypes.Scale_Policy_Trigger_Repeating
+			repeatingTrigger.TypeId = sl.Int(IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_REPEATING)
+			repeatingTrigger.Schedule = sl.String(trigger["schedule"].(string))
+			triggers = append(triggers, repeatingTrigger)
+		}
+	}
+	return triggers, nil
+}
+
+func prepareResourceUseTriggers(d *schema.ResourceData) ([]datatypes.Scale_Policy_Trigger_ResourceUse, error) {
+	triggerLists := d.Get("triggers").(*schema.Set).List()
+	triggers := make([]datatypes.Scale_Policy_Trigger_ResourceUse, 0)
+	for _, triggerList := range triggerLists {
+		trigger := triggerList.(map[string]interface{})
+
+		if trigger["type"].(string) == "RESOURCE_USE" {
+			var resourceUseTrigger datatypes.Scale_Policy_Trigger_ResourceUse
+			var err error
+			resourceUseTrigger.TypeId = sl.Int(IBMCOMPUTE_SCALE_POLICY_TRIGGER_TYPE_ID_RESOURCE_USE)
+			resourceUseTrigger.Watches, err = prepareWatches(trigger["watches"].(*schema.Set))
+			if err != nil {
+				return nil, err
+			}
+			triggers = append(triggers,
resourceUseTrigger)
+		}
+	}
+	return triggers, nil
+}
+
+func prepareWatches(d *schema.Set) ([]datatypes.Scale_Policy_Trigger_ResourceUse_Watch, error) {
+	watchLists := d.List()
+	watches := make([]datatypes.Scale_Policy_Trigger_ResourceUse_Watch, 0)
+	for _, watchList := range watchLists {
+		var watch datatypes.Scale_Policy_Trigger_ResourceUse_Watch
+		watchMap := watchList.(map[string]interface{})
+
+		watch.Metric = sl.String(watchMap["metric"].(string))
+		if *watch.Metric != "host.cpu.percent" && *watch.Metric != "host.network.backend.in.rate" && *watch.Metric != "host.network.backend.out.rate" && *watch.Metric != "host.network.frontend.in.rate" && *watch.Metric != "host.network.frontend.out.rate" {
+			return nil, fmt.Errorf("Invalid metric: %s", *watch.Metric)
+		}
+
+		watch.Operator = sl.String(watchMap["operator"].(string))
+		if *watch.Operator != ">" && *watch.Operator != "<" {
+			return nil, fmt.Errorf("Invalid operator: %s", *watch.Operator)
+		}
+
+		watch.Period = sl.Int(watchMap["period"].(int))
+		if *watch.Period <= 0 {
+			return nil, errors.New("period should be greater than 0")
+		}
+
+		watch.Value = sl.String(watchMap["value"].(string))
+
+		// Autoscale only supports the EWMA algorithm.
+		watch.Algorithm = sl.String("EWMA")
+
+		watches = append(watches, watch)
+	}
+	return watches, nil
+}
+
+func readOneTimeTriggers(list []datatypes.Scale_Policy_Trigger_OneTime) []map[string]interface{} {
+	triggers := make([]map[string]interface{}, 0, len(list))
+	UTCZone, _ := time.LoadLocation("UTC")
+
+	for _, trigger := range list {
+		t := make(map[string]interface{})
+		t["id"] = *trigger.Id
+		t["type"] = "ONE_TIME"
+		t["date"] = trigger.Date.In(UTCZone).Format(IBMComputeTimeFormat)
+		triggers = append(triggers, t)
+	}
+	return triggers
+}
+
+func readRepeatingTriggers(list []datatypes.Scale_Policy_Trigger_Repeating) []map[string]interface{} {
+	triggers := make([]map[string]interface{}, 0, len(list))
+	for _, trigger := range list {
+		t := make(map[string]interface{})
+		t["id"] = *trigger.Id
+		t["type"] = "REPEATING"
+		t["schedule"] = *trigger.Schedule
+		triggers = append(triggers, t)
+	}
+	return triggers
+}
+
+func readResourceUseTriggers(list []datatypes.Scale_Policy_Trigger_ResourceUse) []map[string]interface{} {
+	triggers := make([]map[string]interface{}, 0, len(list))
+	for _, trigger := range list {
+		t := make(map[string]interface{})
+		t["id"] = *trigger.Id
+		t["type"] = "RESOURCE_USE"
+		t["watches"] = schema.NewSet(resourceIBMComputeAutoScalePolicyHandlerHash,
+			readResourceUseWatches(trigger.Watches))
+		triggers = append(triggers, t)
+	}
+	return triggers
+}
+
+func readResourceUseWatches(list []datatypes.Scale_Policy_Trigger_ResourceUse_Watch) []interface{} {
+	watches := make([]interface{}, 0, len(list))
+	for _, watch := range list {
+		w := make(map[string]interface{})
+		w["id"] = *watch.Id
+		w["metric"] = *watch.Metric
+		w["operator"] = *watch.Operator
+		w["period"] = *watch.Period
+		w["value"] = *watch.Value
+		watches = append(watches, w)
+	}
+	return watches
+}
+
+func resourceIBMComputeAutoScalePolicyTriggerHash(v interface{}) int {
+	var buf bytes.Buffer
+	trigger := v.(map[string]interface{})
+	if trigger["type"].(string) == "ONE_TIME" {
+		buf.WriteString(fmt.Sprintf("%s-", trigger["type"].(string)))
+		buf.WriteString(fmt.Sprintf("%s-", trigger["date"].(string)))
+	}
+	if trigger["type"].(string) == "REPEATING" {
+		buf.WriteString(fmt.Sprintf("%s-", trigger["type"].(string)))
+		buf.WriteString(fmt.Sprintf("%s-", trigger["schedule"].(string)))
+	}
+	if
trigger["type"].(string) == "RESOURCE_USE" { + buf.WriteString(fmt.Sprintf("%s-", trigger["type"].(string))) + for _, watchList := range trigger["watches"].(*schema.Set).List() { + watch := watchList.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", watch["metric"].(string))) + buf.WriteString(fmt.Sprintf("%s-", watch["operator"].(string))) + buf.WriteString(fmt.Sprintf("%s-", watch["value"].(string))) + buf.WriteString(fmt.Sprintf("%d-", watch["period"].(int))) + } + } + return hashcode.String(buf.String()) +} + +func resourceIBMComputeAutoScalePolicyHandlerHash(v interface{}) int { + var buf bytes.Buffer + watch := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", watch["metric"].(string))) + buf.WriteString(fmt.Sprintf("%s-", watch["operator"].(string))) + buf.WriteString(fmt.Sprintf("%s-", watch["value"].(string))) + buf.WriteString(fmt.Sprintf("%d-", watch["period"].(int))) + return hashcode.String(buf.String()) +} diff --git a/ibm/resource_ibm_compute_autoscale_policy_test.go b/ibm/resource_ibm_compute_autoscale_policy_test.go new file mode 100644 index 0000000000..f77b5f8f3e --- /dev/null +++ b/ibm/resource_ibm_compute_autoscale_policy_test.go @@ -0,0 +1,302 @@ +package ibm + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "strconv" + "strings" + "testing" + "time" +) + +func TestAccIBMComputeAutoScalePolicy_Basic(t *testing.T) { + var scalepolicy datatypes.Scale_Policy + groupname := fmt.Sprintf("terraformuat_%d", acctest.RandInt()) + hostname := acctest.RandString(16) + policyname := acctest.RandString(16) + updatedpolicyname := acctest.RandString(16) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMComputeAutoScalePolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMComputeAutoScalePolicyConfig_basic(groupname, hostname, policyname), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeAutoScalePolicyExists("ibm_compute_autoscale_policy.sample-http-cluster-policy", &scalepolicy), + testAccCheckIBMComputeAutoScalePolicyAttributes(&scalepolicy, policyname), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_policy.sample-http-cluster-policy", "name", policyname), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_policy.sample-http-cluster-policy", "scale_type", "RELATIVE"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_policy.sample-http-cluster-policy", "scale_amount", "1"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_policy.sample-http-cluster-policy", "cooldown", "30"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_policy.sample-http-cluster-policy", "triggers.#", "3"), + testAccCheckIBMComputeAutoScalePolicyContainsRepeatingTriggers(&scalepolicy, 2, "0 1 ? 
* MON,WED *"), + testAccCheckIBMComputeAutoScalePolicyContainsResourceUseTriggers(&scalepolicy, 120, "80"), + testAccCheckIBMComputeAutoScalePolicyContainsOneTimeTriggers(&scalepolicy, testOnetimeTriggerDate), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMComputeAutoScalePolicyConfig_updated(groupname, hostname, updatedpolicyname), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeAutoScalePolicyExists("ibm_compute_autoscale_policy.sample-http-cluster-policy", &scalepolicy), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_policy.sample-http-cluster-policy", "name", updatedpolicyname), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_policy.sample-http-cluster-policy", "scale_type", "ABSOLUTE"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_policy.sample-http-cluster-policy", "scale_amount", "2"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_policy.sample-http-cluster-policy", "cooldown", "35"), + resource.TestCheckResourceAttr( + "ibm_compute_autoscale_policy.sample-http-cluster-policy", "triggers.#", "3"), + testAccCheckIBMComputeAutoScalePolicyContainsRepeatingTriggers(&scalepolicy, 2, "0 1 ? * MON,WED,SAT *"), + testAccCheckIBMComputeAutoScalePolicyContainsResourceUseTriggers(&scalepolicy, 130, "90"), + testAccCheckIBMComputeAutoScalePolicyContainsOneTimeTriggers(&scalepolicy, testOnetimeTriggerUpdatedDate), + ), + }, + }, + }) +} + +func testAccCheckIBMComputeAutoScalePolicyDestroy(s *terraform.State) error { + service := services.GetScalePolicyService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_compute_autoscale_policy" { + continue + } + + scalepolicyId, _ := strconv.Atoi(rs.Primary.ID) + + // Try to find the key + _, err := service.Id(scalepolicyId).GetObject() + + if err != nil && !strings.Contains(err.Error(), "404") { + return fmt.Errorf("Error waiting for Auto Scale Policy (%s) to be destroyed: %s", rs.Primary.ID, err) + } + } + + return nil +} + +func testAccCheckIBMComputeAutoScalePolicyContainsResourceUseTriggers(scalePolicy *datatypes.Scale_Policy, period int, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + found := false + + for _, scaleResourceUseTrigger := range scalePolicy.ResourceUseTriggers { + for _, scaleResourceUseWatch := range scaleResourceUseTrigger.Watches { + if *scaleResourceUseWatch.Metric == "host.cpu.percent" && *scaleResourceUseWatch.Operator == ">" && + *scaleResourceUseWatch.Period == period && *scaleResourceUseWatch.Value == value { + found = true + break + } + } + } + + if !found { + return fmt.Errorf("Resource use trigger not found in scale policy") + + } + + return nil + } +} + +func testAccCheckIBMComputeAutoScalePolicyContainsRepeatingTriggers(scalePolicy *datatypes.Scale_Policy, typeId int, schedule string) resource.TestCheckFunc { + return func(s *terraform.State) error { + found := false + + for _, scaleRepeatingTrigger := range scalePolicy.RepeatingTriggers { + if *scaleRepeatingTrigger.TypeId == typeId && *scaleRepeatingTrigger.Schedule == schedule { + found = true + break + } + } + + if !found { + return fmt.Errorf("Repeating trigger %d with schedule %s not found in scale policy", typeId, schedule) + + } + + return nil + } +} + +func testAccCheckIBMComputeAutoScalePolicyContainsOneTimeTriggers(scalePolicy *datatypes.Scale_Policy, testOnetimeTriggerDate string) resource.TestCheckFunc { + return func(s *terraform.State) error { + found := false + const 
IBMComputeTimeFormat = "2006-01-02T15:04:05-07:00" + utcLoc, _ := time.LoadLocation("UTC") + + for _, scaleOneTimeTrigger := range scalePolicy.OneTimeTriggers { + if scaleOneTimeTrigger.Date.In(utcLoc).Format(IBMComputeTimeFormat) == testOnetimeTriggerDate { + found = true + break + } + } + + if !found { + return fmt.Errorf("One time trigger with date %s not found in scale policy", testOnetimeTriggerDate) + } + + return nil + + } +} + +func testAccCheckIBMComputeAutoScalePolicyAttributes(scalepolicy *datatypes.Scale_Policy, policyname string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if *scalepolicy.Name != policyname { + return fmt.Errorf("Bad name: %s", *scalepolicy.Name) + } + + return nil + } +} + +func testAccCheckIBMComputeAutoScalePolicyExists(n string, scalepolicy *datatypes.Scale_Policy) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Record ID is set") + } + + scalepolicyId, _ := strconv.Atoi(rs.Primary.ID) + + service := services.GetScalePolicyService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + foundScalePolicy, err := service.Id(scalepolicyId).Mask(strings.Join(IBMComputeAutoScalePolicyObjectMask, ",")).GetObject() + + if err != nil { + return err + } + + if strconv.Itoa(int(*foundScalePolicy.Id)) != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + *scalepolicy = foundScalePolicy + return nil + } +} + +func testAccCheckIBMComputeAutoScalePolicyConfig_basic(groupname, hostname, policyname string) string { + return fmt.Sprintf(` +resource "ibm_compute_autoscale_group" "sample-http-cluster-with-policy" { + name = "%s" + regional_group = "na-usa-central-1" + cooldown = 30 + minimum_member_count = 1 + maximum_member_count = 10 + termination_policy = "CLOSEST_TO_NEXT_CHARGE" + virtual_guest_member_template = { + hostname = "%s" + domain = "terraformuat.ibm.com" + cores = 1 + memory = 4096 + network_speed = 1000 + hourly_billing = true + os_reference_code = "DEBIAN_7_64" + local_disk = false + datacenter = "dal09" + } +} + +resource "ibm_compute_autoscale_policy" "sample-http-cluster-policy" { + name = "%s" + scale_type = "RELATIVE" + scale_amount = 1 + cooldown = 30 + scale_group_id = "${ibm_compute_autoscale_group.sample-http-cluster-with-policy.id}" + triggers = { + type = "RESOURCE_USE" + watches = { + + metric = "host.cpu.percent" + operator = ">" + value = "80" + period = 120 + } + } + triggers = { + type = "ONE_TIME" + date = "%s" + } + triggers = { + type = "REPEATING" + schedule = "0 1 ? 
* MON,WED *" + } + +}`, groupname, hostname, policyname, testOnetimeTriggerDate) +} + +const IBMComputeTestTimeFormat = string("2006-01-02T15:04:05-07:00") + +var utcLoc, _ = time.LoadLocation("UTC") + +var testOnetimeTriggerDate = time.Now().In(utcLoc).AddDate(0, 0, 1).Format(IBMComputeTestTimeFormat) + +func testAccCheckIBMComputeAutoScalePolicyConfig_updated(groupname, hostname, updatedpolicyname string) string { + return fmt.Sprintf(` +resource "ibm_compute_autoscale_group" "sample-http-cluster-with-policy" { + name = "%s" + regional_group = "na-usa-central-1" + cooldown = 30 + minimum_member_count = 1 + maximum_member_count = 10 + termination_policy = "CLOSEST_TO_NEXT_CHARGE" + virtual_guest_member_template = { + hostname = "%s" + domain = "terraformuat.ibm.com" + cores = 1 + memory = 4096 + network_speed = 1000 + hourly_billing = true + os_reference_code = "DEBIAN_7_64" + local_disk = false + datacenter = "dal09" + } +} +resource "ibm_compute_autoscale_policy" "sample-http-cluster-policy" { + name = "%s" + scale_type = "ABSOLUTE" + scale_amount = 2 + cooldown = 35 + scale_group_id = "${ibm_compute_autoscale_group.sample-http-cluster-with-policy.id}" + triggers = { + type = "RESOURCE_USE" + watches = { + + metric = "host.cpu.percent" + operator = ">" + value = "90" + period = 130 + } + } + triggers = { + type = "REPEATING" + schedule = "0 1 ? * MON,WED,SAT *" + } + triggers = { + type = "ONE_TIME" + date = "%s" + } +}`, groupname, hostname, updatedpolicyname, testOnetimeTriggerUpdatedDate) +} + +var testOnetimeTriggerUpdatedDate = time.Now().In(utcLoc).AddDate(0, 0, 2).Format(IBMComputeTestTimeFormat) diff --git a/ibm/resource_ibm_compute_bare_metal.go b/ibm/resource_ibm_compute_bare_metal.go new file mode 100644 index 0000000000..c42dc2240f --- /dev/null +++ b/ibm/resource_ibm_compute_bare_metal.go @@ -0,0 +1,563 @@ +/* +* Licensed Materials - Property of IBM +* (C) Copyright IBM Corp. 2017. All Rights Reserved. +* US Government Users Restricted Rights - Use, duplication or +* disclosure restricted by GSA ADP Schedule Contract with IBM Corp. + */ +package ibm + +import ( + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMComputeBareMetal() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeBareMetalCreate, + Read: resourceIBMComputeBareMetalRead, + Update: resourceIBMComputeBareMetalUpdate, + Delete: resourceIBMComputeBareMetalDelete, + Exists: resourceIBMComputeBareMetalExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "hostname": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DefaultFunc: genID, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + // FIXME: Work around another bug in terraform. + // When a default function is used with an optional property, + // terraform will always execute it on apply, even when the property + // already has a value in the state for it. This causes a false diff. + // Making the property Computed:true does not make a difference. 
+ if strings.HasPrefix(o, "terraformed-") && strings.HasPrefix(n, "terraformed-") { + return true + } + + return o == n + }, + }, + + "domain": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "os_reference_code": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"image_template_id"}, + }, + + "hourly_billing": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "private_network_only": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "public_vlan_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "public_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "private_vlan_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "private_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "network_speed": { + Type: schema.TypeInt, + Optional: true, + Default: 100, + ForceNew: true, + }, + + "public_ipv4_address": { + Type: schema.TypeString, + Computed: true, + }, + + "private_ipv4_address": { + Type: schema.TypeString, + Computed: true, + }, + + "ssh_key_ids": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + ForceNew: true, + }, + + "user_metadata": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "file_storage_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + + "block_storage_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + + "post_install_script_uri": { + Type: schema.TypeString, + Optional: true, + Default: nil, + ForceNew: true, + }, + + "fixed_config_preset": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "image_template_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"os_reference_code"}, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func getBareMetalOrderFromResourceData(d *schema.ResourceData, meta interface{}) (datatypes.Hardware, error) { + dc := datatypes.Location{ + Name: sl.String(d.Get("datacenter").(string)), + } + + networkComponent := datatypes.Network_Component{ + MaxSpeed: sl.Int(d.Get("network_speed").(int)), + } + + hardware := datatypes.Hardware{ + Hostname: sl.String(d.Get("hostname").(string)), + Domain: sl.String(d.Get("domain").(string)), + HourlyBillingFlag: sl.Bool(d.Get("hourly_billing").(bool)), + PrivateNetworkOnlyFlag: sl.Bool(d.Get("private_network_only").(bool)), + Datacenter: &dc, + NetworkComponents: []datatypes.Network_Component{networkComponent}, + PostInstallScriptUri: sl.String(d.Get("post_install_script_uri").(string)), + BareMetalInstanceFlag: sl.Int(1), + + FixedConfigurationPreset: &datatypes.Product_Package_Preset{ + KeyName: sl.String(d.Get("fixed_config_preset").(string)), + }, + } + + if operatingSystemReferenceCode, ok := d.GetOk("os_reference_code"); ok { + hardware.OperatingSystemReferenceCode = sl.String(operatingSystemReferenceCode.(string)) + 
} + + public_vlan_id := d.Get("public_vlan_id").(int) + if public_vlan_id > 0 { + hardware.PrimaryNetworkComponent = &datatypes.Network_Component{ + NetworkVlan: &datatypes.Network_Vlan{Id: sl.Int(public_vlan_id)}, + } + } + + private_vlan_id := d.Get("private_vlan_id").(int) + if private_vlan_id > 0 { + hardware.PrimaryBackendNetworkComponent = &datatypes.Network_Component{ + NetworkVlan: &datatypes.Network_Vlan{Id: sl.Int(private_vlan_id)}, + } + } + + if public_subnet, ok := d.GetOk("public_subnet"); ok { + subnet := public_subnet.(string) + subnetID, err := getSubnetID(subnet, meta) + if err != nil { + return hardware, fmt.Errorf("Error determining id for subnet %s: %s", subnet, err) + } + + hardware.PrimaryNetworkComponent.NetworkVlan.PrimarySubnetId = sl.Int(subnetID) + } + + if private_subnet, ok := d.GetOk("private_subnet"); ok { + subnet := private_subnet.(string) + subnetID, err := getSubnetID(subnet, meta) + if err != nil { + return hardware, fmt.Errorf("Error determining id for subnet %s: %s", subnet, err) + } + + hardware.PrimaryBackendNetworkComponent.NetworkVlan.PrimarySubnetId = sl.Int(subnetID) + } + + if userMetadata, ok := d.GetOk("user_metadata"); ok { + hardware.UserData = []datatypes.Hardware_Attribute{ + {Value: sl.String(userMetadata.(string))}, + } + } + + // Get configured ssh_keys + ssh_key_ids := d.Get("ssh_key_ids").([]interface{}) + if len(ssh_key_ids) > 0 { + hardware.SshKeys = make([]datatypes.Security_Ssh_Key, 0, len(ssh_key_ids)) + for _, ssh_key_id := range ssh_key_ids { + hardware.SshKeys = append(hardware.SshKeys, datatypes.Security_Ssh_Key{ + Id: sl.Int(ssh_key_id.(int)), + }) + } + } + + return hardware, nil +} + +func resourceIBMComputeBareMetalCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + hwService := services.GetHardwareService(sess) + orderService := services.GetProductOrderService(sess) + + hardware, err := getBareMetalOrderFromResourceData(d, meta) + if err != nil { + return err + } + + order, err := hwService.GenerateOrderTemplate(&hardware) + if err != nil { + return fmt.Errorf( + "Encountered problem trying to get the bare metal order template: %s", err) + } + + // Set image template id if it exists + if rawImageTemplateId, ok := d.GetOk("image_template_id"); ok { + imageTemplateId := rawImageTemplateId.(int) + order.ImageTemplateId = sl.Int(imageTemplateId) + } + + log.Println("[INFO] Ordering bare metal server") + + _, err = orderService.PlaceOrder(&order, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error ordering bare metal server: %s", err) + } + + log.Printf("[INFO] Bare Metal Server ID: %s", d.Id()) + + // wait for machine availability + bm, err := waitForBareMetalProvision(&hardware, meta) + if err != nil { + return fmt.Errorf( + "Error waiting for bare metal server (%s) to become ready: %s", d.Id(), err) + } + + id := *bm.(datatypes.Hardware).Id + d.SetId(fmt.Sprintf("%d", id)) + + // Set tags + err = setHardwareTags(id, d, meta) + if err != nil { + return err + } + + var storageIds []int + if storageIdsSet := d.Get("file_storage_ids").(*schema.Set); len(storageIdsSet.List()) > 0 { + storageIds = expandIntList(storageIdsSet.List()) + + } + if storageIdsSet := d.Get("block_storage_ids").(*schema.Set); len(storageIdsSet.List()) > 0 { + storageIds = append(storageIds, expandIntList(storageIdsSet.List())...) 
+ } + if len(storageIds) > 0 { + err := addAccessToStorageList(hwService.Id(id), id, storageIds, meta) + if err != nil { + return err + } + } + + return resourceIBMComputeBareMetalRead(d, meta) +} + +func resourceIBMComputeBareMetalRead(d *schema.ResourceData, meta interface{}) error { + service := services.GetHardwareService(meta.(ClientSession).SoftLayerSession()) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(id).Mask( + "hostname,domain," + + "primaryIpAddress,primaryBackendIpAddress,privateNetworkOnlyFlag," + + "userData[value],tagReferences[id,tag[name]]," + + "allowedNetworkStorage[id,nasType]," + + "hourlyBillingFlag," + + "datacenter[id,name,longName]," + + "primaryNetworkComponent[networkVlan[id,primaryRouter,vlanNumber],maxSpeed]," + + "primaryBackendNetworkComponent[networkVlan[id,primaryRouter,vlanNumber],maxSpeed]", + ).GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving bare metal server: %s", err) + } + + d.Set("hostname", *result.Hostname) + d.Set("domain", *result.Domain) + + if result.Datacenter != nil { + d.Set("datacenter", *result.Datacenter.Name) + } + + d.Set("network_speed", *result.PrimaryNetworkComponent.MaxSpeed) + if result.PrimaryIpAddress != nil { + d.Set("public_ipv4_address", *result.PrimaryIpAddress) + } + d.Set("private_ipv4_address", *result.PrimaryBackendIpAddress) + + d.Set("private_network_only", *result.PrivateNetworkOnlyFlag) + d.Set("hourly_billing", *result.HourlyBillingFlag) + + if result.PrimaryNetworkComponent.NetworkVlan != nil { + d.Set("public_vlan_id", *result.PrimaryNetworkComponent.NetworkVlan.Id) + } + + if result.PrimaryBackendNetworkComponent.NetworkVlan != nil { + d.Set("private_vlan_id", *result.PrimaryBackendNetworkComponent.NetworkVlan.Id) + } + + userData := result.UserData + if len(userData) > 0 && userData[0].Value != nil { + d.Set("user_metadata", *userData[0].Value) + } + + tagReferences := result.TagReferences + tagReferencesLen := len(tagReferences) + if tagReferencesLen > 0 { + tags := make([]string, 0, tagReferencesLen) + for _, tagRef := range tagReferences { + tags = append(tags, *tagRef.Tag.Name) + } + d.Set("tags", tags) + } + + storages := result.AllowedNetworkStorage + if len(storages) > 0 { + d.Set("block_storage_ids", flattenBlockStorageID(storages)) + d.Set("file_storage_ids", flattenFileStorageID(storages)) + } + + connInfo := map[string]string{"type": "ssh"} + if !*result.PrivateNetworkOnlyFlag && result.PrimaryIpAddress != nil { + connInfo["host"] = *result.PrimaryIpAddress + } else { + connInfo["host"] = *result.PrimaryBackendIpAddress + } + d.SetConnInfo(connInfo) + + return nil +} + +func resourceIBMComputeBareMetalUpdate(d *schema.ResourceData, meta interface{}) error { + id, _ := strconv.Atoi(d.Id()) + service := services.GetHardwareService(meta.(ClientSession).SoftLayerSession()) + + if d.HasChange("tags") { + err := setHardwareTags(id, d, meta) + if err != nil { + return err + } + } + err := modifyStorageAccess(service.Id(id), id, meta, d) + if err != nil { + return err + } + + return nil +} + +func resourceIBMComputeBareMetalDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetHardwareService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + _, err = waitForNoBareMetalActiveTransactions(id, meta) + if err != nil { + 
return fmt.Errorf("Error deleting bare metal server while waiting for zero active transactions: %s", err) + } + + billingItem, err := service.Id(id).GetBillingItem() + if err != nil { + return fmt.Errorf("Error getting billing item for bare metal server: %s", err) + } + + billingItemService := services.GetBillingItemService(sess) + _, err = billingItemService.Id(*billingItem.Id).CancelItem( + sl.Bool(true), sl.Bool(true), sl.String("No longer required"), sl.String("Please cancel this server"), + ) + if err != nil { + return fmt.Errorf("Error canceling the bare metal server (%d): %s", id, err) + } + + return nil +} + +func resourceIBMComputeBareMetalExists(d *schema.ResourceData, meta interface{}) (bool, error) { + service := services.GetHardwareService(meta.(ClientSession).SoftLayerSession()) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(id).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); !ok || apiErr.StatusCode != 404 { + return false, fmt.Errorf("Error trying to retrieve the Bare Metal server: %s", err) + } + } + + return result.Id != nil && *result.Id == id, nil +} + +// Bare metal creation does not return a bare metal object with an Id. +// Have to wait on provision date to become available on server that matches +// hostname and domain. +// http://sldn.softlayer.com/blog/bpotter/ordering-bare-metal-servers-using-softlayer-api +func waitForBareMetalProvision(d *datatypes.Hardware, meta interface{}) (interface{}, error) { + hostname := *d.Hostname + domain := *d.Domain + log.Printf("Waiting for server (%s.%s) to have to be provisioned", hostname, domain) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", "pending"}, + Target: []string{"provisioned"}, + Refresh: func() (interface{}, string, error) { + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + bms, err := service.Filter( + filter.Build( + filter.Path("hardware.hostname").Eq(hostname), + filter.Path("hardware.domain").Eq(domain), + ), + ).Mask("id,provisionDate").GetHardware() + if err != nil { + return false, "retry", nil + } + + if len(bms) == 0 || bms[0].ProvisionDate == nil { + return datatypes.Hardware{}, "pending", nil + } else { + return bms[0], "provisioned", nil + } + }, + Timeout: 4 * time.Hour, + Delay: 30 * time.Second, + MinTimeout: 2 * time.Minute, + } + + return stateConf.WaitForState() +} + +func waitForNoBareMetalActiveTransactions(id int, meta interface{}) (interface{}, error) { + log.Printf("Waiting for server (%d) to have zero active transactions", id) + service := services.GetHardwareServerService(meta.(ClientSession).SoftLayerSession()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", "active"}, + Target: []string{"idle"}, + Refresh: func() (interface{}, string, error) { + bm, err := service.Id(id).Mask("id,activeTransactionCount").GetObject() + if err != nil { + return false, "retry", nil + } + + if bm.ActiveTransactionCount != nil && *bm.ActiveTransactionCount == 0 { + return bm, "idle", nil + } else { + return bm, "active", nil + } + }, + Timeout: 4 * time.Hour, + Delay: 5 * time.Second, + MinTimeout: 1 * time.Minute, + } + + return stateConf.WaitForState() +} + +func setHardwareTags(id int, d *schema.ResourceData, meta interface{}) error { + service := services.GetHardwareService(meta.(ClientSession).SoftLayerSession()) + + tags := getTags(d) + if tags != "" { + _, err := 
service.Id(id).SetTags(sl.String(tags)) + if err != nil { + return fmt.Errorf("Could not set tags on bare metal server %d", id) + } + } + + return nil +} diff --git a/ibm/resource_ibm_compute_bare_metal_test.go b/ibm/resource_ibm_compute_bare_metal_test.go new file mode 100644 index 0000000000..b98caf2151 --- /dev/null +++ b/ibm/resource_ibm_compute_bare_metal_test.go @@ -0,0 +1,258 @@ +package ibm + +import ( + "errors" + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func TestAccIBMComputeBareMetal_Basic(t *testing.T) { + var bareMetal datatypes.Hardware + + hostname := acctest.RandString(16) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMComputeBareMetalDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMComputeBareMetalConfig_basic(hostname), + Destroy: false, + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeBareMetalExists("ibm_compute_bare_metal.terraform-acceptance-test-1", &bareMetal), + resource.TestCheckResourceAttr( + "ibm_compute_bare_metal.terraform-acceptance-test-1", "hostname", hostname), + resource.TestCheckResourceAttr( + "ibm_compute_bare_metal.terraform-acceptance-test-1", "domain", "terraformuat.ibm.com"), + resource.TestCheckResourceAttr( + "ibm_compute_bare_metal.terraform-acceptance-test-1", "os_reference_code", "UBUNTU_16_64"), + resource.TestCheckResourceAttr( + "ibm_compute_bare_metal.terraform-acceptance-test-1", "datacenter", "dal01"), + resource.TestCheckResourceAttr( + "ibm_compute_bare_metal.terraform-acceptance-test-1", "network_speed", "100"), + resource.TestCheckResourceAttr( + "ibm_compute_bare_metal.terraform-acceptance-test-1", "hourly_billing", "true"), + resource.TestCheckResourceAttr( + "ibm_compute_bare_metal.terraform-acceptance-test-1", "private_network_only", "false"), + resource.TestCheckResourceAttr( + "ibm_compute_bare_metal.terraform-acceptance-test-1", "user_metadata", "{\"value\":\"newvalue\"}"), + resource.TestCheckResourceAttr( + "ibm_compute_bare_metal.terraform-acceptance-test-1", "fixed_config_preset", "S1270_32GB_1X1TBSATA_NORAID"), + CheckStringSet( + "ibm_compute_bare_metal.terraform-acceptance-test-1", + "tags", []string{"collectd"}, + ), + ), + }, + + { + Config: testAccCheckIBMComputeBareMetalConfig_update(hostname), + Destroy: false, + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeBareMetalExists("ibm_compute_bare_metal.terraform-acceptance-test-1", &bareMetal), + CheckStringSet( + "ibm_compute_bare_metal.terraform-acceptance-test-1", + "tags", []string{"mesos-master"}, + ), + ), + }, + }, + }) +} + +func TestAccIBMComputeBareMetal_With_Network_Storage_Access(t *testing.T) { + var bareMetal datatypes.Hardware + hostname := acctest.RandString(16) + domain := "storage.tfbmuat.ibm.com" + + configInstance := "ibm_compute_bare_metal.terraform-bm-storage-access" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMComputeBareMetalDestroy, + Steps: []resource.TestStep{ + { + Config: testBareMetalAccessToStoragesBasic(hostname, domain), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeBareMetalExists(configInstance, 
&bareMetal), + resource.TestCheckResourceAttr( + configInstance, "hostname", hostname), + resource.TestCheckResourceAttr( + configInstance, "domain", domain), + resource.TestCheckResourceAttr( + configInstance, "datacenter", "wdc04"), + resource.TestCheckResourceAttr( + configInstance, "hourly_billing", "true"), + resource.TestCheckResourceAttr( + configInstance, "file_storage_ids.#", "1"), + resource.TestCheckResourceAttr( + configInstance, "block_storage_ids.#", "1"), + ), + }, + { + Config: testBareMetalAccessToStoragesUpdate(hostname, domain), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeBareMetalExists(configInstance, &bareMetal), + resource.TestCheckResourceAttr( + configInstance, "file_storage_ids.#", "1"), + resource.TestCheckResourceAttr( + configInstance, "block_storage_ids.#", "0"), + ), + }, + }, + }) +} + +func testAccCheckIBMComputeBareMetalDestroy(s *terraform.State) error { + service := services.GetHardwareService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_compute_bare_metal" { + continue + } + + id, _ := strconv.Atoi(rs.Primary.ID) + + // Try to find the bare metal + _, err := service.Id(id).GetObject() + + // Wait + if err != nil { + if apiErr, ok := err.(sl.Error); !ok || apiErr.StatusCode != 404 { + return fmt.Errorf( + "Error waiting for bare metal (%d) to be destroyed: %s", + id, err) + } + } + } + + return nil +} + +func testAccCheckIBMComputeBareMetalExists(n string, bareMetal *datatypes.Hardware) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No bare metal ID is set") + } + + id, err := strconv.Atoi(rs.Primary.ID) + + if err != nil { + return err + } + + service := services.GetHardwareService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + bm, err := service.Id(id).GetObject() + if err != nil { + return err + } + + fmt.Printf("The ID is %d", *bm.Id) + + if *bm.Id != id { + return errors.New("Bare metal not found") + } + + *bareMetal = bm + + return nil + } +} + +func testAccCheckIBMComputeBareMetalConfig_basic(hostname string) string { + return fmt.Sprintf(` +resource "ibm_compute_bare_metal" "terraform-acceptance-test-1" { + hostname = "%s" + domain = "terraformuat.ibm.com" + os_reference_code = "UBUNTU_16_64" + datacenter = "dal01" + network_speed = 100 + hourly_billing = true + private_network_only = false + user_metadata = "{\"value\":\"newvalue\"}" + fixed_config_preset = "S1270_32GB_1X1TBSATA_NORAID" + tags = ["collectd"] +} +`, hostname) +} + +func testAccCheckIBMComputeBareMetalConfig_update(hostname string) string { + return fmt.Sprintf(` +resource "ibm_compute_bare_metal" "terraform-acceptance-test-1" { + hostname = "%s" + domain = "terraformuat.ibm.com" + os_reference_code = "UBUNTU_16_64" + datacenter = "dal01" + network_speed = 100 + hourly_billing = true + private_network_only = false + user_metadata = "{\"value\":\"newvalue\"}" + fixed_config_preset = "S1270_32GB_1X1TBSATA_NORAID" + tags = ["mesos-master"] +} +`, hostname) +} + +func testBareMetalAccessToStoragesBasic(hostname, domain string) string { + config := fmt.Sprintf(` +resource "ibm_compute_bare_metal" "terraform-bm-storage-access" { + hostname = "%s" + domain = "%s" + os_reference_code = "UBUNTU_16_64" + datacenter = "wdc04" + network_speed = 100 + hourly_billing = true + private_network_only = false + 
user_metadata = "{\"value\":\"newvalue\"}" + fixed_config_preset = "S1270_32GB_1X1TBSATA_NORAID" + + tags = ["mesos-master"] + file_storage_ids = ["${ibm_storage_file.fs1.id}"] + block_storage_ids = ["${ibm_storage_block.bs.id}"] +} +%s +%s + +`, hostname, domain, fsConfig1, bsConfig1) + return config +} + +func testBareMetalAccessToStoragesUpdate(hostname, domain string) string { + return fmt.Sprintf(` +resource "ibm_compute_bare_metal" "terraform-bm-storage-access" { + hostname = "%s" + domain = "%s" + os_reference_code = "UBUNTU_16_64" + datacenter = "wdc04" + network_speed = 100 + hourly_billing = true + private_network_only = false + user_metadata = "{\"value\":\"newvalue\"}" + fixed_config_preset = "S1270_32GB_1X1TBSATA_NORAID" + file_storage_ids = ["${ibm_storage_file.fs2.id}"] + block_storage_ids = [] + + tags = ["mesos-master"] + file_storage_ids = ["${ibm_storage_file.fs2.id}"] + +} + +%s + +`, hostname, domain, fsConfig2) + +} diff --git a/ibm/resource_ibm_compute_monitor.go b/ibm/resource_ibm_compute_monitor.go new file mode 100644 index 0000000000..ed3beb7f46 --- /dev/null +++ b/ibm/resource_ibm_compute_monitor.go @@ -0,0 +1,289 @@ +package ibm + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMComputeMonitor() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeMonitorCreate, + Read: resourceIBMComputeMonitorRead, + Update: resourceIBMComputeMonitorUpdate, + Delete: resourceIBMComputeMonitorDelete, + Exists: resourceIBMComputeMonitorExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + + "guest_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "ip_address": { + Type: schema.TypeString, + Optional: true, + }, + + "query_type_id": { + Type: schema.TypeInt, + Required: true, + }, + + "response_action_id": { + Type: schema.TypeInt, + Required: true, + }, + "wait_cycles": { + Type: schema.TypeInt, + Optional: true, + }, + "notified_users": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + }, + } +} + +func resourceIBMComputeMonitorCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + virtualGuestService := services.GetVirtualGuestService(sess) + monitorService := services.GetNetworkMonitorVersion1QueryHostService(sess) + + guestId := d.Get("guest_id").(int) + ipAddress := d.Get("ip_address").(string) + if ipAddress == "" { + virtualGuest, err := virtualGuestService.Id(guestId).GetObject() + if err != nil { + return fmt.Errorf("Error looking up virtual guest %d: %s", guestId, err) + } + + if virtualGuest.PrimaryIpAddress == nil { + return fmt.Errorf( + "No primary ip address found for virtual guest %d. 
Please specify it.", guestId) + } + + ipAddress = *virtualGuest.PrimaryIpAddress + } + + // Build up our creation options + opts := datatypes.Network_Monitor_Version1_Query_Host{ + GuestId: &guestId, + IpAddress: &ipAddress, + QueryTypeId: sl.Int(d.Get("query_type_id").(int)), + ResponseActionId: sl.Int(d.Get("response_action_id").(int)), + } + if wait_cycles, ok := d.GetOk("wait_cycles"); ok { + opts.WaitCycles = sl.Int(wait_cycles.(int)) + } + + // Create a monitor + res, err := monitorService.CreateObject(&opts) + if err != nil { + return fmt.Errorf("Error creating Basic Monitor : %s", err) + } + + d.SetId(strconv.Itoa(*res.Id)) + log.Printf("[INFO] Basic Monitor Id: %d", *res.Id) + + err = createNotifications(d, meta, guestId) + if err != nil { + return err + } + + return resourceIBMComputeMonitorRead(d, meta) +} + +func createNotifications(d *schema.ResourceData, meta interface{}, guestId int) error { + sess := meta.(ClientSession).SoftLayerSession() + virtualGuestService := services.GetVirtualGuestService(sess) + notificationService := services.GetUserCustomerNotificationVirtualGuestService(sess) + + // Create a user notification + // This represents a link between a monitored guest instance and a user account + notificationLinks, err := virtualGuestService.Id(guestId).GetMonitoringUserNotification() + if err != nil { + return fmt.Errorf("Error looking up user notifications for virtual guest %d", guestId) + } + + userNotificationOpts := datatypes.User_Customer_Notification_Virtual_Guest{ + GuestId: &guestId, + } + notifiedUsers := d.Get("notified_users").(*schema.Set) + for _, userId := range notifiedUsers.List() { + userNotificationOpts.UserId = sl.Int(userId.(int)) + // Don't create the notification object if one already exists for the same user and vm + if !notificationExists(notificationLinks, userId.(int)) { + _, err := notificationService.CreateObject(&userNotificationOpts) + if err != nil { + return fmt.Errorf("Error creating notification for userID %d: %v", *userNotificationOpts.UserId, err) + } + } + } + + return nil +} + +func notificationExists(notificationLinks []datatypes.User_Customer_Notification_Virtual_Guest, userId int) bool { + for _, link := range notificationLinks { + if *link.UserId == userId { + return true + } + } + + return false +} + +func resourceIBMComputeMonitorRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkMonitorVersion1QueryHostService(sess) + virtualGuestService := services.GetVirtualGuestService(sess) + + basicMonitorId, _ := strconv.Atoi(d.Id()) + + basicMonitor, err := service.Id(basicMonitorId).GetObject() + if err != nil { + // If the monitor is somehow already destroyed, mark as + // succesfully gone + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving Basic Monitor : %s", err) + } + + guestId := *basicMonitor.GuestId + + d.Set("id", basicMonitor.Id) + d.Set("guest_id", guestId) + d.Set("ip_address", strings.TrimSpace(*basicMonitor.IpAddress)) + d.Set("query_type_id", basicMonitor.QueryTypeId) + d.Set("response_action_id", basicMonitor.ResponseActionId) + d.Set("wait_cycles", basicMonitor.WaitCycles) + + notificationLinks, err := virtualGuestService.Id(guestId).GetMonitoringUserNotification() + if err != nil { + return fmt.Errorf("Error looking up user notifications for virtual guest %d", guestId) + } + + notificationUserIds := schema.NewSet(func(v interface{}) int { 
return v.(int) }, make([]interface{}, 0, len(notificationLinks))) + for _, notificationLink := range notificationLinks { + notificationUserIds.Add(*notificationLink.UserId) + } + + // Only check that the notified user ids we know about are in SoftLayer. If not, set the incoming list + knownNotifiedUserIds := d.Get("notified_users").(*schema.Set) + if knownNotifiedUserIds != nil && knownNotifiedUserIds.Len() > 0 { + notifiedUserIds := notificationUserIds.List() + for _, knownNotifiedUserId := range knownNotifiedUserIds.List() { + match := false + for _, notifiedUserId := range notifiedUserIds { + if knownNotifiedUserId.(int) == notifiedUserId.(int) { + match = true + break + } + } + + if match == false { + d.Set("notified_users", notificationUserIds.List()) + break + } + } + } + + return nil +} + +func resourceIBMComputeMonitorUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkMonitorVersion1QueryHostService(sess) + + basicMonitorId, _ := strconv.Atoi(d.Id()) + guestId := d.Get("guest_id").(int) + + basicMonitor, err := service.Id(basicMonitorId).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving Basic Monitor : %s", err) + } + if d.HasChange("query_type_id") { + basicMonitor.QueryTypeId = sl.Int(d.Get("query_type_id").(int)) + } + if d.HasChange("response_action_id") { + basicMonitor.ResponseActionId = sl.Int(d.Get("response_action_id").(int)) + } + if d.HasChange("wait_cycles") { + basicMonitor.WaitCycles = sl.Int(d.Get("wait_cycles").(int)) + } + + _, err = service.Id(basicMonitorId).EditObject(&basicMonitor) + if err != nil { + return fmt.Errorf("Error editing Basic Monitor : %s", err) + } + + // Will only create notification objects for user/vm relationships that + // don't exist yet. 
+ err = createNotifications(d, meta, guestId) + if err != nil { + return err + } + + return resourceIBMComputeMonitorRead(d, meta) +} + +func resourceIBMComputeMonitorDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkMonitorVersion1QueryHostService(sess) + + // Delete the basic monitor + id, err := strconv.Atoi(d.Id()) + + log.Printf("[INFO] Deleting Basic Monitor : %d", id) + _, err = service.Id(id).DeleteObject() + if err != nil { + return fmt.Errorf("Error deleting Basic Monitor : %s", err) + } + + d.SetId("") + return nil +} + +func resourceIBMComputeMonitorExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkMonitorVersion1QueryHostService(sess) + + basicMonitorId, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(basicMonitorId).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error retrieving basic monitor info: %s", err) + } + return *result.Id == basicMonitorId, nil +} diff --git a/ibm/resource_ibm_compute_monitor_test.go b/ibm/resource_ibm_compute_monitor_test.go new file mode 100644 index 0000000000..17af82c828 --- /dev/null +++ b/ibm/resource_ibm_compute_monitor_test.go @@ -0,0 +1,179 @@ +package ibm + +import ( + "errors" + "fmt" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" +) + +func TestAccIBMComputeMonitor_Basic(t *testing.T) { + var basicMonitor datatypes.Network_Monitor_Version1_Query_Host + + hostname := acctest.RandString(16) + domain := "terraformmonitoruat.ibm.com" + + queryTypeID1 := "1" + responseActionID1 := "1" + waitCycles1 := "5" + + queryTypeID2 := "17" + responseActionID2 := "2" + waitCycles2 := "10" + + notifiedUsers := []int{6575505} + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMComputeMonitorDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMComputeMonitorConfigBasic(hostname, domain, queryTypeID1, responseActionID1, waitCycles1, notifiedUsers), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeMonitorExists("ibm_compute_monitor.testacc_basic_monitor", &basicMonitor), + resource.TestCheckResourceAttrSet( + "ibm_compute_monitor.testacc_basic_monitor", "guest_id"), + resource.TestCheckResourceAttrSet( + "ibm_compute_monitor.testacc_basic_monitor", "ip_address"), + resource.TestCheckResourceAttr( + "ibm_compute_monitor.testacc_basic_monitor", "query_type_id", queryTypeID1), + resource.TestCheckResourceAttr( + "ibm_compute_monitor.testacc_basic_monitor", "response_action_id", responseActionID1), + resource.TestCheckResourceAttr( + "ibm_compute_monitor.testacc_basic_monitor", "wait_cycles", waitCycles1), + resource.TestCheckFunc(testAccCheckIBMComputeMonitorNotifiedUsers), + ), + Destroy: false, + }, + + { + Config: testAccCheckIBMComputeMonitorConfigBasic(hostname, domain, queryTypeID2, responseActionID2, waitCycles2, notifiedUsers), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckIBMComputeMonitorExists("ibm_compute_monitor.testacc_basic_monitor", &basicMonitor), + resource.TestCheckResourceAttrSet( + "ibm_compute_monitor.testacc_basic_monitor", "guest_id"), + resource.TestCheckResourceAttrSet( + "ibm_compute_monitor.testacc_basic_monitor", "ip_address"), + resource.TestCheckResourceAttr( + "ibm_compute_monitor.testacc_basic_monitor", "query_type_id", queryTypeID2), + resource.TestCheckResourceAttr( + "ibm_compute_monitor.testacc_basic_monitor", "response_action_id", responseActionID2), + resource.TestCheckResourceAttr( + "ibm_compute_monitor.testacc_basic_monitor", "wait_cycles", waitCycles2), + ), + Destroy: false, + }, + }, + }) +} + +func testAccCheckIBMComputeMonitorDestroy(s *terraform.State) error { + service := services.GetNetworkMonitorVersion1QueryHostService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_compute_monitor" { + continue + } + + basicMonitorId, _ := strconv.Atoi(rs.Primary.ID) + + // Try to find the basic monitor + _, err := service.Id(basicMonitorId).GetObject() + + if err == nil { + return errors.New("Basic Monitor still exists") + } + } + + return nil +} + +func testAccCheckIBMComputeMonitorExists(n string, basicMonitor *datatypes.Network_Monitor_Version1_Query_Host) resource.TestCheckFunc { + return func(s *terraform.State) error { + + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + + basicMonitorId, _ := strconv.Atoi(rs.Primary.ID) + + service := services.GetNetworkMonitorVersion1QueryHostService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + foundBasicMonitor, err := service.Id(basicMonitorId).GetObject() + + if err != nil { + return err + } + + if strconv.Itoa(int(*foundBasicMonitor.Id)) != rs.Primary.ID { + return errors.New("Record not found") + } + + *basicMonitor = foundBasicMonitor + + return nil + } + +} +func testAccCheckIBMComputeMonitorNotifiedUsers(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_compute_monitor" { + continue + } + + if n, ok := rs.Primary.Attributes["notified_users.#"]; ok && n != "" && n != "0" { + return nil + } + break + } + return errors.New("Basic monitor has no notified users") +} + +func testAccCheckIBMComputeMonitorConfigBasic(hostname, domain, queryTypeID, responseActionID, waitCycles string, notifiedUsers []int) string { + users := []string{} + for _, v := range notifiedUsers { + text := strconv.Itoa(v) + users = append(users, text) + } + formattedUser := strings.Join(users, ",") + + config := fmt.Sprintf(` +resource "ibm_compute_vm_instance" "vg-basic-monitor-test" { + hostname = "%s" + domain = "%s" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25, 10, 20] + dedicated_acct_host_only = true + local_disk = false + ipv6_enabled = true + secondary_ip_count = 4 +} +resource "ibm_compute_monitor" "testacc_basic_monitor" { + guest_id = "${ibm_compute_vm_instance.vg-basic-monitor-test.id}" + ip_address = "${ibm_compute_vm_instance.vg-basic-monitor-test.ipv4_address}" + query_type_id = %s + response_action_id = %s + wait_cycles = %s + notified_users = [%s] +}`, hostname, domain, queryTypeID, responseActionID, waitCycles, formattedUser) + return config +} diff --git 
a/ibm/resource_ibm_compute_provisioning_hook.go b/ibm/resource_ibm_compute_provisioning_hook.go new file mode 100644 index 0000000000..e270b533da --- /dev/null +++ b/ibm/resource_ibm_compute_provisioning_hook.go @@ -0,0 +1,140 @@ +package ibm + +import ( + "fmt" + "log" + "net/http" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMComputeProvisioningHook() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeProvisioningHookCreate, + Read: resourceIBMComputeProvisioningHookRead, + Update: resourceIBMComputeProvisioningHookUpdate, + Delete: resourceIBMComputeProvisioningHookDelete, + Exists: resourceIBMComputeProvisioningHookExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "uri": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceIBMComputeProvisioningHookCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetProvisioningHookService(sess) + + opts := datatypes.Provisioning_Hook{ + Name: sl.String(d.Get("name").(string)), + Uri: sl.String(d.Get("uri").(string)), + } + + hook, err := service.CreateObject(&opts) + if err != nil { + return fmt.Errorf("Error creating Provisioning Hook: %s", err) + } + + d.SetId(strconv.Itoa(*hook.Id)) + log.Printf("[INFO] Provisioning Hook ID: %d", *hook.Id) + + return resourceIBMComputeProvisioningHookRead(d, meta) +} + +func resourceIBMComputeProvisioningHookRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetProvisioningHookService(sess) + + hookId, _ := strconv.Atoi(d.Id()) + + hook, err := service.Id(hookId).GetObject() + if err != nil { + if err, ok := err.(sl.Error); ok { + if err.StatusCode == http.StatusNotFound { + d.SetId("") + return nil + } + } + return fmt.Errorf("Error retrieving Provisioning Hook: %s", err) + } + + d.Set("id", hook.Id) + d.Set("name", hook.Name) + d.Set("uri", hook.Uri) + + return nil +} + +func resourceIBMComputeProvisioningHookUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetProvisioningHookService(sess) + + hookId, _ := strconv.Atoi(d.Id()) + + opts := datatypes.Provisioning_Hook{} + + if d.HasChange("name") { + opts.Name = sl.String(d.Get("name").(string)) + } + + if d.HasChange("uri") { + opts.Uri = sl.String(d.Get("uri").(string)) + } + + opts.TypeId = sl.Int(1) + _, err := service.Id(hookId).EditObject(&opts) + + if err != nil { + return fmt.Errorf("Error editing Provisioning Hook: %s", err) + } + return nil +} + +func resourceIBMComputeProvisioningHookDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetProvisioningHookService(sess) + + hookId, err := strconv.Atoi(d.Id()) + log.Printf("[INFO] Deleting Provisioning Hook: %d", hookId) + _, err = service.Id(hookId).DeleteObject() + if err != nil { + return fmt.Errorf("Error deleting Provisioning Hook: %s", err) + } + + return nil +} + +func resourceIBMComputeProvisioningHookExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := 
services.GetProvisioningHookService(sess) + + hookId, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(hookId).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return result.Id != nil && *result.Id == hookId, nil +} diff --git a/ibm/resource_ibm_compute_provisioning_hook_test.go b/ibm/resource_ibm_compute_provisioning_hook_test.go new file mode 100644 index 0000000000..bcc5f03b5d --- /dev/null +++ b/ibm/resource_ibm_compute_provisioning_hook_test.go @@ -0,0 +1,127 @@ +package ibm + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" +) + +func TestAccIBMComputeProvisioningHook_Basic(t *testing.T) { + var hook datatypes.Provisioning_Hook + + hookName1 := fmt.Sprintf("%s%s", "tfuathook", acctest.RandString(10)) + hookName2 := fmt.Sprintf("%s%s", "tfuathook", acctest.RandString(10)) + uri1 := "http://www.weather.com" + uri2 := "https://www.ibm.com" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMComputeProvisioningHookDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMComputeProvisioningHookConfig(hookName1, uri1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeProvisioningHookExists("ibm_compute_provisioning_hook.test-provisioning-hook", &hook), + testAccCheckIBMComputeProvisioningHookAttributes(&hook, hookName1, uri1), + resource.TestCheckResourceAttr( + "ibm_compute_provisioning_hook.test-provisioning-hook", "name", hookName1), + resource.TestCheckResourceAttr( + "ibm_compute_provisioning_hook.test-provisioning-hook", "uri", uri1), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMComputeProvisioningHookConfig(hookName2, uri2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeProvisioningHookExists("ibm_compute_provisioning_hook.test-provisioning-hook", &hook), + resource.TestCheckResourceAttr( + "ibm_compute_provisioning_hook.test-provisioning-hook", "name", hookName2), + resource.TestCheckResourceAttr( + "ibm_compute_provisioning_hook.test-provisioning-hook", "uri", uri2), + ), + }, + }, + }) +} + +func testAccCheckIBMComputeProvisioningHookDestroy(s *terraform.State) error { + service := services.GetProvisioningHookService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_compute_provisioning_hook" { + continue + } + + hookId, _ := strconv.Atoi(rs.Primary.ID) + + // Try to find the provisioning hook + _, err := service.Id(hookId).GetObject() + + if err == nil { + return fmt.Errorf("Provisioning Hook still exists") + } + } + + return nil +} + +func testAccCheckIBMComputeProvisioningHookAttributes(hook *datatypes.Provisioning_Hook, name, uri string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + if *hook.Name != name { + return fmt.Errorf("Bad name: %s", *hook.Name) + } + + if *hook.Uri != uri { + return fmt.Errorf("Bad uri: %s", *hook.Uri) + } + + return nil + } +} + +func testAccCheckIBMComputeProvisioningHookExists(n string, 
hook *datatypes.Provisioning_Hook) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Record ID is set") + } + + hookId, _ := strconv.Atoi(rs.Primary.ID) + + service := services.GetProvisioningHookService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + foundHook, err := service.Id(hookId).GetObject() + + if err != nil { + return err + } + + if strconv.Itoa(int(*foundHook.Id)) != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + *hook = foundHook + + return nil + } +} + +func testAccCheckIBMComputeProvisioningHookConfig(name, uri string) string { + return fmt.Sprintf(` +resource "ibm_compute_provisioning_hook" "test-provisioning-hook" { + name = "%s" + uri = "%s" +}`, name, uri) +} diff --git a/ibm/resource_ibm_compute_ssh_key.go b/ibm/resource_ibm_compute_ssh_key.go new file mode 100644 index 0000000000..f7f37a5c8b --- /dev/null +++ b/ibm/resource_ibm_compute_ssh_key.go @@ -0,0 +1,229 @@ +package ibm + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMComputeSSHKey() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeSSHKeyCreate, + Read: resourceIBMComputeSSHKeyRead, + Update: resourceIBMComputeSSHKeyUpdate, + Delete: resourceIBMComputeSSHKeyDelete, + Exists: resourceIBMComputeSSHKeyExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "label": { + Type: schema.TypeString, + Required: true, + }, + + "public_key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return strings.TrimSpace(old) == strings.TrimSpace(new) + }, + }, + + "fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + + "notes": { + Type: schema.TypeString, + Optional: true, + Default: nil, + }, + }, + } +} + +func resourceIBMComputeSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecuritySshKeyService(sess) + + // First check if the key exists by fingerprint + // If so, set the Id (and fingerprint), but update notes and label (if any) + key := d.Get("public_key").(string) + label := d.Get("label").(string) + + fingerprint, err := computeSSHKeyFingerprint(key) + if err != nil { + return err + } + + keys, err := services.GetAccountService(sess). + Filter(filter.Path("sshKeys.fingerprint").Eq(fingerprint).Build()). 
+ GetSshKeys() + if err == nil && len(keys) > 0 { + slKey := keys[0] + id := *slKey.Id + slKey.Id = nil + d.SetId(fmt.Sprintf("%d", id)) + d.Set("fingerprint", fingerprint) + editKey := false + + notes := d.Get("notes").(string) + if notes != "" && (slKey.Notes == nil || notes != *slKey.Notes) { + slKey.Notes = sl.String(notes) + editKey = true + } else if slKey.Notes != nil { + d.Set("notes", *slKey.Notes) + } + + if label != *slKey.Label { + slKey.Label = sl.String(label) + editKey = true + } + + if editKey { + _, err = service.Id(id).EditObject(&slKey) + return err + } + + return nil + } // End of "Import" + + // Build up our creation options + opts := datatypes.Security_Ssh_Key{ + Label: sl.String(label), + Key: sl.String(key), + } + + if notes, ok := d.GetOk("notes"); ok { + opts.Notes = sl.String(notes.(string)) + } + + res, err := service.CreateObject(&opts) + if err != nil { + return fmt.Errorf("Error creating SSH Key: %s", err) + } + + d.SetId(strconv.Itoa(*res.Id)) + log.Printf("[INFO] SSH Key: %d", *res.Id) + + return resourceIBMComputeSSHKeyRead(d, meta) +} + +func resourceIBMComputeSSHKeyRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecuritySshKeyService(sess) + + keyID, _ := strconv.Atoi(d.Id()) + key, err := service.Id(keyID).GetObject() + if err != nil { + // If the key is somehow already destroyed, mark as + // succesfully gone + if err, ok := err.(sl.Error); ok && err.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving SSH key: %s", err) + } + + d.Set("id", key.Id) + d.Set("label", key.Label) + d.Set("public_key", key.Key) + d.Set("fingerprint", key.Fingerprint) + d.Set("notes", key.Notes) + return nil +} + +func resourceIBMComputeSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecuritySshKeyService(sess) + + keyID, _ := strconv.Atoi(d.Id()) + + key, err := service.Id(keyID).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving SSH key: %s", err) + } + + if d.HasChange("label") { + key.Label = sl.String(d.Get("label").(string)) + } + + if d.HasChange("notes") { + key.Notes = sl.String(d.Get("notes").(string)) + } + + _, err = service.Id(keyID).EditObject(&key) + if err != nil { + return fmt.Errorf("Error editing SSH key: %s", err) + } + return resourceIBMComputeSSHKeyRead(d, meta) +} + +func resourceIBMComputeSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecuritySshKeyService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting SSH Key: %s", err) + } + + log.Printf("[INFO] Deleting SSH key: %d", id) + _, err = service.Id(id).DeleteObject() + if err != nil { + return fmt.Errorf("Error deleting SSH key: %s", err) + } + + d.SetId("") + return nil +} + +func resourceIBMComputeSSHKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecuritySshKeyService(sess) + + keyID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(keyID).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + 
return result.Id != nil && *result.Id == keyID, nil +} + +func computeSSHKeyFingerprint(key string) (fingerprint string, err error) { + parts := strings.Fields(key) + if len(parts) < 2 { + return "", fmt.Errorf("Invalid public key specified :%s\nPlease check the value of public_key", key) + } + k, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + return "", fmt.Errorf("Error decoding the public key: %s\nPlease check the value of public_key", err) + } + fp := md5.Sum([]byte(k)) + prints := make([]string, len(fp)) + for i, b := range fp { + prints[i] = fmt.Sprintf("%02x", b) + } + fingerprint = strings.Join(prints, ":") + return +} diff --git a/ibm/resource_ibm_compute_ssh_key_test.go b/ibm/resource_ibm_compute_ssh_key_test.go new file mode 100644 index 0000000000..35dcbf87a7 --- /dev/null +++ b/ibm/resource_ibm_compute_ssh_key_test.go @@ -0,0 +1,124 @@ +package ibm + +import ( + "errors" + "fmt" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + + "github.com/hashicorp/terraform/helper/acctest" +) + +func TestAccIBMComputeSSHKey_basic(t *testing.T) { + var key datatypes.Security_Ssh_Key + + label1 := fmt.Sprintf("terraformsshuat_create_step_label_%d", acctest.RandInt()) + label2 := fmt.Sprintf("terraformsshuat_update_step_label_%d", acctest.RandInt()) + notes1 := fmt.Sprintf("terraformsshuat_create_step_notes_%d", acctest.RandInt()) + notes2 := fmt.Sprintf("terraformsshuat_update_step_notes_%d", acctest.RandInt()) + + publicKey := strings.TrimSpace(` +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR +`) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMComputeSSHKeyDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMComputeSSHKeyConfig(label1, notes1, publicKey), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeSSHKeyExists("ibm_compute_ssh_key.testacc_ssh_key", &key), + resource.TestCheckResourceAttr( + "ibm_compute_ssh_key.testacc_ssh_key", "label", label1), + resource.TestCheckResourceAttr( + "ibm_compute_ssh_key.testacc_ssh_key", "public_key", publicKey), + resource.TestCheckResourceAttr( + "ibm_compute_ssh_key.testacc_ssh_key", "notes", notes1), + ), + }, + + { + Config: testAccCheckIBMComputeSSHKeyConfig(label2, notes2, publicKey), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeSSHKeyExists("ibm_compute_ssh_key.testacc_ssh_key", &key), + resource.TestCheckResourceAttr( + "ibm_compute_ssh_key.testacc_ssh_key", "label", label2), + resource.TestCheckResourceAttr( + "ibm_compute_ssh_key.testacc_ssh_key", "public_key", publicKey), + resource.TestCheckResourceAttr( + "ibm_compute_ssh_key.testacc_ssh_key", "notes", notes2), + ), + }, + }, + }) +} + +func testAccCheckIBMComputeSSHKeyDestroy(s *terraform.State) error { + service := services.GetSecuritySshKeyService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_compute_ssh_key" { + continue + } + + 
keyID, _ := strconv.Atoi(rs.Primary.ID) + + // Try to find the key + _, err := service.Id(keyID).GetObject() + + if err == nil { + return fmt.Errorf("SSH key %d still exists", keyID) + } + } + + return nil +} + +func testAccCheckIBMComputeSSHKeyExists(n string, key *datatypes.Security_Ssh_Key) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + + keyID, _ := strconv.Atoi(rs.Primary.ID) + + service := services.GetSecuritySshKeyService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + foundKey, err := service.Id(keyID).GetObject() + + if err != nil { + return err + } + + if strconv.Itoa(int(*foundKey.Id)) != rs.Primary.ID { + return fmt.Errorf("Record %d not found", keyID) + } + + *key = foundKey + + return nil + } +} + +func testAccCheckIBMComputeSSHKeyConfig(label, notes, publicKey string) string { + return fmt.Sprintf(` +resource "ibm_compute_ssh_key" "testacc_ssh_key" { + label = "%s" + notes = "%s" + public_key = "%s" +}`, label, notes, publicKey) + +} diff --git a/ibm/resource_ibm_compute_ssl_certificate.go b/ibm/resource_ibm_compute_ssl_certificate.go new file mode 100644 index 0000000000..6078dd076d --- /dev/null +++ b/ibm/resource_ibm_compute_ssl_certificate.go @@ -0,0 +1,198 @@ +package ibm + +import ( + "fmt" + "log" + + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMComputeSSLCertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeSSLCertificateCreate, + Read: resourceIBMComputeSSLCertificateRead, + Delete: resourceIBMComputeSSLCertificateDelete, + Exists: resourceIBMComputeSSLCertificateExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + ForceNew: true, + }, + + "certificate": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: normalizeCert, + }, + + "intermediate_certificate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + StateFunc: normalizeCert, + }, + + "private_key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: normalizeCert, + }, + + "common_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "organization_name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "validity_begin": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "validity_days": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "validity_end": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "key_size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "create_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "modify_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMComputeSSLCertificateCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateService(sess) + + template := datatypes.Security_Certificate{ + Certificate: sl.String(d.Get("certificate").(string)), + IntermediateCertificate: 
sl.String(d.Get("intermediate_certificate").(string)), + PrivateKey: sl.String(d.Get("private_key").(string)), + } + + log.Printf("[INFO] Creating Security Certificate") + + cert, err := service.CreateObject(&template) + + if err != nil { + return fmt.Errorf("Error creating Security Certificate: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *cert.Id)) + + return resourceIBMComputeSSLCertificateRead(d, meta) +} + +func resourceIBMComputeSSLCertificateRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + cert, err := service.Id(id).GetObject() + + if err != nil { + return fmt.Errorf("Unable to get Security Certificate: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *cert.Id)) + d.Set("certificate", *cert.Certificate) + if cert.IntermediateCertificate != nil { + d.Set("intermediate_certificate", *cert.IntermediateCertificate) + } + if cert.PrivateKey != nil { + d.Set("private_key", *cert.PrivateKey) + } + d.Set("common_name", *cert.CommonName) + d.Set("organization_name", *cert.OrganizationName) + d.Set("validity_begin", *cert.ValidityBegin) + d.Set("validity_days", *cert.ValidityDays) + d.Set("validity_end", *cert.ValidityEnd) + d.Set("key_size", *cert.KeySize) + d.Set("create_date", *cert.CreateDate) + d.Set("modify_date", *cert.ModifyDate) + + return nil +} + +func resourceIBMComputeSSLCertificateDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateService(sess) + + _, err := service.Id(d.Get("id").(int)).DeleteObject() + + if err != nil { + return fmt.Errorf("Error deleting Security Certificate %s: %s", d.Get("id"), err) + } + + return nil +} + +func resourceIBMComputeSSLCertificateExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetSecurityCertificateService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + cert, err := service.Id(id).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return cert.Id != nil && *cert.Id == id, nil +} + +func normalizeCert(cert interface{}) string { + if cert == nil || cert == (*string)(nil) { + return "" + } + + switch cert.(type) { + case string: + return strings.TrimSpace(cert.(string)) + default: + return "" + } +} diff --git a/ibm/resource_ibm_compute_ssl_certificate_test.go b/ibm/resource_ibm_compute_ssl_certificate_test.go new file mode 100644 index 0000000000..a83c5e0b7e --- /dev/null +++ b/ibm/resource_ibm_compute_ssl_certificate_test.go @@ -0,0 +1,89 @@ +package ibm + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMComputeSSLCertificate_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMComputeSSLCertificateConfig_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_compute_ssl_certificate.test-cert", "key_size", "2048"), + resource.TestCheckResourceAttr( 
+ "ibm_compute_ssl_certificate.test-cert", "common_name", "*.weather.com"), + ), + }, + }, + }) +} + +var testAccCheckIBMComputeSSLCertificateConfig_basic = ` +resource "ibm_compute_ssl_certificate" "test-cert" { + certificate = < 0 { + d.Set("api_key", sluserObj.ApiAuthenticationKeys[0].AuthenticationKey) // as its a computed field + d.Set("has_api_key", true) + } else { + d.Set("api_key", "") + d.Set("has_api_key", false) + } + + if sluserObj.OpenIdConnectUserName != nil { + d.Set("ibm_id", sluserObj.OpenIdConnectUserName) + } + + return nil +} + +func resourceIBMComputeUserUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetUserCustomerService(sess) + + sluid, _ := strconv.Atoi(d.Id()) + + mask := strings.Join([]string{ + "id", + "username", + "email", + "firstName", + "lastName", + "companyName", + "address1", + "address2", + "city", + "state", + "country", + "timezone.shortName", + "userStatus.keyName", + "permissions.keyName", + "apiAuthenticationKeys.authenticationKey", + "apiAuthenticationKeys.id", + }, ";") + + service = service.Id(sluid) + userObj, err := service.Mask(mask).GetObject() + + // Some fields cannot be updated such as username. Computed fields also cannot be updated + // by explicitly providing a value. So only update the fields that are editable. + // Password changes can also not be fully automated, and are not supported + if d.HasChange("first_name") { + userObj.FirstName = sl.String(d.Get("first_name").(string)) + } + if d.HasChange("last_name") { + userObj.LastName = sl.String(d.Get("last_name").(string)) + } + if d.HasChange("email") { + userObj.Email = sl.String(d.Get("email").(string)) + } + if d.HasChange("company_name") { + userObj.CompanyName = sl.String(d.Get("company_name").(string)) + } + if d.HasChange("address1") { + userObj.Address1 = sl.String(d.Get("address1").(string)) + } + if d.HasChange("address2") { + userObj.Address2 = sl.String(d.Get("address2").(string)) + } + if d.HasChange("city") { + userObj.City = sl.String(d.Get("city").(string)) + } + if d.HasChange("state") { + userObj.State = sl.String(d.Get("state").(string)) + } + if d.HasChange("country") { + userObj.Country = sl.String(d.Get("country").(string)) + } + if d.HasChange("timezone") { + tzID, err := getTimezoneIDByName(sess, d.Get("timezone").(string)) + if err != nil { + return err + } + userObj.TimezoneId = &tzID + } + if d.HasChange("user_status") { + userStatusID, err := getUserStatusIDByName(sess, d.Get("user_status").(string)) + if err != nil { + return err + } + userObj.UserStatusId = &userStatusID + } + + _, err = service.EditObject(&userObj) + if err != nil { + return fmt.Errorf("Error received while editing ibm_compute_user: %s", err) + } + + if d.HasChange("permissions") { + old, new := d.GetChange("permissions") + + // 1. Remove old permissions no longer appearing in the new set + // 2. 
+
+		remove := old.(*schema.Set).Difference(new.(*schema.Set)).List()
+		add := new.(*schema.Set).Difference(old.(*schema.Set)).List()
+
+		oldPermissions := make([]datatypes.User_Customer_CustomerPermission_Permission, 0, len(remove))
+		newPermissions := make([]datatypes.User_Customer_CustomerPermission_Permission, 0, len(add))
+
+		for _, elem := range remove {
+			oldPermissions = append(oldPermissions, makePermission(elem.(string)))
+		}
+
+		for _, elem := range add {
+			newPermissions = append(newPermissions, makePermission(elem.(string)))
+		}
+
+		// 'remove' all old permissions
+		_, err = service.RemoveBulkPortalPermission(oldPermissions)
+		if err != nil {
+			return fmt.Errorf("Error received while removing old permissions from ibm_compute_user: %s", err)
+		}
+
+		// 'add' new permission set
+		_, err = service.AddBulkPortalPermission(newPermissions)
+		if err != nil {
+			return fmt.Errorf("Error received while assigning new permissions to ibm_compute_user: %s", err)
+		}
+	}
+
+	if d.HasChange("has_api_key") {
+		// If true, create an API key if none exists. It's a no-op if an API key already exists.
+		// If false, delete the API key if one exists. It's a no-op if no API key exists.
+		api_key_flag := d.Get("has_api_key").(bool)
+
+		// Get the current keys.
+		keys := userObj.ApiAuthenticationKeys
+
+		// Create a key if the flag is true and a key does not already exist.
+		if api_key_flag {
+			if len(keys) == 0 { // means a key does not exist, so create one.
+				key, err := service.AddApiAuthenticationKey()
+				if err != nil {
+					return fmt.Errorf("Error creating API key while editing ibm_compute_user resource: %s", err)
+				}
+
+				d.Set("api_key", key)
+			} else {
+				d.Set("api_key", keys[0].AuthenticationKey) // as api_key is a computed field
+			}
+		} else {
+			// If false, then delete the key if there was one.
+			if len(keys) > 0 {
+				success, err := service.RemoveApiAuthenticationKey(keys[0].Id)
+				if err != nil {
+					return fmt.Errorf("Error deleting API key while editing ibm_compute_user resource: %s", err)
+				}
+
+				if !success {
+					return fmt.Errorf(
+						"The API reported removal of the api key was not successful for %s",
+						d.Get("email").(string),
+					)
+				}
+			}
+			d.Set("api_key", nil)
+		}
+	}
+	return nil
+}
+
+func resourceIBMComputeUserDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetUserCustomerService(sess)
+
+	id, _ := strconv.Atoi(d.Id())
+
+	user := datatypes.User_Customer{
+		UserStatusId: sl.Int(userCustomerCancelStatus),
+	}
+
+	log.Printf("[INFO] Deleting IBM Cloud user: %d", id)
+	_, err := service.Id(id).EditObject(&user)
+	if err != nil {
+		return fmt.Errorf("Error deleting IBM Cloud user: %s", err)
+	}
+
+	d.SetId("")
+	return nil
+}
+
+func resourceIBMComputeUserExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	service := services.GetUserCustomerService(meta.(ClientSession).SoftLayerSession())
+
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(id).GetObject()
+	if err != nil {
+		// A 404 means the user is gone; any other error is a real failure and
+		// must be propagated rather than silently swallowed.
+		if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 {
+			return false, nil
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+
+	return result.Id != nil && *result.Id == id, nil
+}
+
+func getTimezoneIDByName(sess *session.Session, shortName string) (int, error) {
+	zones, err := services.GetLocaleTimezoneService(sess).
+		Mask("id,shortName").
+ GetAllObjects() + + if err != nil { + return -1, err + } + + for _, zone := range zones { + if *zone.ShortName == shortName { + return *zone.Id, nil + } + } + + return -1, fmt.Errorf("Timezone %s could not be found", shortName) + +} + +func getUserStatusIDByName(sess *session.Session, name string) (int, error) { + statuses, err := services.GetUserCustomerStatusService(sess). + Mask("id,keyName"). + GetAllObjects() + + if err != nil { + return -1, err + } + + for _, status := range statuses { + if *status.KeyName == name { + return *status.Id, nil + } + } + + return -1, fmt.Errorf("User status %s could not be found", name) + +} diff --git a/ibm/resource_ibm_compute_user_test.go b/ibm/resource_ibm_compute_user_test.go new file mode 100644 index 0000000000..7bcc499d8e --- /dev/null +++ b/ibm/resource_ibm_compute_user_test.go @@ -0,0 +1,224 @@ +package ibm + +import ( + "fmt" + "strconv" + "testing" + + "crypto/sha1" + "encoding/hex" + "regexp" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" +) + +func TestAccIBMComputeUser_Basic(t *testing.T) { + var user datatypes.User_Customer + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMComputeUserDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMComputeUserConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMComputeUserExists("ibm_compute_user.testuser", &user), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "first_name", "first_name"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "last_name", "last_name"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "email", testAccRandomEmail), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "company_name", "company_name"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "address1", "1 Main St."), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "address2", "Suite 345"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "city", "Atlanta"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "state", "GA"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "country", "US"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "timezone", "EST"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "user_status", "ACTIVE"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "password", hash(testAccUserPassword)), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "permissions.#", "2"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "has_api_key", "true"), + resource.TestMatchResourceAttr( + "ibm_compute_user.testuser", "api_key", apiKeyRegexp), + resource.TestCheckResourceAttrSet( + "ibm_compute_user.testuser", "username"), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMComputeUserConfig_updated, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "first_name", "new_first_name"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "last_name", "new_last_name"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "email", "new"+testAccRandomEmail), + 
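+			// password below is compared against hash(testAccUserPassword): the
+			// provider stores a SHA-1 hash of the password in state (see the
+			// hash() helper at the bottom of this file).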
resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "company_name", "new_company_name"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "address1", "1 1st Avenue"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "address2", "Apartment 2"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "city", "Montreal"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "state", "QC"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "country", "CA"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "timezone", "MST"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "user_status", "INACTIVE"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "password", hash(testAccUserPassword)), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "permissions.#", "3"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "has_api_key", "false"), + resource.TestCheckResourceAttr( + "ibm_compute_user.testuser", "api_key", ""), + resource.TestCheckResourceAttrSet( + "ibm_compute_user.testuser", "username"), + ), + }, + }, + }) +} + +func testAccCheckIBMComputeUserDestroy(s *terraform.State) error { + client := services.GetUserCustomerService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_compute_user" { + continue + } + + userID, _ := strconv.Atoi(rs.Primary.ID) + + // Try to find the user + user, err := client.Id(userID).Mask("userStatusId").GetObject() + + // Users are not immediately deleted, but rather placed into a 'cancel_pending' (1021) status + if err != nil || *user.UserStatusId != userCustomerCancelStatus { + return fmt.Errorf("IBM Cloud User still exists") + } + } + + return nil +} + +func testAccCheckIBMComputeUserExists(n string, user *datatypes.User_Customer) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Record ID is set") + } + + userID, _ := strconv.Atoi(rs.Primary.ID) + + client := services.GetUserCustomerService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + foundUser, err := client.Id(userID).GetObject() + + if err != nil { + return err + } + + if strconv.Itoa(int(*foundUser.Id)) != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + *user = foundUser + + return nil + } +} + +// Use session.New() to get a new session because the function should be called before testAccProvider is configured. +func testGetAccountId() string { + service := services.GetAccountService(session.New()) + account, err := service.Mask("id").GetObject() + if err != nil { + return "" + } else { + return strconv.Itoa(*account.Id) + } +} + +var testAccCheckIBMComputeUserConfig_basic = fmt.Sprintf(` +resource "ibm_compute_user" "testuser" { + first_name = "first_name" + last_name = "last_name" + email = "%s" + company_name = "company_name" + address1 = "1 Main St." 
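+    # timezone is the SoftLayer short name (e.g. "EST"); the provider
+    # resolves it to a numeric ID (see getTimezoneIDByName) at apply time.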
+ address2 = "Suite 345" + city = "Atlanta" + state = "GA" + country = "US" + timezone = "EST" + username = "%s" + password = "%s" + permissions = [ + "SERVER_ADD", + "ACCESS_ALL_GUEST" + ] + has_api_key = true +}`, testAccRandomEmail, testAccRandomUser, testAccUserPassword) + +var testAccCheckIBMComputeUserConfig_updated = fmt.Sprintf(` +resource "ibm_compute_user" "testuser" { + first_name = "new_first_name" + last_name = "new_last_name" + email = "new%s" + company_name = "new_company_name" + address1 = "1 1st Avenue" + address2 = "Apartment 2" + city = "Montreal" + state = "QC" + country = "CA" + timezone = "MST" + user_status = "INACTIVE" + username = "%s" + password = "%s" + permissions = [ + "SERVER_ADD", + "ACCESS_ALL_HARDWARE", + "TICKET_EDIT" + ] + has_api_key = false +}`, testAccRandomEmail, testAccRandomUser, testAccUserPassword) + +var testAccRandomEmail = resource.UniqueId() + "@example.com" +var testAccRandomUser = testGetAccountId() + "_" + testAccRandomEmail +var testAccUserPassword = "Softlayer2017!" +var apiKeyRegexp, _ = regexp.Compile(`\w+`) + +// Function used by provider for hashing passwords +func hash(v interface{}) string { + hash := sha1.Sum([]byte(v.(string))) + return hex.EncodeToString(hash[:]) +} diff --git a/ibm/resource_ibm_compute_vm_instance.go b/ibm/resource_ibm_compute_vm_instance.go new file mode 100644 index 0000000000..e3122788b9 --- /dev/null +++ b/ibm/resource_ibm_compute_vm_instance.go @@ -0,0 +1,1225 @@ +package ibm + +import ( + "crypto/rand" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "log" + "math" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/helpers/virtual" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +type storageIds []int + +func (s storageIds) Storages(meta interface{}) ([]datatypes.Network_Storage, error) { + storageService := services.GetNetworkStorageService(meta.(ClientSession).SoftLayerSession()) + storages := make([]datatypes.Network_Storage, len(s)) + + for i, id := range s { + var err error + storages[i], err = storageService.Id(id).GetObject() + if err != nil { + return nil, err + } + } + return storages, nil +} + +const ( + staticIPRouted = "STATIC_IP_ROUTED" + + upgradeTransaction = "UPGRADE" + pendingUpgrade = "pending_upgrade" + inProgressUpgrade = "upgrade_started" + + activeTransaction = "active" + idleTransaction = "idle" + + virtualGuestAvailable = "available" + virtualGuestProvisioning = "provisioning" + + networkStorageMassAccessControlModificationException = "SoftLayer_Exception_Network_Storage_Group_MassAccessControlModification" + retryDelayForModifyingStorageAccess = 10 * time.Second +) + +func resourceIBMComputeVmInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMComputeVmInstanceCreate, + Read: resourceIBMComputeVmInstanceRead, + Update: resourceIBMComputeVmInstanceUpdate, + Delete: resourceIBMComputeVmInstanceDelete, + Exists: resourceIBMComputeVmInstanceExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "hostname": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: genID, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + // FIXME: Work 
around another bug in terraform. + // When a default function is used with an optional property, + // terraform will always execute it on apply, even when the property + // already has a value in the state for it. This causes a false diff. + // Making the property Computed:true does not make a difference. + if strings.HasPrefix(o, "terraformed-") && strings.HasPrefix(n, "terraformed-") { + return true + } + return o == n + }, + }, + + "domain": { + Type: schema.TypeString, + Required: true, + }, + + "os_reference_code": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"image_id"}, + }, + + "hourly_billing": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "private_network_only": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "cores": { + Type: schema.TypeInt, + Required: true, + }, + + "memory": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + memoryInMB := float64(v.(int)) + + // Validate memory to match gigs format + remaining := math.Mod(memoryInMB, 1024) + if remaining > 0 { + suggested := math.Ceil(memoryInMB/1024) * 1024 + errors = append(errors, fmt.Errorf( + "Invalid 'memory' value %d megabytes, must be a multiple of 1024 (e.g. use %d)", int(memoryInMB), int(suggested))) + } + + return + }, + }, + + "dedicated_acct_host_only": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "public_vlan_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "public_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "private_vlan_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "private_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "disks": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "network_speed": { + Type: schema.TypeInt, + Optional: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + if privateNetworkOnly, ok := d.GetOk("private_network_only"); ok { + if privateNetworkOnly.(bool) { + return true + } + } + return o == n + }, + Default: 100, + }, + + "ipv4_address": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv4_address_private": { + Type: schema.TypeString, + Computed: true, + }, + + "ip_address_id": { + Type: schema.TypeInt, + Computed: true, + }, + + "ip_address_id_private": { + Type: schema.TypeInt, + Computed: true, + }, + + "ipv6_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + + "ipv6_address": { + Type: schema.TypeString, + Computed: true, + }, + + "ipv6_address_id": { + Type: schema.TypeInt, + Computed: true, + }, + + // SoftLayer does not support public_ipv6_subnet configuration in vm creation. So, public_ipv6_subnet + // is defined as a computed parameter. + "public_ipv6_subnet": { + Type: schema.TypeString, + Computed: true, + }, + + "secondary_ip_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + // secondary_ip_count is only used when a virtual_guest resource is created. 
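+				// Once the guest exists (d.State() != nil), suppress the diff so a
+				// change to secondary_ip_count does not force a new order; the value
+				// only matters at create time.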
+ if d.State() == nil { + return false + } + return true + }, + }, + + "secondary_ip_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "ssh_key_ids": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + + "file_storage_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + + "block_storage_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + "user_metadata": { + Type: schema.TypeString, + Optional: true, + }, + + "local_disk": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "post_install_script_uri": { + Type: schema.TypeString, + Optional: true, + Default: nil, + ForceNew: true, + }, + + "image_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"os_reference_code"}, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "wait_time_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 90, + }, + }, + } +} + +func getSubnetID(subnet string, meta interface{}) (int, error) { + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + + subnetInfo := strings.Split(subnet, "/") + if len(subnetInfo) != 2 { + return 0, fmt.Errorf( + "Unable to parse the provided subnet: %s", subnet) + } + + networkIdentifier := subnetInfo[0] + cidr := subnetInfo[1] + + subnets, err := service. + Mask("id"). + Filter( + filter.Build( + filter.Path("subnets.cidr").Eq(cidr), + filter.Path("subnets.networkIdentifier").Eq(networkIdentifier), + ), + ). + GetSubnets() + + if err != nil { + return 0, fmt.Errorf("Error looking up Subnet: %s", err) + } + + if len(subnets) < 1 { + return 0, fmt.Errorf( + "Unable to locate a subnet matching the provided subnet: %s", subnet) + } + + return *subnets[0].Id, nil +} + +func getNameForBlockDevice(i int) string { + // skip 1, which is reserved for the swap disk. + // so we get 0, 2, 3, 4, 5 ... + if i == 0 { + return "0" + } + + return strconv.Itoa(i + 1) +} + +func getBlockDevices(d *schema.ResourceData) []datatypes.Virtual_Guest_Block_Device { + numBlocks := d.Get("disks.#").(int) + if numBlocks == 0 { + return nil + } + + blocks := make([]datatypes.Virtual_Guest_Block_Device, 0, numBlocks) + for i := 0; i < numBlocks; i++ { + blockRef := fmt.Sprintf("disks.%d", i) + name := getNameForBlockDevice(i) + capacity := d.Get(blockRef).(int) + block := datatypes.Virtual_Guest_Block_Device{ + Device: &name, + DiskImage: &datatypes.Virtual_Disk_Image{ + Capacity: &capacity, + }, + } + blocks = append(blocks, block) + } + return blocks +} +func getVirtualGuestTemplateFromResourceData(d *schema.ResourceData, meta interface{}) (datatypes.Virtual_Guest, error) { + + dc := datatypes.Location{ + Name: sl.String(d.Get("datacenter").(string)), + } + // FIXME: Work around bug in terraform (?) + // For properties that have a default value set and a diff suppress function, + // it is not using the default value. 
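+	// As a workaround, fall back to the schema default by hand whenever the
+	// value comes back as the zero value.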
+ networkSpeed := d.Get("network_speed").(int) + if networkSpeed == 0 { + networkSpeed = resourceIBMComputeVmInstance().Schema["network_speed"].Default.(int) + } + + networkComponent := datatypes.Virtual_Guest_Network_Component{ + MaxSpeed: &networkSpeed, + } + + opts := datatypes.Virtual_Guest{ + Hostname: sl.String(d.Get("hostname").(string)), + Domain: sl.String(d.Get("domain").(string)), + HourlyBillingFlag: sl.Bool(d.Get("hourly_billing").(bool)), + PrivateNetworkOnlyFlag: sl.Bool(d.Get("private_network_only").(bool)), + Datacenter: &dc, + StartCpus: sl.Int(d.Get("cores").(int)), + MaxMemory: sl.Int(d.Get("memory").(int)), + NetworkComponents: []datatypes.Virtual_Guest_Network_Component{networkComponent}, + BlockDevices: getBlockDevices(d), + LocalDiskFlag: sl.Bool(d.Get("local_disk").(bool)), + PostInstallScriptUri: sl.String(d.Get("post_install_script_uri").(string)), + } + + if dedicatedAcctHostOnly, ok := d.GetOk("dedicated_acct_host_only"); ok { + opts.DedicatedAccountHostOnlyFlag = sl.Bool(dedicatedAcctHostOnly.(bool)) + } + + if imgID, ok := d.GetOk("image_id"); ok { + imageID := imgID.(int) + service := services. + GetVirtualGuestBlockDeviceTemplateGroupService(meta.(ClientSession).SoftLayerSession()) + + image, err := service. + Mask("id,globalIdentifier").Id(imageID). + GetObject() + if err != nil { + return opts, fmt.Errorf("Error looking up image %d: %s", imageID, err) + } else if image.GlobalIdentifier == nil { + return opts, fmt.Errorf( + "Image template %d does not have a global identifier", imageID) + } + + opts.BlockDeviceTemplateGroup = &datatypes.Virtual_Guest_Block_Device_Template_Group{ + GlobalIdentifier: image.GlobalIdentifier, + } + } + + if operatingSystemReferenceCode, ok := d.GetOk("os_reference_code"); ok { + opts.OperatingSystemReferenceCode = sl.String(operatingSystemReferenceCode.(string)) + } + + publicVlanID := d.Get("public_vlan_id").(int) + publicSubnet := d.Get("public_subnet").(string) + privateVlanID := d.Get("private_vlan_id").(int) + privateSubnet := d.Get("private_subnet").(string) + + primaryNetworkComponent := datatypes.Virtual_Guest_Network_Component{ + NetworkVlan: &datatypes.Network_Vlan{}, + } + + if publicVlanID > 0 { + primaryNetworkComponent.NetworkVlan.Id = &publicVlanID + } + + // Apply public subnet if provided + if publicSubnet != "" { + primarySubnetID, err := getSubnetID(publicSubnet, meta) + if err != nil { + return opts, fmt.Errorf("Error creating virtual guest: %s", err) + } + primaryNetworkComponent.NetworkVlan.PrimarySubnetId = &primarySubnetID + } + + if publicVlanID > 0 || publicSubnet != "" { + opts.PrimaryNetworkComponent = &primaryNetworkComponent + } + + primaryBackendNetworkComponent := datatypes.Virtual_Guest_Network_Component{ + NetworkVlan: &datatypes.Network_Vlan{}, + } + + if privateVlanID > 0 { + primaryBackendNetworkComponent.NetworkVlan.Id = &privateVlanID + } + + // Apply private subnet if provided + if privateSubnet != "" { + primarySubnetID, err := getSubnetID(privateSubnet, meta) + if err != nil { + return opts, fmt.Errorf("Error creating virtual guest: %s", err) + } + primaryBackendNetworkComponent.NetworkVlan.PrimarySubnetId = &primarySubnetID + } + + if privateVlanID > 0 || privateSubnet != "" { + opts.PrimaryBackendNetworkComponent = &primaryBackendNetworkComponent + } + + if userData, ok := d.GetOk("user_metadata"); ok { + opts.UserData = []datatypes.Virtual_Guest_Attribute{ + { + Value: sl.String(userData.(string)), + }, + } + } + + // Get configured ssh_keys + sshKeys := 
d.Get("ssh_key_ids").([]interface{}) + sshKeysLen := len(sshKeys) + if sshKeysLen > 0 { + opts.SshKeys = make([]datatypes.Security_Ssh_Key, 0, sshKeysLen) + for _, sshKey := range sshKeys { + opts.SshKeys = append(opts.SshKeys, datatypes.Security_Ssh_Key{ + Id: sl.Int(sshKey.(int)), + }) + } + } + + return opts, nil +} + +func resourceIBMComputeVmInstanceCreate(d *schema.ResourceData, meta interface{}) error { + service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession()) + sess := meta.(ClientSession).SoftLayerSession() + + opts, err := getVirtualGuestTemplateFromResourceData(d, meta) + if err != nil { + return err + } + + log.Println("[INFO] Creating virtual machine") + + var id int + var template datatypes.Container_Product_Order + + // Build an order template with a custom image. + if opts.BlockDevices != nil && opts.BlockDeviceTemplateGroup != nil { + bd := *opts.BlockDeviceTemplateGroup + opts.BlockDeviceTemplateGroup = nil + opts.OperatingSystemReferenceCode = sl.String("UBUNTU_LATEST") + template, err = service.GenerateOrderTemplate(&opts) + if err != nil { + return fmt.Errorf("Error generating order template: %s", err) + } + + // Remove temporary OS from actual order + prices := make([]datatypes.Product_Item_Price, len(template.Prices)) + i := 0 + for _, p := range template.Prices { + if !strings.Contains(*p.Item.Description, "Ubuntu") { + prices[i] = p + i++ + } + } + template.Prices = prices[:i] + + template.ImageTemplateId = sl.Int(d.Get("image_id").(int)) + template.VirtualGuests[0].BlockDeviceTemplateGroup = &bd + template.VirtualGuests[0].OperatingSystemReferenceCode = nil + } else { + // Build an order template with os_reference_code + template, err = service.GenerateOrderTemplate(&opts) + if err != nil { + return fmt.Errorf("Error generating order template: %s", err) + } + } + + // Add an IPv6 price item + privateNetworkOnly := d.Get("private_network_only").(bool) + + if d.Get("ipv6_enabled").(bool) { + if privateNetworkOnly { + return fmt.Errorf("Unable to configure a public IPv6 address with a private_network_only option") + } + + ipv6Items, err := services.GetProductPackageService(sess). + Id(*template.PackageId). + Mask("id,capacity,description,units,keyName,prices[id,categories[id,name,categoryCode]]"). + Filter(filter.Build(filter.Path("items.keyName").Eq("1_IPV6_ADDRESS"))). + GetItems() + if err != nil { + return fmt.Errorf("Error generating order template: %s", err) + } + if len(ipv6Items) == 0 { + return fmt.Errorf("No product items matching 1_IPV6_ADDRESS could be found") + } + + template.Prices = append(template.Prices, + datatypes.Product_Item_Price{ + Id: ipv6Items[0].Prices[0].Id, + }, + ) + } + + // Configure secondary IPs + secondaryIPCount := d.Get("secondary_ip_count").(int) + if secondaryIPCount > 0 { + if privateNetworkOnly { + return fmt.Errorf("Unable to configure public secondary addresses with a private_network_only option") + } + staticIPItems, err := services.GetProductPackageService(sess). + Id(*template.PackageId). + Mask("id,capacity,description,units,keyName,prices[id,categories[id,name,categoryCode]]"). + Filter(filter.Build(filter.Path("items.keyName").Eq(strconv.Itoa(secondaryIPCount) + "_PUBLIC_IP_ADDRESSES"))). 
+			GetItems()
+		if err != nil {
+			return fmt.Errorf("Error generating order template: %s", err)
+		}
+		if len(staticIPItems) == 0 {
+			return fmt.Errorf("No product items matching %d_PUBLIC_IP_ADDRESSES could be found", secondaryIPCount)
+		}
+
+		template.Prices = append(template.Prices,
+			datatypes.Product_Item_Price{
+				Id: staticIPItems[0].Prices[0].Id,
+			},
+		)
+	}
+
+	// GenerateOrderTemplate omits UserData, subnet, and maxSpeed, so configure virtual_guest.
+	template.VirtualGuests[0] = opts
+
+	order := &datatypes.Container_Product_Order_Virtual_Guest{
+		Container_Product_Order_Hardware_Server: datatypes.Container_Product_Order_Hardware_Server{Container_Product_Order: template},
+	}
+
+	orderService := services.GetProductOrderService(sess)
+	receipt, err := orderService.PlaceOrder(order, sl.Bool(false))
+	if err != nil {
+		return fmt.Errorf("Error ordering virtual guest: %s", err)
+	}
+	id = *receipt.OrderDetails.VirtualGuests[0].Id
+
+	d.SetId(fmt.Sprintf("%d", id))
+
+	log.Printf("[INFO] Virtual Machine ID: %s", d.Id())
+
+	// Set tags
+	tags := getTags(d)
+	if tags != "" {
+		// Only set tags when non-empty, since we are creating the virtual guest
+		err = setGuestTags(id, tags, meta)
+		if err != nil {
+			return err
+		}
+	}
+
+	var storageIds []int
+	if fileStorageSet := d.Get("file_storage_ids").(*schema.Set); len(fileStorageSet.List()) > 0 {
+		storageIds = expandIntList(fileStorageSet.List())
+	}
+	if blockStorageSet := d.Get("block_storage_ids").(*schema.Set); len(blockStorageSet.List()) > 0 {
+		storageIds = append(storageIds, expandIntList(blockStorageSet.List())...)
+	}
+	if len(storageIds) > 0 {
+		err := addAccessToStorageList(service.Id(id), id, storageIds, meta)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Wait for machine availability
+	_, err = WaitForVirtualGuestAvailable(d, meta)
+
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for virtual machine (%s) to become ready: %s", d.Id(), err)
+	}
+
+	return resourceIBMComputeVmInstanceRead(d, meta)
+}
+
+func resourceIBMComputeVmInstanceRead(d *schema.ResourceData, meta interface{}) error {
+	service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession())
+
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(id).Mask(
+		"hostname,domain,startCpus,maxMemory,dedicatedAccountHostOnlyFlag,operatingSystemReferenceCode,blockDeviceTemplateGroup[id]," +
+			"primaryIpAddress,primaryBackendIpAddress,privateNetworkOnlyFlag," +
+			"hourlyBillingFlag,localDiskFlag," +
+			"allowedNetworkStorage[id,nasType]," +
+			"userData[value],tagReferences[id,tag[name]]," +
+			"datacenter[id,name,longName]," +
+			"primaryNetworkComponent[networkVlan[id]," +
+			"primaryVersion6IpAddressRecord[subnet,guestNetworkComponentBinding[ipAddressId]]," +
+			"primaryIpAddressRecord[subnet,guestNetworkComponentBinding[ipAddressId]]]," +
+			"primaryBackendNetworkComponent[networkVlan[id]," +
+			"primaryIpAddressRecord[subnet,guestNetworkComponentBinding[ipAddressId]]]",
+	).GetObject()
+
+	if err != nil {
+		return fmt.Errorf("Error retrieving virtual guest: %s", err)
+	}
+
+	d.Set("hostname", *result.Hostname)
+	d.Set("domain", *result.Domain)
+
+	if result.BlockDeviceTemplateGroup != nil {
+		d.Set("image_id", result.BlockDeviceTemplateGroup.Id)
+	} else {
+		// Provided only for the sake of importing os_reference_code.
+		// In other flows, when the user gives say UBUNTU_LATEST in the configuration
+		// file, the value read back from the API might be UBUNTU_16_64,
+		// which is the actual Ubuntu version that gets provisioned. So we simply
+		// avoid writing back the received value, to avoid creating a spurious diff.
+		if _, ok := d.GetOk("os_reference_code"); !ok {
+			d.Set("os_reference_code", result.OperatingSystemReferenceCode)
+		}
+	}
+
+	if result.Datacenter != nil {
+		d.Set("datacenter", *result.Datacenter.Name)
+	}
+
+	d.Set(
+		"network_speed",
+		sl.Grab(
+			result,
+			"PrimaryBackendNetworkComponent.MaxSpeed",
+			d.Get("network_speed").(int),
+		),
+	)
+	d.Set("cores", *result.StartCpus)
+	d.Set("memory", *result.MaxMemory)
+	d.Set("dedicated_acct_host_only", *result.DedicatedAccountHostOnlyFlag)
+	if result.PrimaryIpAddress != nil {
+		d.Set("has_public_ip", *result.PrimaryIpAddress != "")
+		d.Set("ipv4_address", *result.PrimaryIpAddress)
+	}
+	d.Set("ipv4_address_private", *result.PrimaryBackendIpAddress)
+	if result.PrimaryNetworkComponent.PrimaryIpAddressRecord != nil {
+		d.Set("ip_address_id", *result.PrimaryNetworkComponent.PrimaryIpAddressRecord.GuestNetworkComponentBinding.IpAddressId)
+	}
+	d.Set("ip_address_id_private",
+		*result.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord.GuestNetworkComponentBinding.IpAddressId)
+	d.Set("private_network_only", *result.PrivateNetworkOnlyFlag)
+	d.Set("hourly_billing", *result.HourlyBillingFlag)
+	d.Set("local_disk", *result.LocalDiskFlag)
+
+	if result.PrimaryNetworkComponent.NetworkVlan != nil {
+		d.Set("public_vlan_id", *result.PrimaryNetworkComponent.NetworkVlan.Id)
+	}
+
+	d.Set("private_vlan_id", *result.PrimaryBackendNetworkComponent.NetworkVlan.Id)
+
+	if result.PrimaryNetworkComponent.PrimaryIpAddressRecord != nil {
+		publicSubnet := result.PrimaryNetworkComponent.PrimaryIpAddressRecord.Subnet
+		d.Set(
+			"public_subnet",
+			fmt.Sprintf("%s/%d", *publicSubnet.NetworkIdentifier, *publicSubnet.Cidr),
+		)
+	}
+
+	privateSubnet := result.PrimaryBackendNetworkComponent.PrimaryIpAddressRecord.Subnet
+	d.Set(
+		"private_subnet",
+		fmt.Sprintf("%s/%d", *privateSubnet.NetworkIdentifier, *privateSubnet.Cidr),
+	)
+
+	d.Set("ipv6_enabled", false)
+	if result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord != nil {
+		d.Set("ipv6_enabled", true)
+		d.Set("ipv6_address", *result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.IpAddress)
+		d.Set("ipv6_address_id", *result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.GuestNetworkComponentBinding.IpAddressId)
+		publicSubnet := result.PrimaryNetworkComponent.PrimaryVersion6IpAddressRecord.Subnet
+		d.Set(
+			"public_ipv6_subnet",
+			fmt.Sprintf("%s/%d", *publicSubnet.NetworkIdentifier, *publicSubnet.Cidr),
+		)
+	}
+
+	userData := result.UserData
+	if len(userData) > 0 {
+		data, err := base64.StdEncoding.DecodeString(*userData[0].Value)
+		if err != nil {
+			log.Printf("Can't base64 decode user data %s. error: %s", *userData[0].Value, err)
error: %s", *userData[0].Value, err) + d.Set("user_metadata", *userData[0].Value) + } else { + d.Set("user_metadata", string(data)) + } + } + + tagReferences := result.TagReferences + tagReferencesLen := len(tagReferences) + if tagReferencesLen > 0 { + tags := make([]string, 0, tagReferencesLen) + for _, tagRef := range tagReferences { + tags = append(tags, *tagRef.Tag.Name) + } + d.Set("tags", tags) + } + + storages := result.AllowedNetworkStorage + if len(storages) > 0 { + d.Set("block_storage_ids", flattenBlockStorageID(storages)) + d.Set("file_storage_ids", flattenFileStorageID(storages)) + } + + // Set connection info + connInfo := map[string]string{"type": "ssh"} + if !*result.PrivateNetworkOnlyFlag && result.PrimaryIpAddress != nil { + connInfo["host"] = *result.PrimaryIpAddress + } else { + connInfo["host"] = *result.PrimaryBackendIpAddress + } + d.SetConnInfo(connInfo) + + // Read secondary IP addresses + d.Set("secondary_ip_addresses", nil) + if result.PrimaryIpAddress != nil { + secondarySubnetResult, err := services.GetAccountService(meta.(ClientSession).SoftLayerSession()). + Mask("ipAddresses[id,ipAddress],subnetType"). + Filter(filter.Build(filter.Path("publicSubnets.endPointIpAddress.ipAddress").Eq(*result.PrimaryIpAddress))). + GetPublicSubnets() + if err != nil { + log.Printf("Error getting secondary Ip addresses: %s", err) + } + + secondaryIps := make([]string, 0) + for _, subnet := range secondarySubnetResult { + // Count static secondary ip addresses. + if *subnet.SubnetType == staticIPRouted { + for _, ipAddressObj := range subnet.IpAddresses { + secondaryIps = append(secondaryIps, *ipAddressObj.IpAddress) + } + } + } + if len(secondaryIps) > 0 { + d.Set("secondary_ip_addresses", secondaryIps) + d.Set("secondary_ip_count", len(secondaryIps)) + } + } + + return nil +} + +func resourceIBMComputeVmInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetVirtualGuestService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(id).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving virtual guest: %s", err) + } + + // Update "hostname" and "domain" fields if present and changed + // Those are the only fields, which could be updated + if d.HasChange("hostname") || d.HasChange("domain") { + result.Hostname = sl.String(d.Get("hostname").(string)) + result.Domain = sl.String(d.Get("domain").(string)) + + _, err = service.Id(id).EditObject(&result) + + if err != nil { + return fmt.Errorf("Couldn't update virtual guest: %s", err) + } + } + + // Set user data if provided and not empty + if d.HasChange("user_metadata") { + _, err := service.Id(id).SetUserMetadata([]string{d.Get("user_metadata").(string)}) + if err != nil { + return fmt.Errorf("Couldn't update user data for virtual guest: %s", err) + } + } + + // Update tags + if d.HasChange("tags") { + tags := getTags(d) + err := setGuestTags(id, tags, meta) + if err != nil { + return err + } + } + + err = modifyStorageAccess(service.Id(id), id, meta, d) + if err != nil { + return err + } + + // Upgrade "cores", "memory" and "network_speed" if provided and changed + upgradeOptions := map[string]float64{} + if d.HasChange("cores") { + upgradeOptions[product.CPUCategoryCode] = float64(d.Get("cores").(int)) + } + + if d.HasChange("memory") { + memoryInMB := float64(d.Get("memory").(int)) + + // Convert memory to GB, as softlayer 
+		// This must already have been validated by the "memory" schema at plan time.
+		upgradeOptions[product.MemoryCategoryCode] = float64(int(memoryInMB / 1024))
+	}
+
+	if d.HasChange("network_speed") {
+		upgradeOptions[product.NICSpeedCategoryCode] = float64(d.Get("network_speed").(int))
+	}
+
+	if len(upgradeOptions) > 0 {
+		_, err = virtual.UpgradeVirtualGuest(sess, &result, upgradeOptions)
+		if err != nil {
+			return fmt.Errorf("Couldn't upgrade virtual guest: %s", err)
+		}
+
+		// Wait for SoftLayer to start upgrading...
+		_, err = WaitForUpgradeTransactionsToAppear(d, meta)
+		if err != nil {
+			return err
+		}
+
+		// Wait for the upgrade transactions to finish
+		_, err = WaitForNoActiveTransactions(d, meta)
+
+		return err
+	}
+
+	return resourceIBMComputeVmInstanceRead(d, meta)
+}
+
+func modifyStorageAccess(sam storageAccessModifier, deviceID int, meta interface{}, d *schema.ResourceData) error {
+	var remove, add []int
+	if d.HasChange("file_storage_ids") {
+		o, n := d.GetChange("file_storage_ids")
+		os := o.(*schema.Set)
+		ns := n.(*schema.Set)
+
+		remove = expandIntList(os.Difference(ns).List())
+		add = expandIntList(ns.Difference(os).List())
+	}
+	if d.HasChange("block_storage_ids") {
+		o, n := d.GetChange("block_storage_ids")
+		os := o.(*schema.Set)
+		ns := n.(*schema.Set)
+
+		remove = append(remove, expandIntList(os.Difference(ns).List())...)
+		add = append(add, expandIntList(ns.Difference(os).List())...)
+	}
+
+	if len(add) > 0 {
+		err := addAccessToStorageList(sam, deviceID, add, meta)
+		if err != nil {
+			return err
+		}
+	}
+	if len(remove) > 0 {
+		err := removeAccessToStorageList(sam, deviceID, remove, meta)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func resourceIBMComputeVmInstanceDelete(d *schema.ResourceData, meta interface{}) error {
+	service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession())
+
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	_, err = WaitForNoActiveTransactions(d, meta)
+
+	if err != nil {
+		return fmt.Errorf("Error deleting virtual guest, couldn't wait for zero active transactions: %s", err)
+	}
+
+	ok, err := service.Id(id).DeleteObject()
+
+	if err != nil {
+		return fmt.Errorf("Error deleting virtual guest: %s", err)
+	}
+
+	if !ok {
+		return fmt.Errorf(
+			"API reported it was unsuccessful in removing the virtual guest '%d'", id)
+	}
+
+	return nil
+}
+
+// genID generates a random string to be used for the optional hostname
+func genID() (interface{}, error) {
+	numBytes := 8
+	bytes := make([]byte, numBytes)
+	n, err := rand.Reader.Read(bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	if n != numBytes {
+		return nil, errors.New("generated insufficient random bytes")
+	}
+
+	hexStr := hex.EncodeToString(bytes)
+	return fmt.Sprintf("terraformed-%s", hexStr), nil
+}
+
+// WaitForUpgradeTransactionsToAppear waits for upgrade transactions to appear
+func WaitForUpgradeTransactionsToAppear(d *schema.ResourceData, meta interface{}) (interface{}, error) {
+	log.Printf("Waiting for server (%s) to have upgrade transactions", d.Id())
+
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return nil, fmt.Errorf("The instance ID %s must be numeric", d.Id())
+	}
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"retry", pendingUpgrade},
+		Target:  []string{inProgressUpgrade},
+		Refresh: func() (interface{}, string, error) {
+			service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession())
+			transactions, err := service.Id(id).GetActiveTransactions()
+			if err != 
nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Couldn't fetch active transactions: %s", err) + } + return false, "retry", nil + } + for _, transaction := range transactions { + if strings.Contains(*transaction.TransactionStatus.Name, upgradeTransaction) { + return transactions, inProgressUpgrade, nil + } + } + return transactions, pendingUpgrade, nil + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 5 * time.Second, + } + + return stateConf.WaitForState() +} + +// WaitForNoActiveTransactions Wait for no active transactions +func WaitForNoActiveTransactions(d *schema.ResourceData, meta interface{}) (interface{}, error) { + log.Printf("Waiting for server (%s) to have zero active transactions", d.Id()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return nil, fmt.Errorf("The instance ID %s must be numeric", d.Id()) + } + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", activeTransaction}, + Target: []string{idleTransaction}, + Refresh: func() (interface{}, string, error) { + service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession()) + transactions, err := service.Id(id).GetActiveTransactions() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Couldn't get active transactions: %s", err) + } + return false, "retry", nil + } + if len(transactions) == 0 { + return transactions, idleTransaction, nil + } + return transactions, activeTransaction, nil + }, + Timeout: time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +// WaitForVirtualGuestAvailable Waits for virtual guest creation +func WaitForVirtualGuestAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { + log.Printf("Waiting for server (%s) to be available.", d.Id()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return nil, fmt.Errorf("The instance ID %s must be numeric", d.Id()) + } + sess := meta.(ClientSession).SoftLayerSession() + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", virtualGuestProvisioning}, + Target: []string{virtualGuestAvailable}, + Refresh: virtualGuestStateRefreshFunc(sess, id, d), + Timeout: time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func virtualGuestStateRefreshFunc(sess *session.Session, instanceID int, d *schema.ResourceData) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + // Check active transactions + publicNetwork := !d.Get("private_network_only").(bool) + service := services.GetVirtualGuestService(sess) + result, err := service.Id(instanceID).Mask("activeTransaction,primaryBackendIpAddress,primaryIpAddress").GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Error retrieving virtual guest: %s", err) + } + return false, "retry", nil + } + // Check active transactions + log.Println("Checking active transactions.") + if result.ActiveTransaction != nil { + return result, virtualGuestProvisioning, nil + } + + // Check Primary IP address availability. 
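+		// The private (backend) address must always be assigned before the guest
+		// is usable; a public address is only expected when private_network_only
+		// is false.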
+ log.Println("Checking primary backend IP address.") + if result.PrimaryBackendIpAddress == nil { + return result, virtualGuestProvisioning, nil + } + + log.Println("Checking primary IP address.") + if publicNetwork && result.PrimaryIpAddress == nil { + return result, virtualGuestProvisioning, nil + } + + // Check Secondary IP address availability. + if d.Get("secondary_ip_count").(int) > 0 { + log.Println("Refreshing secondary IPs state.") + secondarySubnetResult, err := services.GetAccountService(sess). + Mask("ipAddresses[id,ipAddress]"). + Filter(filter.Build(filter.Path("publicSubnets.endPointIpAddress.virtualGuest.id").Eq(d.Id()))). + GetPublicSubnets() + if err != nil { + return nil, "", fmt.Errorf("Error retrieving secondary ip address: %s", err) + } + if len(secondarySubnetResult) == 0 { + return result, virtualGuestProvisioning, nil + } + } + + return result, virtualGuestAvailable, nil + } +} + +func resourceIBMComputeVmInstanceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession()) + guestID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(guestID).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return result.Id != nil && *result.Id == guestID, nil +} + +func getTags(d *schema.ResourceData) string { + tagSet := d.Get("tags").(*schema.Set) + + if tagSet.Len() == 0 { + return "" + } + + tags := make([]string, 0, tagSet.Len()) + for _, elem := range tagSet.List() { + tag := elem.(string) + tags = append(tags, tag) + } + return strings.Join(tags, ",") +} + +func setGuestTags(id int, tags string, meta interface{}) error { + service := services.GetVirtualGuestService(meta.(ClientSession).SoftLayerSession()) + _, err := service.Id(id).SetTags(sl.String(tags)) + if err != nil { + return fmt.Errorf("Could not set tags on virtual guest %d", id) + } + return nil +} + +type storageAccessModifier interface { + AllowAccessToNetworkStorageList([]datatypes.Network_Storage) (resp bool, err error) + RemoveAccessToNetworkStorageList([]datatypes.Network_Storage) (resp bool, err error) +} + +func addAccessToStorageList(sam storageAccessModifier, deviceID int, ids storageIds, meta interface{}) error { + s, err := ids.Storages(meta) + if err != nil { + return err + } + for { + _, err := sam.AllowAccessToNetworkStorageList(s) + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.Exception == networkStorageMassAccessControlModificationException { + log.Printf("[DEBUG] Allow access to storage failed with error %q. 
Will retry again after %q", err, retryDelayForModifyingStorageAccess)
+				time.Sleep(retryDelayForModifyingStorageAccess)
+				continue
+			}
+			return fmt.Errorf("Could not authorize device %d access to the following storages %q: %q", deviceID, ids, err)
+		}
+		log.Printf("[INFO] Device authorized to access %q", ids)
+		break
+	}
+	return nil
+}
+
+func removeAccessToStorageList(sam storageAccessModifier, deviceID int, ids storageIds, meta interface{}) error {
+	s, err := ids.Storages(meta)
+	if err != nil {
+		return err
+	}
+	for {
+		_, err := sam.RemoveAccessToNetworkStorageList(s)
+		if err != nil {
+			if apiErr, ok := err.(sl.Error); ok && apiErr.Exception == networkStorageMassAccessControlModificationException {
+				log.Printf("[DEBUG] Remove access to storage failed with error %q. Will retry again after %q", err, retryDelayForModifyingStorageAccess)
+				time.Sleep(retryDelayForModifyingStorageAccess)
+				continue
+			}
+			return fmt.Errorf("Could not remove device %d access to the following storages %q: %q", deviceID, ids, err)
+		}
+		log.Printf("[INFO] Device's access to %q has been removed", ids)
+		break
+	}
+	return nil
+}
diff --git a/ibm/resource_ibm_compute_vm_instance_test.go b/ibm/resource_ibm_compute_vm_instance_test.go
new file mode 100644
index 0000000000..c2bc3b2422
--- /dev/null
+++ b/ibm/resource_ibm_compute_vm_instance_test.go
@@ -0,0 +1,498 @@
+package ibm
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/services"
+)
+
+func init() {
+	imageID := os.Getenv("IBM_COMPUTE_VM_INSTANCE_IMAGE_ID")
+	if imageID == "" {
+		fmt.Println("[WARN] Set the environment variable IBM_COMPUTE_VM_INSTANCE_IMAGE_ID for testing " +
+			"the ibm_compute_vm_instance resource. The image should be replicated in the Washington 4 datacenter. 
Some tests for that resource will fail if this is not set correctly") + } +} + +func TestAccIBMComputeVmInstance_basic(t *testing.T) { + var guest datatypes.Virtual_Guest + + hostname := acctest.RandString(16) + domain := "terraformvmuat.ibm.com" + networkSpeed1 := "10" + networkSpeed2 := "100" + cores1 := "1" + cores2 := "2" + memory1 := "1024" + memory2 := "2048" + tags1 := "collectd" + tags2 := "mesos-master" + userMetadata1 := "{\\\"value\\\":\\\"newvalue\\\"}" + userMetadata1Unquoted, _ := strconv.Unquote(`"` + userMetadata1 + `"`) + userMetadata2 := "updated" + + configInstance := "ibm_compute_vm_instance.terraform-acceptance-test-1" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccIBMComputeVmInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccIBMComputeVmInstanceConfigBasic(hostname, domain, networkSpeed1, cores1, memory1, userMetadata1, tags1), + Destroy: false, + Check: resource.ComposeTestCheckFunc( + testAccIBMComputeVmInstanceExists(configInstance, &guest), + resource.TestCheckResourceAttr( + configInstance, "hostname", hostname), + resource.TestCheckResourceAttr( + configInstance, "domain", domain), + resource.TestCheckResourceAttr( + configInstance, "datacenter", "wdc04"), + resource.TestCheckResourceAttr( + configInstance, "network_speed", networkSpeed1), + resource.TestCheckResourceAttr( + configInstance, "hourly_billing", "true"), + resource.TestCheckResourceAttr( + configInstance, "private_network_only", "false"), + resource.TestCheckResourceAttr( + configInstance, "cores", cores1), + resource.TestCheckResourceAttr( + configInstance, "memory", memory1), + resource.TestCheckResourceAttr( + configInstance, "disks.0", "25"), + resource.TestCheckResourceAttr( + configInstance, "disks.1", "10"), + resource.TestCheckResourceAttr( + configInstance, "disks.2", "20"), + resource.TestCheckResourceAttr( + configInstance, "user_metadata", userMetadata1Unquoted), + resource.TestCheckResourceAttr( + configInstance, "local_disk", "false"), + resource.TestCheckResourceAttr( + configInstance, "dedicated_acct_host_only", "true"), + CheckStringSet( + configInstance, + "tags", []string{tags1}, + ), + resource.TestCheckResourceAttrSet( + configInstance, "ipv6_enabled"), + resource.TestCheckResourceAttrSet( + configInstance, "ipv6_address"), + resource.TestCheckResourceAttrSet( + configInstance, "ipv6_address_id"), + resource.TestCheckResourceAttrSet( + configInstance, "public_ipv6_subnet"), + resource.TestCheckResourceAttr( + configInstance, "secondary_ip_count", "4"), + resource.TestCheckResourceAttrSet( + configInstance, "secondary_ip_addresses.3"), + ), + }, + + { + Config: testAccIBMComputeVmInstanceConfigBasic(hostname, domain, networkSpeed1, cores1, memory1, userMetadata2, tags2), + Destroy: false, + Check: resource.ComposeTestCheckFunc( + testAccIBMComputeVmInstanceExists(configInstance, &guest), + resource.TestCheckResourceAttr( + configInstance, "user_metadata", userMetadata2), + CheckStringSet( + configInstance, + "tags", []string{tags2}, + ), + ), + }, + + { + Config: testAccIBMComputeVmInstanceConfigBasic(hostname, domain, networkSpeed2, cores2, memory2, userMetadata2, tags2), + Check: resource.ComposeTestCheckFunc( + testAccIBMComputeVmInstanceExists(configInstance, &guest), + resource.TestCheckResourceAttr( + configInstance, "cores", cores2), + resource.TestCheckResourceAttr( + configInstance, "memory", memory2), + resource.TestCheckResourceAttr( + configInstance, 
"network_speed", networkSpeed2), + ), + }, + }, + }) +} + +func TestAccIBMComputeVmInstance_BlockDeviceTemplateGroup(t *testing.T) { + var guest datatypes.Virtual_Guest + + hostname := acctest.RandString(16) + domain := "bdtg.terraformvmuat.ibm.com" + + imageID := os.Getenv("IBM_COMPUTE_VM_INSTANCE_IMAGE_ID") + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccIBMComputeVmInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccIBMComputeVmInstanceConfigBlockDeviceTemplateGroup(hostname, domain, imageID), + Check: resource.ComposeTestCheckFunc( + // image_id value is hardcoded. If it's valid then virtual guest will be created well + testAccIBMComputeVmInstanceExists("ibm_compute_vm_instance.terraform-acceptance-test-BDTGroup", &guest), + ), + }, + }, + }) +} + +func TestAccIBMComputeVmInstance_CustomImageMultipleDisks(t *testing.T) { + var guest datatypes.Virtual_Guest + hostname := acctest.RandString(16) + domain := "mdisk.terraformvmuat.ibm.com" + + imageID := os.Getenv("IBM_COMPUTE_VM_INSTANCE_IMAGE_ID") + + configInstance := "ibm_compute_vm_instance.terraform-acceptance-test-disks" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccIBMComputeVmInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccIBMComputeVmInstanceConfigCustomImageMultipleDisks(hostname, domain, imageID), + Check: resource.ComposeTestCheckFunc( + // image_id value is hardcoded. If it's valid then virtual guest will be created well + testAccIBMComputeVmInstanceExists(configInstance, &guest), + resource.TestCheckResourceAttr( + configInstance, "disks.0", "25"), + resource.TestCheckResourceAttr( + configInstance, "disks.1", "10"), + resource.TestCheckResourceAttr( + configInstance, "hostname", hostname), + resource.TestCheckResourceAttr( + configInstance, "domain", domain), + ), + }, + }, + }) +} + +func TestAccIBMComputeVmInstance_PostInstallScriptUri(t *testing.T) { + var guest datatypes.Virtual_Guest + + hostname := acctest.RandString(16) + domain := "pis.terraformvmuat.ibm.com" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccIBMComputeVmInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccIBMComputeVmInstanceConfigPostInstallScriptURI(hostname, domain), + Check: resource.ComposeTestCheckFunc( + // image_id value is hardcoded. 
If it's valid then virtual guest will be created well + testAccIBMComputeVmInstanceExists("ibm_compute_vm_instance.terraform-acceptance-test-pISU", &guest), + ), + }, + }, + }) +} + +func TestAccIBMComputeVmInstance_With_Network_Storage_Access(t *testing.T) { + var guest datatypes.Virtual_Guest + hostname := acctest.RandString(16) + domain := "storage.tfmvmuat.ibm.com" + + configInstance := "ibm_compute_vm_instance.terraform-vsi-storage-access" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccIBMComputeVmInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccessToStoragesBasic(hostname, domain), + Check: resource.ComposeTestCheckFunc( + testAccIBMComputeVmInstanceExists("ibm_compute_vm_instance.terraform-vsi-storage-access", &guest), + resource.TestCheckResourceAttr( + configInstance, "hostname", hostname), + resource.TestCheckResourceAttr( + configInstance, "domain", domain), + resource.TestCheckResourceAttr( + configInstance, "datacenter", "wdc04"), + resource.TestCheckResourceAttr( + configInstance, "hourly_billing", "true"), + resource.TestCheckResourceAttr( + configInstance, "file_storage_ids.#", "1"), + resource.TestCheckResourceAttr( + configInstance, "block_storage_ids.#", "1"), + ), + }, + { + Config: testAccessToStoragesUpdate(hostname, domain), + Check: resource.ComposeTestCheckFunc( + testAccIBMComputeVmInstanceExists("ibm_compute_vm_instance.terraform-vsi-storage-access", &guest), + resource.TestCheckResourceAttr( + configInstance, "file_storage_ids.#", "1"), + resource.TestCheckResourceAttr( + configInstance, "block_storage_ids.#", "0"), + ), + }, + }, + }) +} + +func testAccIBMComputeVmInstanceDestroy(s *terraform.State) error { + service := services.GetVirtualGuestService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_compute_vm_instance" { + continue + } + + guestID, _ := strconv.Atoi(rs.Primary.ID) + + // Try to find the guest + _, err := service.Id(guestID).GetObject() + + // Wait + + if err != nil && !strings.Contains(err.Error(), "404") { + return fmt.Errorf( + "Error waiting for virtual guest (%s) to be destroyed: %s", + rs.Primary.ID, err) + } + } + + return nil +} + +func testAccIBMComputeVmInstanceExists(n string, guest *datatypes.Virtual_Guest) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No virtual guest ID is set") + } + + id, err := strconv.Atoi(rs.Primary.ID) + + if err != nil { + return err + } + + service := services.GetVirtualGuestService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + retrieveVirtGuest, err := service.Id(id).GetObject() + + if err != nil { + return err + } + + fmt.Printf("The ID is %d\n", id) + + if *retrieveVirtGuest.Id != id { + return errors.New("Virtual guest not found") + } + + *guest = retrieveVirtGuest + + return nil + } +} + +func CheckStringSet(n string, name string, set []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + values := []string{} + setLengthKey := fmt.Sprintf("%s.#", name) + prefix := fmt.Sprintf("%s.", name) + for k, v := range rs.Primary.Attributes { + if k != setLengthKey && strings.HasPrefix(k, prefix) { + values = append(values, v) + } 
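+			// attribute keys look like "tags.#" (the length) and "tags.<idx>"; for set-typed attributes <idx> is an element hash rather than an ordinal index, so no ordering is assumed here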
+ } + + if len(values) == 0 { + return fmt.Errorf("Could not find %s.%s", n, name) + } + + for _, s := range set { + found := false + for _, v := range values { + if s == v { + found = true + break + } + } + + if !found { + return fmt.Errorf("%s was not found in the set %s", s, name) + } + } + + return nil + } +} + +func testAccIBMComputeVmInstanceConfigBasic(hostname, domain, networkSpeed, cores, memory, userMetadata, tags string) string { + return fmt.Sprintf(` +resource "ibm_compute_vm_instance" "terraform-acceptance-test-1" { + hostname = "%s" + domain = "%s" + os_reference_code = "DEBIAN_7_64" + datacenter = "wdc04" + network_speed = %s + hourly_billing = true + private_network_only = false + cores = %s + memory = %s + disks = [25, 10, 20] + user_metadata = "%s" + tags = ["%s"] + dedicated_acct_host_only = true + local_disk = false + ipv6_enabled = true + secondary_ip_count = 4 +}`, hostname, domain, networkSpeed, cores, memory, userMetadata, tags) +} + +func testAccIBMComputeVmInstanceConfigPostInstallScriptURI(hostname, domain string) string { + return fmt.Sprintf(` +resource "ibm_compute_vm_instance" "terraform-acceptance-test-pISU" { + hostname = "%s" + domain = "%s" + os_reference_code = "DEBIAN_7_64" + datacenter = "wdc04" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25, 10, 20] + user_metadata = "{\"value\":\"newvalue\"}" + dedicated_acct_host_only = true + local_disk = false + post_install_script_uri = "https://www.google.com" +}`, hostname, domain) +} + +func testAccIBMComputeVmInstanceConfigBlockDeviceTemplateGroup(hostname, domain, imageID string) string { + return fmt.Sprintf(` +resource "ibm_compute_vm_instance" "terraform-acceptance-test-BDTGroup" { + hostname = "%s" + domain = "%s" + datacenter = "wdc04" + network_speed = 10 + hourly_billing = false + cores = 1 + memory = 1024 + local_disk = false + image_id = %s +}`, hostname, domain, imageID) +} + +func testAccIBMComputeVmInstanceConfigCustomImageMultipleDisks(hostname, domain, imageID string) string { + return fmt.Sprintf(` +resource "ibm_compute_vm_instance" "terraform-acceptance-test-disks" { + hostname = "%s" + domain = "%s" + datacenter = "wdc04" + network_speed = 10 + hourly_billing = false + cores = 1 + memory = 1024 + local_disk = false + image_id = %s + disks = [25, 10] +}`, hostname, domain, imageID) +} + +const fsConfig1 = ` +resource "ibm_storage_file" "fs1" { + type = "Endurance" + datacenter = "wdc04" + capacity = 20 + iops = 0.25 + snapshot_capacity = 10 +} +` + +const bsConfig1 = `resource "ibm_storage_block" "bs" { + type = "Endurance" + datacenter = "wdc04" + capacity = 20 + iops = 0.25 + snapshot_capacity = 10 + os_format_type = "Linux" +} +` + +const fsConfig2 = `resource "ibm_storage_file" "fs2" { + type = "Endurance" + datacenter = "wdc04" + capacity = 20 + iops = 0.25 + snapshot_capacity = 10 +} + +` + +func testAccessToStoragesBasic(hostname, domain string) string { + config := fmt.Sprintf(` +resource "ibm_compute_vm_instance" "terraform-vsi-storage-access" { + hostname = "%s" + domain = "%s" + datacenter = "wdc04" + network_speed = 10 + hourly_billing = true + file_storage_ids = ["${ibm_storage_file.fs1.id}"] + block_storage_ids = ["${ibm_storage_block.bs.id}"] + + cores = 1 + memory = 1024 + local_disk = false + os_reference_code = "DEBIAN_7_64" + disks = [25, 10] +} +%s +%s + +`, hostname, domain, fsConfig1, bsConfig1) + return config +} + +func testAccessToStoragesUpdate(hostname, domain string) string { + return 
fmt.Sprintf(`
+resource "ibm_compute_vm_instance" "terraform-vsi-storage-access" {
+  hostname          = "%s"
+  domain            = "%s"
+  datacenter        = "wdc04"
+  network_speed     = 10
+  hourly_billing    = true
+  file_storage_ids  = ["${ibm_storage_file.fs2.id}"]
+  block_storage_ids = []
+  cores             = 1
+  memory            = 1024
+  local_disk        = false
+  os_reference_code = "DEBIAN_7_64"
+  disks             = [25, 10]
+}
+
+%s
+
+`, hostname, domain, fsConfig2)
+}
diff --git a/ibm/resource_ibm_container_bind_service.go b/ibm/resource_ibm_container_bind_service.go
new file mode 100644
index 0000000000..78ee0fa61e
--- /dev/null
+++ b/ibm/resource_ibm_container_bind_service.go
@@ -0,0 +1,136 @@
+package ibm
+
+import (
+	"fmt"
+
+	v1 "github.com/IBM-Bluemix/bluemix-go/api/container/containerv1"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceIBMContainerBindService() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMContainerBindServiceCreate,
+		Read:     resourceIBMContainerBindServiceRead,
+		Delete:   resourceIBMContainerBindServiceDelete,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"cluster_name_id": {
+				Type:     schema.TypeString,
+				ForceNew: true,
+				Required: true,
+			},
+			"service_instance_space_guid": {
+				Type:        schema.TypeString,
+				Description: "The space guid the service instance belongs to",
+				ForceNew:    true,
+				Required:    true,
+			},
+			"service_instance_name_id": {
+				Type:     schema.TypeString,
+				ForceNew: true,
+				Required: true,
+			},
+			"namespace_id": {
+				Type:     schema.TypeString,
+				ForceNew: true,
+				Required: true,
+			},
+			"secret_name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"org_guid": {
+				Description: "The bluemix organization guid this cluster belongs to",
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+			},
+			"space_guid": {
+				Description: "The bluemix space guid this cluster belongs to",
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+			},
+			"account_guid": {
+				Description: "The bluemix account guid this cluster belongs to",
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+			},
+		},
+	}
+}
+
+// getClusterTargetHeader builds the org/space/account target header required by the container service API.
+func getClusterTargetHeader(d *schema.ResourceData) v1.ClusterTargetHeader {
+	orgGUID := d.Get("org_guid").(string)
+	spaceGUID := d.Get("space_guid").(string)
+	accountGUID := d.Get("account_guid").(string)
+
+	targetEnv := v1.ClusterTargetHeader{
+		OrgID:     orgGUID,
+		SpaceID:   spaceGUID,
+		AccountID: accountGUID,
+	}
+	return targetEnv
+}
+
+func resourceIBMContainerBindServiceCreate(d *schema.ResourceData, meta interface{}) error {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+	clusterNameID := d.Get("cluster_name_id").(string)
+	serviceInstanceSpaceGUID := d.Get("service_instance_space_guid").(string)
+	serviceInstanceNameID := d.Get("service_instance_name_id").(string)
+	namespaceID := d.Get("namespace_id").(string)
+
+	bindService := v1.ServiceBindRequest{
+		ClusterNameOrID:         clusterNameID,
+		SpaceGUID:               serviceInstanceSpaceGUID,
+		ServiceInstanceNameOrID: serviceInstanceNameID,
+		NamespaceID:             namespaceID,
+	}
+
+	targetEnv := getClusterTargetHeader(d)
+	bindResp, err := csClient.Clusters().BindService(bindService, targetEnv)
+	if err != nil {
+		return err
+	}
+	// FIXME: the ID would typically be the ID returned by the API; SecretName should probably be used instead
+	d.SetId(clusterNameID)
+	d.Set("service_instance_name_id", serviceInstanceNameID)
+	d.Set("namespace_id", namespaceID)
+	d.Set("space_guid", serviceInstanceSpaceGUID)
+	d.Set("secret_name", bindResp.SecretName)
+
+	return resourceIBMContainerBindServiceRead(d, meta)
+}
+
+func resourceIBMContainerBindServiceRead(d *schema.ResourceData, meta interface{}) error {
+	// there is no API to read back the credentials, so leave the schema as it is
+	return nil
+}
+
+func resourceIBMContainerBindServiceDelete(d *schema.ResourceData, meta interface{}) error {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+	clusterID := d.Id()
+	namespace := d.Get("namespace_id").(string)
+	serviceInstanceNameID := d.Get("service_instance_name_id").(string)
+	targetEnv := getClusterTargetHeader(d)
+
+	err = csClient.Clusters().UnBindService(clusterID, namespace, serviceInstanceNameID, targetEnv)
+	if err != nil {
+		return fmt.Errorf("Error unbinding service: %s", err)
+	}
+	return nil
+}
+
+// The pure Armada API is not available for this; Exists could still be implemented via the k8s API
+/*
+func resourceIBMContainerBindServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+
+}*/
diff --git a/ibm/resource_ibm_container_bind_service_test.go b/ibm/resource_ibm_container_bind_service_test.go
new file mode 100644
index 0000000000..6bf0280562
--- /dev/null
+++ b/ibm/resource_ibm_container_bind_service_test.go
@@ -0,0 +1,90 @@
+package ibm
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccIBMContainerBindService_basic(t *testing.T) {
+	serviceName := "testService"
+	serviceKey := "testKey"
+	clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt())
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccCheckIBMContainerBindService_basic(clusterName, serviceName, serviceKey),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(
+						"ibm_container_bind_service.bind_service", "namespace_id", "default"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckIBMContainerBindService_basic(clusterName, serviceName, serviceKey string) string {
+	return fmt.Sprintf(`
+
+data "ibm_org" "org" {
+  org = "%s"
+}
+
+data "ibm_space" "space" {
+  org   = "%s"
+  space = "%s"
+}
+
+data "ibm_account" "acc" {
+  org_guid = "${data.ibm_org.org.id}"
+}
+
+resource "ibm_container_cluster" "testacc_cluster" {
+  name       = "%s"
+  datacenter = "dal10"
+
+  org_guid     = "${data.ibm_org.org.id}"
+  space_guid   = "${data.ibm_space.space.id}"
+  account_guid = "${data.ibm_account.acc.id}"
+
+  workers = [{
+    name = "worker1"
+
+    action = "add"
+  }]
+
+  machine_type    = "free"
+  isolation       = "public"
+  public_vlan_id  = "vlan"
+  private_vlan_id = "vlan"
+}
+
+
+resource "ibm_service_instance" "service" {
+  name       = "%s"
+  space_guid = "${data.ibm_space.space.id}"
+  service    = "cloudantNoSQLDB"
+  plan       = "Lite"
+  tags       = ["cluster-service", "cluster-bind"]
+}
+
+resource "ibm_service_key" "serviceKey" {
+  name                  = "%s"
+  service_instance_guid = "${ibm_service_instance.service.id}"
+}
+
+resource "ibm_container_bind_service" "bind_service" {
+  cluster_name_id             = "${ibm_container_cluster.testacc_cluster.name}"
+  service_instance_space_guid = "${data.ibm_space.space.id}"
+  service_instance_name_id    = "${ibm_service_instance.service.id}"
+  namespace_id                = "default"
+  org_guid                    = "${data.ibm_org.org.id}"
+  space_guid                  = "${data.ibm_space.space.id}"
+  account_guid                = "${data.ibm_account.acc.id}"
+}
+	`, cfOrganization, cfOrganization, cfSpace, clusterName, serviceName, serviceKey)
+}
diff --git a/ibm/resource_ibm_container_cluster.go
b/ibm/resource_ibm_container_cluster.go new file mode 100644 index 0000000000..13f830bb10 --- /dev/null +++ b/ibm/resource_ibm_container_cluster.go @@ -0,0 +1,623 @@ +package ibm + +import ( + "fmt" + "log" + "strings" + "time" + + v1 "github.com/IBM-Bluemix/bluemix-go/api/container/containerv1" + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +const ( + clusterNormal = "normal" + workerNormal = "normal" + subnetNormal = "normal" + workerReadyState = "Ready" + workerDeleteState = "deleted" + + clusterProvisioning = "provisioning" + workerProvisioning = "provisioning" + subnetProvisioning = "provisioning" +) + +func resourceIBMContainerCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMContainerClusterCreate, + Read: resourceIBMContainerClusterRead, + Update: resourceIBMContainerClusterUpdate, + Delete: resourceIBMContainerClusterDelete, + Exists: resourceIBMContainerClusterExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The cluster name", + }, + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The datacenter where this cluster will be deployed", + }, + "workers": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "action": { + Type: schema.TypeString, + Optional: true, + Default: "add", + ValidateFunc: validateAllowedStringValue([]string{"add", "reboot", "reload"}), + }, + }, + }, + }, + + "machine_type": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "isolation": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "billing": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: "hourly", + }, + + "public_vlan_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: nil, + }, + + "private_vlan_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: nil, + }, + "ingress_hostname": { + Type: schema.TypeString, + Computed: true, + }, + "ingress_secret": { + Type: schema.TypeString, + Computed: true, + }, + "no_subnet": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + "server_url": { + Type: schema.TypeString, + Computed: true, + }, + "worker_num": { + Type: schema.TypeInt, + Computed: true, + }, + "subnet_id": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "webhook": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "level": { + Type: schema.TypeString, + Required: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAllowedStringValue([]string{"slack"}), + }, + "url": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "org_guid": { + Description: "The bluemix organization guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "space_guid": { + Description: "The bluemix space guid this cluster belongs to", + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "account_guid": { + Description: "The bluemix 
account guid this cluster belongs to",
+				Type:        schema.TypeString,
+				Required:    true,
+				ForceNew:    true,
+			},
+			"wait_time_minutes": {
+				Type:     schema.TypeInt,
+				Optional: true,
+				Default:  90,
+			},
+		},
+	}
+}
+
+func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{}) error {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+
+	name := d.Get("name").(string)
+	datacenter := d.Get("datacenter").(string)
+	workers := d.Get("workers").([]interface{})
+	billing := d.Get("billing").(string)
+	machineType := d.Get("machine_type").(string)
+	publicVlanID := d.Get("public_vlan_id").(string)
+	privateVlanID := d.Get("private_vlan_id").(string)
+	webhooks := d.Get("webhook").([]interface{})
+	noSubnet := d.Get("no_subnet").(bool)
+	isolation := d.Get("isolation").(string)
+
+	params := v1.ClusterCreateRequest{
+		Name:        name,
+		Datacenter:  datacenter,
+		WorkerNum:   len(workers),
+		Billing:     billing,
+		MachineType: machineType,
+		PublicVlan:  publicVlanID,
+		PrivateVlan: privateVlanID,
+		NoSubnet:    noSubnet,
+		Isolation:   isolation,
+	}
+
+	targetEnv := getClusterTargetHeader(d)
+
+	cls, err := csClient.Clusters().Create(params, targetEnv)
+	if err != nil {
+		return err
+	}
+	d.SetId(cls.ID)
+	// wait for cluster availability
+	_, err = WaitForClusterAvailable(d, meta, targetEnv)
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for cluster (%s) to become ready: %s", d.Id(), err)
+	}
+	// wait for worker availability
+	_, err = WaitForWorkerAvailable(d, meta, targetEnv)
+	if err != nil {
+		return fmt.Errorf(
+			"Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err)
+	}
+
+	subnetAPI := csClient.Subnets()
+	subnetIDs := d.Get("subnet_id").(*schema.Set)
+	for _, subnetID := range subnetIDs.List() {
+		if subnetID != "" {
+			err = subnetAPI.AddSubnet(cls.ID, subnetID.(string), targetEnv)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if len(subnetIDs.List()) > 0 {
+		_, err = WaitForSubnetAvailable(d, meta, targetEnv)
+		if err != nil {
+			return fmt.Errorf(
+				"Error waiting for initializing ingress hostname and secret: %s", err)
+		}
+	}
+	whkAPI := csClient.WebHooks()
+	for _, e := range webhooks {
+		pack := e.(map[string]interface{})
+		webhook := v1.WebHook{
+			Level: pack["level"].(string),
+			Type:  pack["type"].(string),
+			URL:   pack["url"].(string),
+		}
+
+		whkAPI.Add(cls.ID, webhook, targetEnv)
+	}
+
+	workersInfo := []map[string]string{}
+	wrkAPI := csClient.Workers()
+	workerFields, err := wrkAPI.List(cls.ID, targetEnv)
+	if err != nil {
+		return err
+	}
+	// build a map of worker name, ID, and action for each configured worker
+	for i, e := range workers {
+		pack := e.(map[string]interface{})
+		var worker = map[string]string{
+			"name":   pack["name"].(string),
+			"id":     workerFields[i].ID,
+			"action": pack["action"].(string),
+		}
+		workersInfo = append(workersInfo, worker)
+	}
+	d.Set("workers", workersInfo)
+
+	return resourceIBMContainerClusterRead(d, meta)
+}
+
+func resourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+
+	targetEnv := getClusterTargetHeader(d)
+
+	clusterID := d.Id()
+	cls, err := csClient.Clusters().Find(clusterID, targetEnv)
+	if err != nil {
+		return fmt.Errorf("Error retrieving armada cluster: %s", err)
+	}
+
+	d.Set("name", cls.Name)
+	d.Set("server_url", cls.ServerURL)
+	d.Set("ingress_hostname", cls.IngressHostname)
+	d.Set("ingress_secret", cls.IngressSecretName)
+	d.Set("worker_num", cls.WorkerCount)
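+	// subnet_id is not returned by the cluster lookup above, so the value already in state is carried through unchanged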
+	d.Set("subnet_id", d.Get("subnet_id").(*schema.Set))
+	return nil
+}
+
+func resourceIBMContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return err
+	}
+
+	targetEnv := getClusterTargetHeader(d)
+
+	subnetAPI := csClient.Subnets()
+	whkAPI := csClient.WebHooks()
+	wrkAPI := csClient.Workers()
+
+	clusterID := d.Id()
+	workersInfo := []map[string]string{}
+	if d.HasChange("workers") {
+		oldWorkers, newWorkers := d.GetChange("workers")
+		oldWorker := oldWorkers.([]interface{})
+		newWorker := newWorkers.([]interface{})
+		for _, nW := range newWorker {
+			newPack := nW.(map[string]interface{})
+			exists := false
+			for _, oW := range oldWorker {
+				oldPack := oW.(map[string]interface{})
+				if strings.Compare(newPack["name"].(string), oldPack["name"].(string)) == 0 {
+					exists = true
+					if strings.Compare(newPack["action"].(string), oldPack["action"].(string)) != 0 {
+						params := v1.WorkerParam{
+							Action: newPack["action"].(string),
+						}
+						wrkAPI.Update(clusterID, oldPack["id"].(string), params, targetEnv)
+						var worker = map[string]string{
+							"name":   newPack["name"].(string),
+							"id":     newPack["id"].(string),
+							"action": newPack["action"].(string),
+						}
+						workersInfo = append(workersInfo, worker)
+					} else {
+						var worker = map[string]string{
+							"name":   oldPack["name"].(string),
+							"id":     oldPack["id"].(string),
+							"action": oldPack["action"].(string),
+						}
+						workersInfo = append(workersInfo, worker)
+					}
+				}
+			}
+			if !exists {
+				params := v1.WorkerParam{
+					Action: "add",
+					Count:  1,
+				}
+				err := wrkAPI.Add(clusterID, params, targetEnv)
+				if err != nil {
+					return fmt.Errorf("Error adding worker to cluster")
+				}
+				id, err := getID(d, meta, clusterID, oldWorker, workersInfo)
+				if err != nil {
+					return fmt.Errorf("Error getting id of worker")
+				}
+				var worker = map[string]string{
+					"name":   newPack["name"].(string),
+					"id":     id,
+					"action": newPack["action"].(string),
+				}
+				workersInfo = append(workersInfo, worker)
+			}
+		}
+		for _, oW := range oldWorker {
+			oldPack := oW.(map[string]interface{})
+			exists := false
+			for _, nW := range newWorker {
+				newPack := nW.(map[string]interface{})
+				exists = exists || (strings.Compare(oldPack["name"].(string), newPack["name"].(string)) == 0)
+			}
+			if !exists {
+				wrkAPI.Delete(clusterID, oldPack["id"].(string), targetEnv)
+			}
+		}
+		// wait for the new and remaining workers to become available (runs after worker deletes as well)
+		WaitForWorkerAvailable(d, meta, targetEnv)
+		d.Set("workers", workersInfo)
+	}
+
+	// TODO: report that webhooks cannot be deleted here if a webhook is removed in the changes
+	if d.HasChange("webhook") {
+		oldHooks, newHooks := d.GetChange("webhook")
+		oldHook := oldHooks.([]interface{})
+		newHook := newHooks.([]interface{})
+		for _, nH := range newHook {
+			newPack := nH.(map[string]interface{})
+			exists := false
+			for _, oH := range oldHook {
+				oldPack := oH.(map[string]interface{})
+				if (strings.Compare(newPack["level"].(string), oldPack["level"].(string)) == 0) && (strings.Compare(newPack["type"].(string), oldPack["type"].(string)) == 0) && (strings.Compare(newPack["url"].(string), oldPack["url"].(string)) == 0) {
+					exists = true
+				}
+			}
+			if !exists {
+				webhook := v1.WebHook{
+					Level: newPack["level"].(string),
+					Type:  newPack["type"].(string),
+					URL:   newPack["url"].(string),
+				}
+
+				whkAPI.Add(clusterID, webhook, targetEnv)
+			}
+		}
+	}
+	// TODO: report that subnets cannot be detached here if a subnet is removed in the changes
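+	// subnetAdd records whether any subnet was attached below, so the ingress wait runs only when needed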
var subnetAdd bool + if d.HasChange("subnet_id") { + oldSubnets, newSubnets := d.GetChange("subnet_id") + oldSubnet := oldSubnets.(*schema.Set) + newSubnet := newSubnets.(*schema.Set) + for _, nS := range newSubnet.List() { + exists := false + for _, oS := range oldSubnet.List() { + if strings.Compare(nS.(string), oS.(string)) == 0 { + exists = true + } + } + if !exists { + err := subnetAPI.AddSubnet(clusterID, nS.(string), targetEnv) + if err != nil { + return err + } + subnetAdd = true + } + } + if subnetAdd { + _, err = WaitForSubnetAvailable(d, meta, targetEnv) + if err != nil { + return fmt.Errorf( + "Error waiting for initializing ingress hostname and secret: %s", err) + } + } + } + return resourceIBMContainerClusterRead(d, meta) +} + +func getID(d *schema.ResourceData, meta interface{}, clusterID string, oldWorkers []interface{}, workerInfo []map[string]string) (string, error) { + targetEnv := getClusterTargetHeader(d) + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return "", err + } + workerFields, err := csClient.Workers().List(clusterID, targetEnv) + if err != nil { + return "", err + } + for _, wF := range workerFields { + exists := false + for _, oW := range oldWorkers { + oldPack := oW.(map[string]interface{}) + if strings.Compare(wF.ID, oldPack["id"].(string)) == 0 || strings.Compare(wF.State, "deleted") == 0 { + exists = true + } + } + if !exists { + for i := 0; i < len(workerInfo); i++ { + pack := workerInfo[i] + exists = exists || (strings.Compare(wF.ID, pack["id"]) == 0) + } + if !exists { + return wF.ID, nil + } + } + } + + return "", fmt.Errorf("Unable to get ID of worker") +} + +func resourceIBMContainerClusterDelete(d *schema.ResourceData, meta interface{}) error { + targetEnv := getClusterTargetHeader(d) + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return err + } + clusterID := d.Id() + err = csClient.Clusters().Delete(clusterID, targetEnv) + if err != nil { + return fmt.Errorf("Error deleting cluster: %s", err) + } + return nil +} + +// WaitForClusterAvailable Waits for cluster creation +func WaitForClusterAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) { + csClient, err := meta.(ClientSession).ContainerAPI() + if err != nil { + return nil, err + } + log.Printf("Waiting for cluster (%s) to be available.", d.Id()) + id := d.Id() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", clusterProvisioning}, + Target: []string{clusterNormal}, + Refresh: clusterStateRefreshFunc(csClient.Clusters(), id, d, target), + Timeout: time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func clusterStateRefreshFunc(client v1.Clusters, instanceID string, d *schema.ResourceData, target v1.ClusterTargetHeader) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + clusterFields, err := client.Find(instanceID, target) + if err != nil { + return nil, "", fmt.Errorf("Error retrieving cluster: %s", err) + } + // Check active transactions + log.Println("Checking cluster") + //Check for cluster state to be normal + log.Println("Checking cluster state", strings.Compare(clusterFields.State, clusterNormal)) + if strings.Compare(clusterFields.State, clusterNormal) != 0 { + return clusterFields, clusterProvisioning, nil + } + return clusterFields, clusterNormal, nil + } +} + +// WaitForWorkerAvailable Waits for worker creation 
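+// It polls the cluster's workers until each one reports the normal state and Ready status, or the wait_time_minutes timeout elapses.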
+func WaitForWorkerAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return nil, err
+	}
+	log.Printf("Waiting for workers of the cluster (%s) to be available.", d.Id())
+	id := d.Id()
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"retry", workerProvisioning},
+		Target:     []string{workerNormal},
+		Refresh:    workerStateRefreshFunc(csClient.Workers(), id, d, target),
+		Timeout:    time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func workerStateRefreshFunc(client v1.Workers, instanceID string, d *schema.ResourceData, target v1.ClusterTargetHeader) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		workerFields, err := client.List(instanceID, target)
+		if err != nil {
+			return nil, "", fmt.Errorf("Error retrieving workers for cluster: %s", err)
+		}
+		log.Println("Checking workers...")
+		// a worker has both a State and a Status field, so check both
+		for _, e := range workerFields {
+			if strings.Compare(e.State, workerNormal) != 0 || strings.Compare(e.Status, workerReadyState) != 0 {
+				if strings.Compare(e.State, "deleted") != 0 {
+					return workerFields, workerProvisioning, nil
+				}
+			}
+		}
+		return workerFields, workerNormal, nil
+	}
+}
+
+func WaitForSubnetAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return nil, err
+	}
+	log.Printf("Waiting for the Ingress subdomain and secret to be assigned.")
+	id := d.Id()
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"retry", workerProvisioning},
+		Target:     []string{workerNormal},
+		Refresh:    subnetStateRefreshFunc(csClient.Clusters(), id, d, target),
+		Timeout:    time.Duration(d.Get("wait_time_minutes").(int)) * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	return stateConf.WaitForState()
+}
+
+func subnetStateRefreshFunc(client v1.Clusters, instanceID string, d *schema.ResourceData, target v1.ClusterTargetHeader) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		cluster, err := client.Find(instanceID, target)
+		if err != nil {
+			return nil, "", fmt.Errorf("Error retrieving cluster: %s", err)
+		}
+		if cluster.IngressHostname == "" && cluster.IngressSecretName == "" {
+			return cluster, subnetProvisioning, nil
+		}
+		return cluster, subnetNormal, nil
+	}
+}
+
+func resourceIBMContainerClusterExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	csClient, err := meta.(ClientSession).ContainerAPI()
+	if err != nil {
+		return false, err
+	}
+	targetEnv := getClusterTargetHeader(d)
+	clusterID := d.Id()
+	cls, err := csClient.Clusters().Find(clusterID, targetEnv)
+	if err != nil {
+		if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+			if apiErr.StatusCode() == 404 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("Error communicating with the API: %s", err)
+	}
+	return cls.ID == clusterID, nil
+}
diff --git a/ibm/resource_ibm_container_cluster_test.go b/ibm/resource_ibm_container_cluster_test.go
new file mode 100644
index 0000000000..e63f80a813
--- /dev/null
+++ b/ibm/resource_ibm_container_cluster_test.go
@@ -0,0 +1,144 @@
+package ibm
+
+import (
+	"fmt"
+	"log"
+	"strings"
+	"testing"
+
+	bluemix
"github.com/IBM-Bluemix/bluemix-go" + "github.com/IBM-Bluemix/bluemix-go/session" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + + "github.com/IBM-Bluemix/bluemix-go/api/account/accountv2" + v1 "github.com/IBM-Bluemix/bluemix-go/api/container/containerv1" + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccIBMContainerCluster_basic(t *testing.T) { + clusterName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMContainerClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMContainerCluster_basic(clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_container_cluster.testacc_cluster", "name", clusterName), + resource.TestCheckResourceAttr( + "ibm_container_cluster.testacc_cluster", "worker_num", "1"), + ), + }, + }, + }) +} + +func testAccCheckIBMContainerClusterDestroy(s *terraform.State) error { + csClient, err := testAccProvider.Meta().(ClientSession).ContainerAPI() + if err != nil { + return err + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_container_cluster" { + continue + } + + targetEnv := getClusterTargetHeaderTestACC() + // Try to find the key + _, err := csClient.Clusters().Find(rs.Primary.ID, targetEnv) + + if err != nil && !strings.Contains(err.Error(), "404") { + return fmt.Errorf("Error waiting for cluster (%s) to be destroyed: %s", rs.Primary.ID, err) + } + } + + return nil +} + +func getClusterTargetHeaderTestACC() v1.ClusterTargetHeader { + org := cfOrganization + space := cfSpace + c := new(bluemix.Config) + sess, err := session.New(c) + if err != nil { + log.Fatal(err) + } + + client, err := mccpv2.New(sess) + + if err != nil { + log.Fatal(err) + } + + orgAPI := client.Organizations() + myorg, err := orgAPI.FindByName(org, BluemixRegion) + + if err != nil { + log.Fatal(err) + } + + spaceAPI := client.Spaces() + myspace, err := spaceAPI.FindByNameInOrg(myorg.GUID, space, BluemixRegion) + + if err != nil { + log.Fatal(err) + } + + accClient, err := accountv2.New(sess) + if err != nil { + log.Fatal(err) + } + accountAPI := accClient.Accounts() + myAccount, err := accountAPI.FindByOrg(myorg.GUID, c.Region) + if err != nil { + log.Fatal(err) + } + + target := v1.ClusterTargetHeader{ + OrgID: myorg.GUID, + SpaceID: myspace.GUID, + AccountID: myAccount.GUID, + } + + return target +} + +func testAccCheckIBMContainerCluster_basic(clusterName string) string { + return fmt.Sprintf(` + +data "ibm_org" "org" { + org = "%s" +} + +data "ibm_space" "space" { + org = "%s" + space = "%s" +} + +data "ibm_account" "acc" { + org_guid = "${data.ibm_org.org.id}" +} + +resource "ibm_container_cluster" "testacc_cluster" { + name = "%s" + datacenter = "dal10" + + org_guid = "${data.ibm_org.org.id}" + space_guid = "${data.ibm_space.space.id}" + account_guid = "${data.ibm_account.acc.id}" + + workers = [{ + name = "worker1" + }] + + machine_type = "free" + isolation = "public" + public_vlan_id = "vlan" + private_vlan_id = "vlan" +} `, cfOrganization, cfOrganization, cfSpace, clusterName) +} diff --git a/ibm/resource_ibm_dns_domain.go b/ibm/resource_ibm_dns_domain.go new file mode 100644 index 0000000000..4dbd487b1e --- /dev/null +++ b/ibm/resource_ibm_dns_domain.go @@ -0,0 +1,203 @@ +package ibm + +import ( + "errors" + "fmt" + "log" + "strconv" + + 
"github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMDNSDomain() *schema.Resource { + return &schema.Resource{ + Exists: resourceIBMDNSDomainExists, + Create: resourceIBMDNSDomainCreate, + Read: resourceIBMDNSDomainRead, + Update: resourceIBMDNSDomainUpdate, + Delete: resourceIBMDNSDomainDelete, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "serial": { + Type: schema.TypeString, + Computed: true, + }, + + "update_date": { + Type: schema.TypeString, + Computed: true, + }, + + "target": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceIBMDNSDomainCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainService(sess) + + // prepare creation parameters + opts := datatypes.Dns_Domain{ + Name: sl.String(d.Get("name").(string)), + } + + opts.ResourceRecords = []datatypes.Dns_Domain_ResourceRecord{} + + if targetString, ok := d.GetOk("target"); ok { + opts.ResourceRecords = []datatypes.Dns_Domain_ResourceRecord{ + { + Data: sl.String(targetString.(string)), + Host: sl.String("@"), + Ttl: sl.Int(86400), + Type: sl.String("a"), + }, + } + } + + // create Dns_Domain object + response, err := service.CreateObject(&opts) + if err != nil { + return fmt.Errorf("Error creating Dns Domain: %s", err) + } + + // populate id + id := *response.Id + d.SetId(strconv.Itoa(id)) + log.Printf("[INFO] Created Dns Domain: %d", id) + + // read remote state + return resourceIBMDNSDomainRead(d, meta) +} + +func resourceIBMDNSDomainRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainService(sess) + + dnsId, _ := strconv.Atoi(d.Id()) + + // retrieve remote object state + dns_domain, err := service.Id(dnsId).Mask( + "id,name,updateDate,resourceRecords", + ).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving Dns Domain %d: %s", dnsId, err) + } + + // populate fields + d.Set("name", dns_domain.Name) + d.Set("serial", sl.Get(dns_domain.Serial, nil)) + d.Set("update_date", sl.Get(dns_domain.UpdateDate, nil)) + + // find a record with host @; that will have the current target. + for _, record := range dns_domain.ResourceRecords { + if *record.Type == "a" && *record.Host == "@" { + d.Set("target", *record.Data) + break + } + } + + return nil +} + +func resourceIBMDNSDomainUpdate(d *schema.ResourceData, meta interface{}) error { + // If the target has been updated, find the corresponding dns record and update its data. + sess := meta.(ClientSession).SoftLayerSession() + domainId, _ := strconv.Atoi(d.Id()) + + if !d.HasChange("target") { // target is the only editable field + return nil + } + + newTarget := d.Get("target").(string) + + // retrieve domain state + domainService := services.GetDnsDomainService(sess) + domain, err := domainService.Id(domainId).Mask( + "id,name,updateDate,resourceRecords", + ).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving DNS resource %d: %s", domainId, err) + } + + // find a record with host @; that will have the current target. 
+ var record datatypes.Dns_Domain_ResourceRecord + for _, record = range domain.ResourceRecords { + if *record.Type == "a" && *record.Host == "@" { + break + } + } + + if record.Id == nil { + return fmt.Errorf("Could not find DNS target record for domain %s (%d)", + sl.Get(domain.Name), sl.Get(domain.Id)) + } + + record.Data = sl.String(newTarget) + + _, err = services.GetDnsDomainResourceRecordService(sess). + Id(*record.Id).EditObject(&record) + + if err != nil { + return fmt.Errorf("Error editing DNS target record for domain %s (%d): %s", + sl.Get(domain.Name), sl.Get(domain.Id), err) + } + + return nil +} + +func resourceIBMDNSDomainDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainService(sess) + + dnsId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting Dns Domain: %s", err) + } + + log.Printf("[INFO] Deleting Dns Domain: %d", dnsId) + result, err := service.Id(dnsId).DeleteObject() + if err != nil { + return fmt.Errorf("Error deleting Dns Domain: %s", err) + } + + if !result { + return errors.New("Error deleting Dns Domain") + } + + d.SetId("") + return nil +} + +func resourceIBMDNSDomainExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainService(sess) + + dnsId, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + result, err := service.Id(dnsId).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error retrieving domain info: %s", err) + } + return result.Id != nil && *result.Id == dnsId, nil +} diff --git a/ibm/resource_ibm_dns_domain_test.go b/ibm/resource_ibm_dns_domain_test.go new file mode 100644 index 0000000000..c8d2c390ae --- /dev/null +++ b/ibm/resource_ibm_dns_domain_test.go @@ -0,0 +1,183 @@ +package ibm + +import ( + "errors" + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func TestAccIBMDNSDomain_Basic(t *testing.T) { + var dns_domain datatypes.Dns_Domain + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMDNSDomainDestroy, + Steps: []resource.TestStep{ + { + Config: fmt.Sprintf(config, domainName1, target1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMDNSDomainExists("ibm_dns_domain.acceptance_test_dns_domain-1", &dns_domain), + testAccCheckIBMDNSDomainAttributes(&dns_domain), + saveIBMDNSDomainId(&dns_domain, &firstDnsId), + resource.TestCheckResourceAttr( + "ibm_dns_domain.acceptance_test_dns_domain-1", "name", domainName1), + resource.TestCheckResourceAttr( + "ibm_dns_domain.acceptance_test_dns_domain-1", "target", target1), + ), + Destroy: false, + }, + { + Config: fmt.Sprintf(config, domainName2, target1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMDNSDomainExists("ibm_dns_domain.acceptance_test_dns_domain-1", &dns_domain), + testAccCheckIBMDNSDomainAttributes(&dns_domain), + resource.TestCheckResourceAttr( + "ibm_dns_domain.acceptance_test_dns_domain-1", "name", domainName2), + 
resource.TestCheckResourceAttr( + "ibm_dns_domain.acceptance_test_dns_domain-1", "target", target1), + testAccCheckIBMDNSDomainChanged(&dns_domain), + ), + Destroy: false, + }, + { + Config: fmt.Sprintf(config, domainName2, target2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMDNSDomainExists("ibm_dns_domain.acceptance_test_dns_domain-1", &dns_domain), + testAccCheckIBMDNSDomainAttributes(&dns_domain), + resource.TestCheckResourceAttr( + "ibm_dns_domain.acceptance_test_dns_domain-1", "name", domainName2), + resource.TestCheckResourceAttr( + "ibm_dns_domain.acceptance_test_dns_domain-1", "target", target2), + ), + Destroy: false, + }, + }, + }) +} + +func testAccCheckIBMDNSDomainDestroy(s *terraform.State) error { + service := services.GetDnsDomainService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_dns_domain" { + continue + } + + dnsId, _ := strconv.Atoi(rs.Primary.ID) + + // Try to find the domain + _, err := service.Id(dnsId).GetObject() + + if err == nil { + return fmt.Errorf("Dns Domain with id %d still exists", dnsId) + } + } + + return nil +} + +func testAccCheckIBMDNSDomainAttributes(dns *datatypes.Dns_Domain) resource.TestCheckFunc { + return func(s *terraform.State) error { + + if name := sl.Get(dns.Name); name == "" { + return errors.New("Empty dns domain name") + } + + // find a record with host @; that will have the current target. + foundTarget := false + for _, record := range dns.ResourceRecords { + if *record.Type == "a" && *record.Host == "@" { + foundTarget = true + break + } + } + + if !foundTarget { + return fmt.Errorf("Target record not found for dns domain %s (%d)", sl.Get(dns.Name), sl.Get(dns.Id)) + } + + if id := sl.Get(dns.Id); id == 0 { + return fmt.Errorf("Bad dns domain id: %d", id) + } + + return nil + } +} + +func saveIBMDNSDomainId(dns *datatypes.Dns_Domain, id_holder *int) resource.TestCheckFunc { + return func(s *terraform.State) error { + *id_holder = *dns.Id + + return nil + } +} + +func testAccCheckIBMDNSDomainChanged(dns *datatypes.Dns_Domain) resource.TestCheckFunc { + return func(s *terraform.State) error { + service := services.GetDnsDomainService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + + _, err := service.Id(firstDnsId).Mask( + "id,name,updateDate,resourceRecords", + ).GetObject() + if err == nil { + return fmt.Errorf("Dns domain with id %d still exists", firstDnsId) + } + + return nil + } +} + +func testAccCheckIBMDNSDomainExists(n string, dns_domain *datatypes.Dns_Domain) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + + dns_id, _ := strconv.Atoi(rs.Primary.ID) + + service := services.GetDnsDomainService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + found_domain, err := service.Id(dns_id).Mask( + "id,name,updateDate,resourceRecords", + ).GetObject() + + if err != nil { + return err + } + + if strconv.Itoa(int(*found_domain.Id)) != rs.Primary.ID { + return errors.New("Record not found") + } + + *dns_domain = found_domain + + return nil + } +} + +var config = ` +resource "ibm_dns_domain" "acceptance_test_dns_domain-1" { + name = "%s" + target = "%s" +} +` + +var domainName1 = fmt.Sprintf("tfuatdomain%s.com", acctest.RandString(10)) +var domainName2 = fmt.Sprintf("tfuatdomain%s.com", acctest.RandString(10)) +var target1 = 
"172.16.0.100" +var target2 = "172.16.0.101" +var firstDnsId = 0 diff --git a/ibm/resource_ibm_dns_record.go b/ibm/resource_ibm_dns_record.go new file mode 100644 index 0000000000..afd2dbd58e --- /dev/null +++ b/ibm/resource_ibm_dns_record.go @@ -0,0 +1,431 @@ +package ibm + +import ( + "fmt" + "log" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +var allowedDomainRecordTypes = []string{ + "a", "aaaa", "cname", "mx", "ptr", "spf", "srv", "txt", +} +var ipv6Regexp *regexp.Regexp +var upcaseRegexp *regexp.Regexp + +func init() { + ipv6Regexp, _ = regexp.Compile( + "[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}:" + + "[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}:[a-zA-Z0-9]{4}", + ) + upcaseRegexp, _ = regexp.Compile("[A-Z]") +} + +func resourceIBMDNSRecord() *schema.Resource { + return &schema.Resource{ + Exists: resourceIBMDNSRecordExists, + Create: resourceIBMDNSRecordCreate, + Read: resourceIBMDNSRecordRead, + Update: resourceIBMDNSRecordUpdate, + Delete: resourceIBMDNSRecordDelete, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + "data": { + Type: schema.TypeString, + Required: true, + ValidateFunc: func(val interface{}, field string) (warnings []string, errors []error) { + value := val.(string) + if ipv6Regexp.MatchString(value) && upcaseRegexp.MatchString(value) { + errors = append( + errors, + fmt.Errorf( + "IPv6 addresses in the data property cannot have upper case letters: %s", + value, + ), + ) + } + return + }, + }, + + "domain_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "expire": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "host": { + Type: schema.TypeString, + Required: true, + }, + + "mx_priority": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "refresh": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "responsible_person": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "retry": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "minimum_ttl": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "ttl": { + Type: schema.TypeInt, + Required: true, + DefaultFunc: func() (interface{}, error) { + return 86400, nil + }, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(val interface{}, field string) (warnings []string, errors []error) { + value := val.(string) + for _, rtype := range allowedDomainRecordTypes { + if value == rtype { + return + } + } + + errors = append( + errors, + fmt.Errorf("%s is not one of the valid domain record types: %s", + value, strings.Join(allowedDomainRecordTypes, ", "), + ), + ) + return + }, + }, + + "service": { + Type: schema.TypeString, + Optional: true, + }, + + "protocol": { + Type: schema.TypeString, + Optional: true, + }, + + "port": { + Type: schema.TypeInt, + Optional: true, + }, + + "priority": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "weight": { + Type: schema.TypeInt, + Optional: true, + }, + }, + } +} + +// Creates DNS Domain Resource Record +// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/createObject +func resourceIBMDNSRecordCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + 
service := services.GetDnsDomainResourceRecordService(sess) + + opts := datatypes.Dns_Domain_ResourceRecord{ + Data: sl.String(d.Get("data").(string)), + DomainId: sl.Int(d.Get("domain_id").(int)), + Host: sl.String(d.Get("host").(string)), + Ttl: sl.Int(d.Get("ttl").(int)), + Type: sl.String(d.Get("type").(string)), + } + + if expire, ok := d.GetOk("expire"); ok { + opts.Expire = sl.Int(expire.(int)) + } + + if minimum, ok := d.GetOk("minimum_ttl"); ok { + opts.Minimum = sl.Int(minimum.(int)) + } + + if mxPriority, ok := d.GetOk("mx_priority"); ok { + opts.MxPriority = sl.Int(mxPriority.(int)) + } + + if refresh, ok := d.GetOk("refresh"); ok { + opts.Refresh = sl.Int(refresh.(int)) + } + + if responsiblePerson, ok := d.GetOk("responsible_person"); ok { + opts.ResponsiblePerson = sl.String(responsiblePerson.(string)) + } + + if retry, ok := d.GetOk("retry"); ok { + opts.Retry = sl.Int(retry.(int)) + } + + optsSrv := datatypes.Dns_Domain_ResourceRecord_SrvType{ + Dns_Domain_ResourceRecord: opts, + } + if *opts.Type == "srv" { + if serviceName, ok := d.GetOk("service"); ok { + optsSrv.Service = sl.String(serviceName.(string)) + } + + if protocol, ok := d.GetOk("protocol"); ok { + optsSrv.Protocol = sl.String(protocol.(string)) + } + + if priority, ok := d.GetOk("priority"); ok { + optsSrv.Priority = sl.Int(priority.(int)) + } + + if weight, ok := d.GetOk("weight"); ok { + optsSrv.Weight = sl.Int(weight.(int)) + } + + if port, ok := d.GetOk("port"); ok { + optsSrv.Port = sl.Int(port.(int)) + } + } + + log.Printf("[INFO] Creating DNS Resource %s Record for '%d' dns domain", *opts.Type, d.Get("id")) + + var err error + var id int + if *opts.Type == "srv" { + var record datatypes.Dns_Domain_ResourceRecord_SrvType + serviceSrv := services.GetDnsDomainResourceRecordSrvTypeService(sess) + record, err = serviceSrv.CreateObject(&optsSrv) + if record.Id != nil { + id = *record.Id + } + } else { + var record datatypes.Dns_Domain_ResourceRecord + record, err = service.CreateObject(&opts) + if record.Id != nil { + id = *record.Id + } + } + + if err != nil { + return fmt.Errorf("Error creating DNS Resource %s Record: %s", *opts.Type, err) + } + + d.SetId(fmt.Sprintf("%d", id)) + + log.Printf("[INFO] Dns Resource %s Record ID: %s", *opts.Type, d.Id()) + + return resourceIBMDNSRecordRead(d, meta) +} + +// Reads DNS Domain Resource Record from SL system +// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/getObject +func resourceIBMDNSRecordRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainResourceRecordService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + result, err := service.Id(id).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving DNS Resource Record: %s", err) + } + + // Required fields + d.Set("data", *result.Data) + d.Set("domain_id", *result.DomainId) + d.Set("host", *result.Host) + d.Set("type", *result.Type) + d.Set("ttl", *result.Ttl) + + // Optional fields + d.Set("expire", sl.Get(result.Expire, nil)) + d.Set("minimum_ttl", sl.Get(result.Minimum, nil)) + d.Set("mx_priority", sl.Get(result.MxPriority, nil)) + d.Set("responsible_person", sl.Get(result.ResponsiblePerson, nil)) + d.Set("refresh", sl.Get(result.Refresh, nil)) + d.Set("retry", sl.Get(result.Retry, nil)) + + if *result.Type == "srv" { + d.Set("service", sl.Get(result.Service, nil)) + d.Set("protocol", 
sl.Get(result.Protocol, nil)) + d.Set("port", sl.Get(result.Port, nil)) + d.Set("priority", sl.Get(result.Priority, nil)) + d.Set("weight", sl.Get(result.Weight, nil)) + } + + return nil +} + +// Updates DNS Domain Resource Record in SL system +// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/editObject +func resourceIBMDNSRecordUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + recordId, _ := strconv.Atoi(d.Id()) + + service := services.GetDnsDomainResourceRecordService(sess) + record, err := service.Id(recordId).GetObject() + if err != nil { + return fmt.Errorf("Error retrieving DNS Resource Record: %s", err) + } + + recordType := d.Get("type").(string) + + if data, ok := d.GetOk("data"); ok && d.HasChange("data") { + record.Data = sl.String(data.(string)) + } + + if domain_id, ok := d.GetOk("domain_id"); ok && d.HasChange("domain_id") { + record.DomainId = sl.Int(domain_id.(int)) + } + + if host, ok := d.GetOk("host"); ok && d.HasChange("host") { + record.Host = sl.String(host.(string)) + } + + if ttl, ok := d.GetOk("ttl"); ok && d.HasChange("ttl") { + record.Ttl = sl.Int(ttl.(int)) + } + + if expire, ok := d.GetOk("expire"); ok && d.HasChange("expire") { + record.Expire = sl.Int(expire.(int)) + } + + if minimum_ttl, ok := d.GetOk("minimum_ttl"); ok && d.HasChange("minimum_ttl") { + record.Minimum = sl.Int(minimum_ttl.(int)) + } + + if mx_priority, ok := d.GetOk("mx_priority"); ok && d.HasChange("mx_priority") { + record.MxPriority = sl.Int(mx_priority.(int)) + } + + if refresh, ok := d.GetOk("refresh"); ok && d.HasChange("refresh") { + record.Refresh = sl.Int(refresh.(int)) + } + + if contact_email, ok := d.GetOk("responsible_person"); ok && d.HasChange("responsible_person") { + record.ResponsiblePerson = sl.String(contact_email.(string)) + } + + if retry, ok := d.GetOk("retry"); ok && d.HasChange("retry") { + record.Retry = sl.Int(retry.(int)) + } + + recordSrv := datatypes.Dns_Domain_ResourceRecord_SrvType{ + Dns_Domain_ResourceRecord: record, + } + if recordType == "srv" { + if service, ok := d.GetOk("service"); ok && d.HasChange("service") { + recordSrv.Service = sl.String(service.(string)) + } + + if priority, ok := d.GetOk("priority"); ok && d.HasChange("priority") { + recordSrv.Priority = sl.Int(priority.(int)) + } + + if protocol, ok := d.GetOk("protocol"); ok && d.HasChange("protocol") { + recordSrv.Protocol = sl.String(protocol.(string)) + } + + if port, ok := d.GetOk("port"); ok && d.HasChange("port") { + recordSrv.Port = sl.Int(port.(int)) + } + + if weight, ok := d.GetOk("weight"); ok && d.HasChange("weight") { + recordSrv.Weight = sl.Int(weight.(int)) + } + } + + if recordType == "srv" { + _, err = services.GetDnsDomainResourceRecordSrvTypeService(sess). 
+ Id(recordId).EditObject(&recordSrv) + } else { + _, err = service.Id(recordId).EditObject(&record) + } + + if err != nil { + return fmt.Errorf("Error editing DNS Resource %s Record %d: %s", recordType, recordId, err) + } + + return nil +} + +// Deletes DNS Domain Resource Record in SL system +// https://sldn.softlayer.com/reference/services/SoftLayer_Dns_Domain_ResourceRecord/deleteObject +func resourceIBMDNSRecordDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainResourceRecordService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + _, err = service.Id(id).DeleteObject() + + if err != nil { + return fmt.Errorf("Error deleting DNS Resource Record: %s", err) + } + + return nil +} + +// Exists function is called by refresh +// if the entity is absent - it is deleted from the .tfstate file +func resourceIBMDNSRecordExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetDnsDomainResourceRecordService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + record, err := service.Id(id).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error retrieving domain record info: %s", err) + } + return record.Id != nil && *record.Id == id, nil +} diff --git a/ibm/resource_ibm_dns_record_test.go b/ibm/resource_ibm_dns_record_test.go new file mode 100644 index 0000000000..73df0ed956 --- /dev/null +++ b/ibm/resource_ibm_dns_record_test.go @@ -0,0 +1,220 @@ +package ibm + +import ( + "errors" + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" +) + +func TestAccIBMDNSRecord_Basic(t *testing.T) { + var dns_domain datatypes.Dns_Domain + var dns_domain_record datatypes.Dns_Domain_ResourceRecord + + domainName := fmt.Sprintf("tfuatdomainr%s.ibm.com", acctest.RandString(10)) + host1 := acctest.RandString(10) + "ibm.com" + host2 := acctest.RandString(10) + "ibm.com" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMDNSDomainDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMDNSRecordConfigBasic(domainName, host1), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMDNSDomainExists("ibm_dns_domain.test_dns_domain_records", &dns_domain), + testAccCheckIBMDNSRecordExists("ibm_dns_record.recordA", &dns_domain_record), + resource.TestCheckResourceAttr("ibm_dns_record.recordA", "data", "127.0.0.1"), + resource.TestCheckResourceAttr("ibm_dns_record.recordA", "expire", "900"), + resource.TestCheckResourceAttr("ibm_dns_record.recordA", "minimum_ttl", "90"), + resource.TestCheckResourceAttr("ibm_dns_record.recordA", "mx_priority", "1"), + resource.TestCheckResourceAttr("ibm_dns_record.recordA", "refresh", "1"), + resource.TestCheckResourceAttr("ibm_dns_record.recordA", "host", host1), + resource.TestCheckResourceAttr("ibm_dns_record.recordA", "responsible_person", "user@softlayer.com"), + 
resource.TestCheckResourceAttr("ibm_dns_record.recordA", "ttl", "900"), + resource.TestCheckResourceAttr("ibm_dns_record.recordA", "retry", "1"), + resource.TestCheckResourceAttr("ibm_dns_record.recordA", "type", "a"), + ), + }, + { + Config: testAccCheckIBMDNSRecordConfigBasic(domainName, host2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMDNSDomainExists("ibm_dns_domain.test_dns_domain_records", &dns_domain), + testAccCheckIBMDNSRecordExists("ibm_dns_record.recordA", &dns_domain_record), + resource.TestCheckResourceAttr("ibm_dns_record.recordA", "host", host2), + ), + }, + }, + }) +} + +func TestAccIBMDNSRecord_Types(t *testing.T) { + var dns_domain datatypes.Dns_Domain + var dns_domain_record datatypes.Dns_Domain_ResourceRecord + + domainName := acctest.RandString(10) + "dnstest.ibm.com" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMDNSDomainDestroy, + Steps: []resource.TestStep{ + { + Config: fmt.Sprintf(testAccCheckIBMDNSRecordConfig_all_types, domainName, "_tcp"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMDNSDomainExists("ibm_dns_domain.test_dns_domain_record_types", &dns_domain), + testAccCheckIBMDNSRecordExists("ibm_dns_record.recordA", &dns_domain_record), + testAccCheckIBMDNSRecordExists("ibm_dns_record.recordAAAA", &dns_domain_record), + testAccCheckIBMDNSRecordExists("ibm_dns_record.recordCNAME", &dns_domain_record), + testAccCheckIBMDNSRecordExists("ibm_dns_record.recordMX", &dns_domain_record), + testAccCheckIBMDNSRecordExists("ibm_dns_record.recordSPF", &dns_domain_record), + testAccCheckIBMDNSRecordExists("ibm_dns_record.recordTXT", &dns_domain_record), + testAccCheckIBMDNSRecordExists("ibm_dns_record.recordSRV", &dns_domain_record), + ), + }, + + { + Config: fmt.Sprintf(testAccCheckIBMDNSRecordConfig_all_types, domainName, "_udp"), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMDNSDomainExists("ibm_dns_domain.test_dns_domain_record_types", &dns_domain), + resource.TestCheckResourceAttr("ibm_dns_record.recordSRV", "protocol", "_udp"), + ), + }, + }, + }) +} + +func testAccCheckIBMDNSRecordExists(n string, dns_domain_record *datatypes.Dns_Domain_ResourceRecord) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("No Record ID is set") + } + + dns_id, _ := strconv.Atoi(rs.Primary.ID) + + service := services.GetDnsDomainResourceRecordService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + found_domain_record, err := service.Id(dns_id).GetObject() + + if err != nil { + return err + } + + if strconv.Itoa(int(*found_domain_record.Id)) != rs.Primary.ID { + return fmt.Errorf("Record %d not found", dns_id) + } + + *dns_domain_record = found_domain_record + + return nil + } +} + +func testAccCheckIBMDNSRecordConfigBasic(domainName, hostname string) string { + return fmt.Sprintf(` +resource "ibm_dns_domain" "test_dns_domain_records" { + name = "%s" + target = "172.16.0.100" +} + +resource "ibm_dns_record" "recordA" { + data = "127.0.0.1" + domain_id = "${ibm_dns_domain.test_dns_domain_records.id}" + expire = 900 + minimum_ttl = 90 + mx_priority = 1 + refresh = 1 + host = "%s" + responsible_person = "user@softlayer.com" + ttl = 900 + retry = 1 + type = "a" +}`, domainName, hostname) +} + +var testAccCheckIBMDNSRecordConfig_all_types = ` +resource "ibm_dns_domain" 
"test_dns_domain_record_types" { + name = "%s" + target = "172.16.12.100" +} + +resource "ibm_dns_record" "recordA" { + data = "127.0.0.1" + domain_id = "${ibm_dns_domain.test_dns_domain_record_types.id}" + host = "hosta.com" + responsible_person = "user@softlayer.com" + ttl = 900 + type = "a" +} + +resource "ibm_dns_record" "recordAAAA" { + data = "fe80:0000:0000:0000:0202:b3ff:fe1e:8329" + domain_id = "${ibm_dns_domain.test_dns_domain_record_types.id}" + host = "hosta-2.com" + responsible_person = "user2changed@softlayer.com" + ttl = 1000 + type = "aaaa" +} + +resource "ibm_dns_record" "recordCNAME" { + data = "testsssaaaass.com" + domain_id = "${ibm_dns_domain.test_dns_domain_record_types.id}" + host = "hosta-cname.com" + responsible_person = "user@softlayer.com" + ttl = 900 + type = "cname" +} + +resource "ibm_dns_record" "recordMX" { + data = "email.example.com" + domain_id = "${ibm_dns_domain.test_dns_domain_record_types.id}" + host = "hosta-mx.com" + responsible_person = "user@softlayer.com" + ttl = 900 + type = "mx" +} + +resource "ibm_dns_record" "recordSPF" { + data = "v=spf1 mx:mail.example.org ~all" + domain_id = "${ibm_dns_domain.test_dns_domain_record_types.id}" + host = "hosta-spf" + responsible_person = "user@softlayer.com" + ttl = 900 + type = "spf" +} + +resource "ibm_dns_record" "recordTXT" { + data = "127.0.0.1" + domain_id = "${ibm_dns_domain.test_dns_domain_record_types.id}" + host = "hosta-txt.com" + responsible_person = "user@softlayer.com" + ttl = 900 + type = "txt" +} + +resource "ibm_dns_record" "recordSRV" { + data = "ns1.example.org" + domain_id = "${ibm_dns_domain.test_dns_domain_record_types.id}" + host = "hosta-srv.com" + responsible_person = "user@softlayer.com" + ttl = 900 + type = "srv" + port = 8080 + priority = 3 + protocol = "%s" + weight = 3 + service = "_mail" +} +` diff --git a/ibm/resource_ibm_firewall.go b/ibm/resource_ibm_firewall.go new file mode 100644 index 0000000000..32b1cadc07 --- /dev/null +++ b/ibm/resource_ibm_firewall.go @@ -0,0 +1,234 @@ +package ibm + +import ( + "fmt" + "strconv" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" + "log" + "time" +) + +const ( + FwHardwareDedicatedPackageType = "ADDITIONAL_SERVICES_FIREWALL" + + vlanMask = "firewallNetworkComponents,networkVlanFirewall.billingItem.orderItem.order.id,dedicatedFirewallFlag" + + ",firewallGuestNetworkComponents,firewallInterfaces,firewallRules,highAvailabilityFirewallFlag" + fwMask = "id,networkVlan.highAvailabilityFirewallFlag" +) + +func resourceIBMFirewall() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMFirewallCreate, + Read: resourceIBMFirewallRead, + Delete: resourceIBMFirewallDelete, + Exists: resourceIBMFirewallExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "ha_enabled": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + }, + "public_vlan_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceIBMFirewallCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + haEnabled := d.Get("ha_enabled").(bool) + publicVlanId := d.Get("public_vlan_id").(int) 
+ + keyName := "HARDWARE_FIREWALL_DEDICATED" + if haEnabled { + keyName = "HARDWARE_FIREWALL_HIGH_AVAILABILITY" + } + + pkg, err := product.GetPackageByType(sess, FwHardwareDedicatedPackageType) + if err != nil { + return err + } + + // Get all prices for ADDITIONAL_SERVICES_FIREWALL with the given capacity + productItems, err := product.GetPackageProducts(sess, *pkg.Id) + if err != nil { + return err + } + + // Select only those product items with a matching keyname + targetItems := []datatypes.Product_Item{} + for _, item := range productItems { + if *item.KeyName == keyName { + targetItems = append(targetItems, item) + } + } + + if len(targetItems) == 0 { + return fmt.Errorf("No product items matching %s could be found", keyName) + } + + productOrderContainer := datatypes.Container_Product_Order_Network_Protection_Firewall_Dedicated{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Prices: []datatypes.Product_Item_Price{ + { + Id: targetItems[0].Prices[0].Id, + }, + }, + Quantity: sl.Int(1), + }, + VlanId: sl.Int(publicVlanId), + } + + log.Println("[INFO] Creating dedicated hardware firewall") + + receipt, err := services.GetProductOrderService(sess). + PlaceOrder(&productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during creation of dedicated hardware firewall: %s", err) + } + vlan, err := findDedicatedFirewallByOrderId(sess, *receipt.OrderId) + if err != nil { + return fmt.Errorf("Error during creation of dedicated hardware firewall: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *vlan.NetworkVlanFirewall.Id)) + d.Set("ha_enabled", *vlan.HighAvailabilityFirewallFlag) + d.Set("public_vlan_id", *vlan.Id) + + log.Printf("[INFO] Firewall ID: %s", d.Id()) + + return resourceIBMFirewallRead(d, meta) +} + +func resourceIBMFirewallRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + fwID, _ := strconv.Atoi(d.Id()) + + fw, err := services.GetNetworkVlanFirewallService(sess). + Id(fwID). + Mask(fwMask). + GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving firewall information: %s", err) + } + + d.Set("public_vlan_id", *fw.NetworkVlan.Id) + d.Set("ha_enabled", *fw.NetworkVlan.HighAvailabilityFirewallFlag) + + return nil +} + +func resourceIBMFirewallDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + fwService := services.GetNetworkVlanFirewallService(sess) + + fwID, _ := strconv.Atoi(d.Id()) + + // Get billing item associated with the firewall + billingItem, err := fwService.Id(fwID).GetBillingItem() + + if err != nil { + return fmt.Errorf("Error while looking up billing item associated with the firewall: %s", err) + } + + if billingItem.Id == nil { + return fmt.Errorf("Error while looking up billing item associated with the firewall: No billing item for ID:%d", fwID) + } + + success, err := services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService() + if err != nil { + return err + } + + if !success { + return fmt.Errorf("SoftLayer reported an unsuccessful cancellation") + } + + return nil +} + +func resourceIBMFirewallExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + fwID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + _, err = services.GetNetworkVlanFirewallService(sess). + Id(fwID). 
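+		// Only existence matters here; the 404 handling below lets Terraform treat
+		// a missing firewall as deleted instead of failing the refresh.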
+		GetObject()
+
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 {
+			return false, nil
+		}
+		return false, fmt.Errorf("Error retrieving firewall information: %s", err)
+	}
+
+	return true, nil
+}
+
+func findDedicatedFirewallByOrderId(sess *session.Session, orderId int) (datatypes.Network_Vlan, error) {
+	filterPath := "networkVlans.networkVlanFirewall.billingItem.orderItem.order.id"
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"pending"},
+		Target:  []string{"complete"},
+		Refresh: func() (interface{}, string, error) {
+			vlans, err := services.GetAccountService(sess).
+				Filter(filter.Build(
+					filter.Path(filterPath).
+						Eq(strconv.Itoa(orderId)))).
+				Mask(vlanMask).
+				GetNetworkVlans()
+			if err != nil {
+				return datatypes.Network_Vlan{}, "", err
+			}
+
+			if len(vlans) == 1 {
+				return vlans[0], "complete", nil
+			} else if len(vlans) == 0 {
+				return nil, "pending", nil
+			} else {
+				return nil, "", fmt.Errorf("Expected one dedicated firewall, found %d", len(vlans))
+			}
+		},
+		Timeout:    45 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 10 * time.Second,
+	}
+
+	pendingResult, err := stateConf.WaitForState()
+
+	if err != nil {
+		return datatypes.Network_Vlan{}, err
+	}
+
+	var result, ok = pendingResult.(datatypes.Network_Vlan)
+
+	if ok {
+		return result, nil
+	}
+
+	return datatypes.Network_Vlan{},
+		fmt.Errorf("Cannot find Dedicated Firewall with order id '%d'", orderId)
+}
diff --git a/ibm/resource_ibm_firewall_policy.go b/ibm/resource_ibm_firewall_policy.go
new file mode 100644
index 0000000000..84818ce1e7
--- /dev/null
+++ b/ibm/resource_ibm_firewall_policy.go
@@ -0,0 +1,337 @@
+package ibm
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+const (
+	aclMask = "name,firewallInterfaces[name,firewallContextAccessControlLists]"
+)
+
+func resourceIBMFirewallPolicy() *schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMFirewallPolicyCreate,
+		Read:     resourceIBMFirewallPolicyRead,
+		Update:   resourceIBMFirewallPolicyUpdate,
+		Delete:   resourceIBMFirewallPolicyDelete,
+		Exists:   resourceIBMFirewallPolicyExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"firewall_id": {
+				Type:     schema.TypeInt,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"rules": {
+				Type:     schema.TypeList,
+				Required: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"action": {
+							Type:     schema.TypeString,
+							Required: true,
+						},
+						"src_ip_address": {
+							Type:     schema.TypeString,
+							Required: true,
+							DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {
+								newSrcIpAddress := net.ParseIP(n)
+								return newSrcIpAddress != nil && (newSrcIpAddress.String() == net.ParseIP(o).String())
+							},
+						},
+						"src_ip_cidr": {
+							Type:     schema.TypeInt,
+							Required: true,
+						},
+						"dst_ip_address": {
+							Type:     schema.TypeString,
+							Required: true,
+							DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {
+								newDstIpAddress := net.ParseIP(n)
+								return newDstIpAddress != nil && (newDstIpAddress.String() == net.ParseIP(o).String())
+							},
+						},
+						"dst_ip_cidr": {
+							Type:     schema.TypeInt,
+							Required: true,
+						},
+						// ICMP, GRE, AH, and ESP don't require port ranges.
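+						// For tcp and udp, a single port is expressed as an equal
+						// start and end (e.g. 80 and 80).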
+ "dst_port_range_start": { + Type: schema.TypeInt, + Optional: true, + }, + "dst_port_range_end": { + Type: schema.TypeInt, + Optional: true, + }, + "protocol": { + Type: schema.TypeString, + Required: true, + }, + "notes": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func prepareRules(d *schema.ResourceData) []datatypes.Network_Firewall_Update_Request_Rule { + ruleList := d.Get("rules").([]interface{}) + rules := make([]datatypes.Network_Firewall_Update_Request_Rule, 0) + for i, ruleItem := range ruleList { + ruleMap := ruleItem.(map[string]interface{}) + var rule datatypes.Network_Firewall_Update_Request_Rule + rule.OrderValue = sl.Int(i + 1) + rule.Action = sl.String(ruleMap["action"].(string)) + rule.SourceIpAddress = sl.String(ruleMap["src_ip_address"].(string)) + rule.SourceIpCidr = sl.Int(ruleMap["src_ip_cidr"].(int)) + rule.DestinationIpAddress = sl.String(ruleMap["dst_ip_address"].(string)) + rule.DestinationIpCidr = sl.Int(ruleMap["dst_ip_cidr"].(int)) + + if ruleMap["dst_port_range_start"] != nil { + rule.DestinationPortRangeStart = sl.Int(ruleMap["dst_port_range_start"].(int)) + } + if ruleMap["dst_port_range_end"] != nil { + rule.DestinationPortRangeEnd = sl.Int(ruleMap["dst_port_range_end"].(int)) + } + + rule.Protocol = sl.String(ruleMap["protocol"].(string)) + if len(ruleMap["notes"].(string)) > 0 { + rule.Notes = sl.String(ruleMap["notes"].(string)) + } + + if strings.Contains(*rule.SourceIpAddress, ":") || strings.Contains(*rule.DestinationIpAddress, ":") { + rule.Version = sl.Int(6) + } + rules = append(rules, rule) + } + return rules +} + +func getFirewallContextAccessControlListId(fwId int, sess *session.Session) (int, error) { + service := services.GetNetworkVlanFirewallService(sess) + vlan, err := service.Id(fwId).Mask(aclMask).GetNetworkVlans() + + if err != nil { + return 0, err + } + + for _, fwInterface := range vlan[0].FirewallInterfaces { + if fwInterface.Name != nil && + *fwInterface.Name == "outside" && + len(fwInterface.FirewallContextAccessControlLists) > 0 && + fwInterface.FirewallContextAccessControlLists[0].Id != nil { + return *fwInterface.FirewallContextAccessControlLists[0].Id, nil + } + } + return 0, fmt.Errorf("No firewallContextAccessControlListId.") +} + +func resourceIBMFirewallPolicyCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + fwId := d.Get("firewall_id").(int) + rules := prepareRules(d) + + fwContextACLId, err := getFirewallContextAccessControlListId(fwId, sess) + if err != nil { + return fmt.Errorf("Error during creation of dedicated hardware firewall rules: %s", err) + } + + ruleTemplate := datatypes.Network_Firewall_Update_Request{ + FirewallContextAccessControlListId: sl.Int(fwContextACLId), + Rules: rules, + } + + log.Println("[INFO] Creating dedicated hardware firewall rules") + + _, err = services.GetNetworkFirewallUpdateRequestService(sess).CreateObject(&ruleTemplate) + if err != nil { + return fmt.Errorf("Error during creation of dedicated hardware firewall rules: %s", err) + } + + d.SetId(strconv.Itoa(fwId)) + + log.Printf("[INFO] Firewall rules ID: %s", d.Id()) + log.Printf("[INFO] Wait one minute for applying the rules.") + time.Sleep(time.Minute) + + return resourceIBMFirewallPolicyRead(d, meta) +} + +func resourceIBMFirewallPolicyRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + fwRulesID, _ := strconv.Atoi(d.Id()) + + fw, err := 
services.GetNetworkVlanFirewallService(sess). + Id(fwRulesID). + Mask("rules"). + GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving firewall rules: %s", err) + } + + rules := make([]map[string]interface{}, 0, len(fw.Rules)) + for _, rule := range fw.Rules { + r := make(map[string]interface{}) + r["action"] = *rule.Action + r["src_ip_address"] = *rule.SourceIpAddress + r["src_ip_cidr"] = *rule.SourceIpCidr + r["dst_ip_address"] = *rule.DestinationIpAddress + r["dst_ip_cidr"] = *rule.DestinationIpCidr + if rule.DestinationPortRangeStart != nil { + r["dst_port_range_start"] = *rule.DestinationPortRangeStart + } + if rule.DestinationPortRangeEnd != nil { + r["dst_port_range_end"] = *rule.DestinationPortRangeEnd + } + r["protocol"] = *rule.Protocol + if len(*rule.Notes) > 0 { + r["notes"] = *rule.Notes + } + rules = append(rules, r) + } + + d.Set("firewall_id", fwRulesID) + d.Set("rules", rules) + + return nil +} + +func appendAnyOpenRule(rules []datatypes.Network_Firewall_Update_Request_Rule, protocol string) []datatypes.Network_Firewall_Update_Request_Rule { + ruleAnyOpen := datatypes.Network_Firewall_Update_Request_Rule{ + OrderValue: sl.Int(len(rules) + 1), + Action: sl.String("permit"), + SourceIpAddress: sl.String("any"), + DestinationIpAddress: sl.String("any"), + DestinationPortRangeStart: sl.Int(1), + DestinationPortRangeEnd: sl.Int(65535), + Protocol: sl.String(protocol), + Notes: sl.String("terraform-default-anyopen-" + protocol), + } + ruleAnyOpenIpv6 := datatypes.Network_Firewall_Update_Request_Rule{ + OrderValue: sl.Int(len(rules) + 1), + Action: sl.String("permit"), + SourceIpAddress: sl.String("any"), + DestinationIpAddress: sl.String("any"), + DestinationPortRangeStart: sl.Int(1), + DestinationPortRangeEnd: sl.Int(65535), + Protocol: sl.String(protocol), + Notes: sl.String("terraform-default-anyopen-" + protocol + "-ipv6"), + Version: sl.Int(6), + } + + return append(rules, ruleAnyOpen, ruleAnyOpenIpv6) +} + +func resourceIBMFirewallPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + fwId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid firewall ID, must be an integer: %s", err) + } + rules := prepareRules(d) + + fwContextACLId, err := getFirewallContextAccessControlListId(fwId, sess) + if err != nil { + return fmt.Errorf("Error during updating of dedicated hardware firewall rules: %s", err) + } + + ruleTemplate := datatypes.Network_Firewall_Update_Request{ + FirewallContextAccessControlListId: sl.Int(fwContextACLId), + Rules: rules, + } + + log.Println("[INFO] Updating dedicated hardware firewall rules") + + _, err = services.GetNetworkFirewallUpdateRequestService(sess).CreateObject(&ruleTemplate) + if err != nil { + return fmt.Errorf("Error during updating of dedicated hardware firewall rules: %s", err) + } + time.Sleep(time.Minute) + + return resourceIBMFirewallPolicyRead(d, meta) +} + +func resourceIBMFirewallPolicyDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + fwId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid firewall ID, must be an integer: %s", err) + } + + fwContextACLId, err := getFirewallContextAccessControlListId(fwId, sess) + if err != nil { + return fmt.Errorf("Error during deleting of dedicated hardware firewall rules: %s", err) + } + + ruleTemplate := datatypes.Network_Firewall_Update_Request{ + FirewallContextAccessControlListId: sl.Int(fwContextACLId), + 
} + + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "tcp") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "udp") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "icmp") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "gre") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "pptp") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "ah") + ruleTemplate.Rules = appendAnyOpenRule(ruleTemplate.Rules, "esp") + + log.Println("[INFO] Deleting dedicated hardware firewall rules") + + _, err = services.GetNetworkFirewallUpdateRequestService(sess).CreateObject(&ruleTemplate) + if err != nil { + return fmt.Errorf("Error during deleting of dedicated hardware firewall rules: %s", err) + } + time.Sleep(time.Minute) + + return nil +} + +func resourceIBMFirewallPolicyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + fwRulesID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + fw, err := services.GetNetworkVlanFirewallService(sess). + Id(fwRulesID). + Mask("rules"). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error retrieving firewall rules: %s", err) + } + + if len(fw.Rules) == 0 { + return false, nil + } + + return true, nil +} diff --git a/ibm/resource_ibm_firewall_policy_test.go b/ibm/resource_ibm_firewall_policy_test.go new file mode 100644 index 0000000000..b7adc4fcb1 --- /dev/null +++ b/ibm/resource_ibm_firewall_policy_test.go @@ -0,0 +1,208 @@ +package ibm + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccIBMFirewallPolicy_Basic(t *testing.T) { + hostname := acctest.RandString(16) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMFirewallPolicy_basic(hostname), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.action", "deny"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.src_ip_address", "0.0.0.0"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.dst_ip_address", "any"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.dst_port_range_start", "1"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.dst_port_range_end", "65535"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.notes", "Deny all"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.protocol", "tcp"), + + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.action", "permit"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.src_ip_address", "0.0.0.0"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.dst_ip_address", "any"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.dst_port_range_start", "22"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.dst_port_range_end", "22"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.notes", "Allow SSH"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", 
"rules.1.protocol", "tcp"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.2.action", "permit"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.2.src_ip_address", + "0000:0000:0000:0000:0000:0000:0000:0000"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.2.dst_ip_address", "any"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.2.dst_port_range_start", "22"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.2.dst_port_range_end", "22"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.2.notes", "Allow SSH"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.2.protocol", "tcp"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMFirewallPolicy_update(hostname), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.action", "permit"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.src_ip_address", "10.1.1.0"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.dst_port_range_start", "80"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.dst_port_range_end", "80"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.notes", "Permit from 10.1.1.0"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.0.protocol", "udp"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.action", "deny"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.src_ip_address", "2401:c900:1501:0032:0000:0000:0000:0000"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.dst_port_range_start", "80"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.dst_port_range_end", "80"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.notes", "Deny for IPv6"), + resource.TestCheckResourceAttr( + "ibm_firewall_policy.rules", "rules.1.protocol", "udp"), + ), + }, + }, + }) +} + +func testAccCheckIBMFirewallPolicy_basic(hostname string) string { + return fmt.Sprintf(` +resource "ibm_compute_vm_instance" "fwvm2" { + hostname = "%s" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "sjc01" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_firewall" "accfw2" { + ha_enabled = false + public_vlan_id = "${ibm_compute_vm_instance.fwvm2.public_vlan_id}" +} + +resource "ibm_firewall_policy" "rules" { + firewall_id = "${ibm_firewall.accfw2.id}" + rules = { + "action" = "deny" + "src_ip_address"= "0.0.0.0" + "src_ip_cidr"= 0 + "dst_ip_address"= "any" + "dst_ip_cidr"= 32 + "dst_port_range_start"= 1 + "dst_port_range_end"= 65535 + "notes"= "Deny all" + "protocol"= "tcp" + } + rules = { + "action" = "permit" + "src_ip_address"= "0.0.0.0" + "src_ip_cidr"= 0 + "dst_ip_address"= "any" + "dst_ip_cidr"= 32 + "dst_port_range_start"= 22 + "dst_port_range_end"= 22 + "notes"= "Allow SSH" + "protocol"= "tcp" + } + rules = { + "action" = "permit" + "src_ip_address"= "0::" + "src_ip_cidr"= 0 + "dst_ip_address"= "any" + "dst_ip_cidr"= 128 + "dst_port_range_start"= 22 + "dst_port_range_end"= 22 + "notes"= "Allow SSH" + "protocol"= "tcp" + } +} + +`, hostname) +} + +func testAccCheckIBMFirewallPolicy_update(hostname string) string { + return fmt.Sprintf(` 
+resource "ibm_compute_vm_instance" "fwvm2" { + hostname = "%s" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "sjc01" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_firewall" "accfw2" { + ha_enabled = false + public_vlan_id = "${ibm_compute_vm_instance.fwvm2.public_vlan_id}" +} + +resource "ibm_firewall_policy" "rules" { + firewall_id = "${ibm_firewall.accfw2.id}" + rules = { + "action" = "permit" + "src_ip_address"= "10.1.1.0" + "src_ip_cidr"= 24 + "dst_ip_address"= "any" + "dst_ip_cidr"= 32 + "dst_port_range_start"= 80 + "dst_port_range_end"= 80 + "notes"= "Permit from 10.1.1.0" + "protocol"= "udp" + } + rules = { + "action" = "deny" + "src_ip_address"= "2401:c900:1501:0032:0000:0000:0000:0000" + "src_ip_cidr"= 64 + "dst_ip_address"= "any" + "dst_ip_cidr"= 128 + "dst_port_range_start"= 80 + "dst_port_range_end"= 80 + "notes"= "Deny for IPv6" + "protocol"= "udp" + } + +} + +`, hostname) +} diff --git a/ibm/resource_ibm_firewall_test.go b/ibm/resource_ibm_firewall_test.go new file mode 100644 index 0000000000..d142e3cfb2 --- /dev/null +++ b/ibm/resource_ibm_firewall_test.go @@ -0,0 +1,50 @@ +package ibm + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccIBMFirewall_Basic(t *testing.T) { + hostname := acctest.RandString(16) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMFirewall_basic(hostname), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_firewall.accfw", "ha_enabled", "false"), + testAccCheckIBMResources("ibm_firewall.accfw", "public_vlan_id", + "ibm_compute_vm_instance.fwvm1", "public_vlan_id"), + ), + }, + }, + }) +} + +func testAccCheckIBMFirewall_basic(hostname string) string { + return fmt.Sprintf(` +resource "ibm_compute_vm_instance" "fwvm1" { + hostname = "%s" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "sjc01" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_firewall" "accfw" { + ha_enabled = false + public_vlan_id = "${ibm_compute_vm_instance.fwvm1.public_vlan_id}" +}`, hostname) +} diff --git a/ibm/resource_ibm_lb.go b/ibm/resource_ibm_lb.go new file mode 100644 index 0000000000..a291447939 --- /dev/null +++ b/ibm/resource_ibm_lb.go @@ -0,0 +1,395 @@ +package ibm + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/location" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + LB_LARGE_150000_CONNECTIONS = 150000 + LB_SMALL_15000_CONNECTIONS = 15000 + + LbLocalPackageType = "ADDITIONAL_SERVICES_LOAD_BALANCER" + + lbMask = "id,dedicatedFlag,connectionLimit,ipAddressId,securityCertificateId,highAvailabilityFlag," + + "sslEnabledFlag,loadBalancerHardware[datacenter[name]],ipAddress[ipAddress,subnetId]" +) + +func resourceIBMLb() 
*schema.Resource {
+	return &schema.Resource{
+		Create:   resourceIBMLbCreate,
+		Read:     resourceIBMLbRead,
+		Update:   resourceIBMLbUpdate,
+		Delete:   resourceIBMLbDelete,
+		Exists:   resourceIBMLbExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"connections": {
+				Type:     schema.TypeInt,
+				Required: true,
+				ForceNew: true,
+			},
+			"datacenter": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"ha_enabled": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				ForceNew: true,
+				Default:  false,
+			},
+			"security_certificate_id": {
+				Type:     schema.TypeInt,
+				Optional: true,
+			},
+			"ip_address": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"subnet_id": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"dedicated": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+				ForceNew: true,
+			},
+			"ssl_enabled": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceIBMLbCreate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	connections := d.Get("connections").(int)
+	haEnabled := d.Get("ha_enabled").(bool)
+	dedicated := d.Get("dedicated").(bool)
+
+	var categoryCode string
+
+	// SoftLayer capacities don't match the published capacities as seen in the local LB
+	// ordering screen in the customer portal. Terraform exposes the published capacities.
+	// Create a translation map for those cases where the published capacity does not
+	// equal the actual capacity on the product_item.
+	capacities := map[int]float64{
+		15000:  65000.0,
+		150000: 130000.0,
+	}
+
+	var capacity float64
+	if c, ok := capacities[connections]; ok {
+		capacity = c
+	} else {
+		capacity = float64(connections)
+	}
+
+	var keyFormatter string
+	if dedicated {
+		// Dedicated local LB always comes with SSL support
+		d.Set("ssl_enabled", true)
+		categoryCode = product.DedicatedLoadBalancerCategoryCode
+		if haEnabled {
+			keyFormatter = "DEDICATED_LOAD_BALANCER_WITH_HIGH_AVAILABILITY_AND_SSL_%d_CONNECTIONS"
+		} else {
+			keyFormatter = "LOAD_BALANCER_DEDICATED_WITH_SSL_OFFLOAD_%d_CONNECTIONS"
+		}
+	} else {
+		if haEnabled {
+			return fmt.Errorf("High Availability is not supported for shared local load balancers")
+		}
+		categoryCode = product.ProxyLoadBalancerCategoryCode
+		if _, ok := d.GetOk("security_certificate_id"); ok {
+			d.Set("ssl_enabled", true)
+			keyFormatter = "LOAD_BALANCER_%d_VIP_CONNECTIONS_WITH_SSL_OFFLOAD"
+		} else {
+			d.Set("ssl_enabled", false)
+			keyFormatter = "LOAD_BALANCER_%d_VIP_CONNECTIONS"
+		}
+	}
+
+	keyName := fmt.Sprintf(keyFormatter, connections)
+
+	pkg, err := product.GetPackageByType(sess, LbLocalPackageType)
+	if err != nil {
+		return err
+	}
+
+	// Get all prices for ADDITIONAL_SERVICES_LOAD_BALANCER with the given capacity
+	productItems, err := product.GetPackageProducts(sess, *pkg.Id)
+	if err != nil {
+		return err
+	}
+
+	// Select only those product items with a matching keyname
+	targetItems := []datatypes.Product_Item{}
+	for _, item := range productItems {
+		if *item.KeyName == keyName {
+			targetItems = append(targetItems, item)
+		}
+	}
+
+	if len(targetItems) == 0 {
+		return fmt.Errorf("No product items matching %s could be found", keyName)
+	}
+
+	// Select prices with the required capacity
+	prices := product.SelectProductPricesByCategory(
+		targetItems,
+		map[string]float64{
+			categoryCode: capacity,
+		},
+	)
+
+	// Lookup the datacenter ID
+	dc, err := location.GetDatacenterByName(sess, d.Get("datacenter").(string))
+	if err != nil {
+		return fmt.Errorf("Error looking up datacenter '%s': %s", d.Get("datacenter").(string), err)
+	}
+
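+	// Exactly one price (prices[:1]) is paired with the package and the resolved
+	// datacenter ID in the order container; PlaceOrder with sl.Bool(false) submits
+	// the order rather than saving it as a quote (assuming that flag is saveAsQuote).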
productOrderContainer := datatypes.Container_Product_Order_Network_LoadBalancer{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Location: sl.String(strconv.Itoa(*dc.Id)), + Prices: prices[:1], + Quantity: sl.Int(1), + }, + } + + log.Println("[INFO] Creating load balancer") + + receipt, err := services.GetProductOrderService(sess). + PlaceOrder(&productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during creation of load balancer: %s", err) + } + + loadBalancer, err := findLoadBalancerByOrderId(sess, *receipt.OrderId, dedicated) + if err != nil { + return fmt.Errorf("Error during creation of load balancer: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *loadBalancer.Id)) + d.Set("connections", getConnectionLimit(*loadBalancer.ConnectionLimit)) + d.Set("datacenter", loadBalancer.LoadBalancerHardware[0].Datacenter.Name) + d.Set("ip_address", loadBalancer.IpAddress.IpAddress) + d.Set("subnet_id", loadBalancer.IpAddress.SubnetId) + d.Set("ha_enabled", loadBalancer.HighAvailabilityFlag) + + log.Printf("[INFO] Load Balancer ID: %s", d.Id()) + + return resourceIBMLbUpdate(d, meta) +} + +func resourceIBMLbUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vipID, _ := strconv.Atoi(d.Id()) + + certID := d.Get("security_certificate_id").(int) + + err := setLocalLBSecurityCert(sess, vipID, certID) + + if err != nil { + return fmt.Errorf("Update load balancer failed: %s", err) + } + + return resourceIBMLbRead(d, meta) +} + +func resourceIBMLbRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vipID, _ := strconv.Atoi(d.Id()) + + vip, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID). + Mask(lbMask). + GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving load balancer: %s", err) + } + + d.Set("connections", getConnectionLimit(*vip.ConnectionLimit)) + d.Set("datacenter", vip.LoadBalancerHardware[0].Datacenter.Name) + d.Set("ip_address", vip.IpAddress.IpAddress) + d.Set("subnet_id", vip.IpAddress.SubnetId) + d.Set("ha_enabled", vip.HighAvailabilityFlag) + d.Set("dedicated", vip.DedicatedFlag) + d.Set("ssl_enabled", vip.SslEnabledFlag) + + // Optional fields. Guard against nil pointer dereferences + d.Set("security_certificate_id", sl.Get(vip.SecurityCertificateId, nil)) + + return nil +} + +func resourceIBMLbDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + vipService := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess) + + vipID, _ := strconv.Atoi(d.Id()) + + var billingItem datatypes.Billing_Item_Network_LoadBalancer + var err error + + // Get billing item associated with the load balancer + if d.Get("dedicated").(bool) { + billingItem, err = vipService. + Id(vipID). + GetDedicatedBillingItem() + } else { + billingItem.Billing_Item, err = vipService. + Id(vipID). 
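+			// Shared LBs expose a plain billing item, while the dedicated branch
+			// above uses GetDedicatedBillingItem; in both cases cancellation goes
+			// through the billing item rather than the load balancer itself.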
+ GetBillingItem() + } + + if err != nil { + return fmt.Errorf("Error while looking up billing item associated with the load balancer: %s", err) + } + + if billingItem.Id == nil { + return fmt.Errorf("Error while looking up billing item associated with the load balancer: No billing item for ID:%d", vipID) + } + success, err := services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService() + if err != nil { + return err + } + + if !success { + return fmt.Errorf("SoftLayer reported an unsuccessful cancellation") + } + + return nil +} + +func resourceIBMLbExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + vipID, _ := strconv.Atoi(d.Id()) + + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID). + Mask("id"). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return true, nil +} + +/* When requesting 15000 SL creates between 15000 and 150000. When requesting 150000 SL creates >= 150000 */ +func getConnectionLimit(connectionLimit int) int { + if connectionLimit >= LB_LARGE_150000_CONNECTIONS { + return LB_LARGE_150000_CONNECTIONS + } else if connectionLimit >= LB_SMALL_15000_CONNECTIONS && + connectionLimit < LB_LARGE_150000_CONNECTIONS { + return LB_SMALL_15000_CONNECTIONS + } else { + return connectionLimit + } +} + +func findLoadBalancerByOrderId(sess *session.Session, orderId int, dedicated bool) (datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, error) { + var filterPath string + if dedicated { + filterPath = "adcLoadBalancers.dedicatedBillingItem.orderItem.order.id" + } else { + filterPath = "adcLoadBalancers.billingItem.orderItem.order.id" + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + lbs, err := services.GetAccountService(sess). + Filter(filter.Build( + filter.Path(filterPath). + Eq(strconv.Itoa(orderId)))). + Mask(lbMask). 
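+				// Poll the account's ADC load balancers, filtered by the order ID,
+				// until exactly one appears; provisioning can lag the order receipt.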
+ GetAdcLoadBalancers() + if err != nil { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{}, "", err + } + + if len(lbs) == 1 { + return lbs[0], "complete", nil + } else if len(lbs) == 0 { + return nil, "pending", nil + } else { + return nil, "", fmt.Errorf("Expected one load balancer: %s", err) + } + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{}, err + } + + var result, ok = pendingResult.(datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) + + if ok { + return result, nil + } + + return datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{}, + fmt.Errorf("Cannot find Application Delivery Controller Load Balancer with order id '%d'", orderId) +} + +func setLocalLBSecurityCert(sess *session.Session, vipID int, certID int) error { + var vip struct { + SecurityCertificateId *int `json:"securityCertificateId"` + } + + var success bool + + if certID == 0 { + vip.SecurityCertificateId = nil + } else { + vip.SecurityCertificateId = &certID + } + + // In order to send a null value, need to invoke DoRequest directly with a custom struct + err := sess.DoRequest( + "SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", + "editObject", + []interface{}{&vip}, + &sl.Options{Id: &vipID}, + &success, + ) + + if !success && err == nil { + return fmt.Errorf("Unable to remove ssl security certificate from load balancer") + } + + return err +} diff --git a/ibm/resource_ibm_lb_service.go b/ibm/resource_ibm_lb_service.go new file mode 100644 index 0000000000..5c111ea305 --- /dev/null +++ b/ibm/resource_ibm_lb_service.go @@ -0,0 +1,359 @@ +package ibm + +import ( + "fmt" + "log" + "strconv" + "time" + + "strings" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMLbService() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbServiceCreate, + Read: resourceIBMLbServiceRead, + Update: resourceIBMLbServiceUpdate, + Delete: resourceIBMLbServiceDelete, + Exists: resourceIBMLbServiceExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "service_group_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "ip_address_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "port": { + Type: schema.TypeInt, + Required: true, + }, + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + "health_check_type": { + Type: schema.TypeString, + Required: true, + }, + "weight": { + Type: schema.TypeInt, + Required: true, + }, + }, + } +} + +func resourceIBMLbServiceCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + // SoftLayer Local LBs consist of a multi-level hierarchy of types. 
+ // (virtualIpAddress -> []virtualServer -> []serviceGroup -> []service) + + // Using the service group ID provided in the config, find the IDs of the + // respective virtualServer and virtualIpAddress + sgID := d.Get("service_group_id").(int) + serviceGroup, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceGroupService(sess). + Id(sgID). + Mask("id,routingMethodId,routingTypeId,virtualServer[id,allocation,port,virtualIpAddress[id]]"). + GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving load balancer service group from SoftLayer, %s", err) + } + + // Store the IDs for later use + vsID := *serviceGroup.VirtualServer.Id + vipID := *serviceGroup.VirtualServer.VirtualIpAddress.Id + + // Convert the health check type name to an ID + healthCheckTypeId, err := getHealthCheckTypeId(sess, d.Get("health_check_type").(string)) + if err != nil { + return err + } + + // The API only exposes edit capability at the root of the tree (virtualIpAddress), + // so need to send the full structure from the root down to the node to be added or + // modified + vip := datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{ + + VirtualServers: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer{{ + Id: &vsID, + Allocation: serviceGroup.VirtualServer.Allocation, + Port: serviceGroup.VirtualServer.Port, + + ServiceGroups: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group{{ + Id: &sgID, + RoutingMethodId: serviceGroup.RoutingMethodId, + RoutingTypeId: serviceGroup.RoutingTypeId, + + Services: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service{{ + Enabled: sl.Int(1), + Port: sl.Int(d.Get("port").(int)), + IpAddressId: sl.Int(d.Get("ip_address_id").(int)), + + HealthChecks: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{{ + HealthCheckTypeId: &healthCheckTypeId, + }}, + + GroupReferences: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference{{ + Weight: sl.Int(d.Get("weight").(int)), + }}, + }}, + }}, + }}, + } + + log.Println("[INFO] Creating load balancer service") + + err = updateLoadBalancerService(sess, vipID, &vip) + + if err != nil { + return fmt.Errorf("Error creating load balancer service: %s", err) + } + + // Retrieve the newly created object, to obtain its ID + svcs, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceGroupService(sess). + Id(sgID). + Mask("mask[id,port,ipAddressId]"). + Filter(filter.New( + filter.Path("services.port").Eq(d.Get("port")), + filter.Path("services.ipAddressId").Eq(d.Get("ip_address_id"))).Build()). + GetServices() + + if err != nil || len(svcs) == 0 { + return fmt.Errorf("Error retrieving load balancer: %s", err) + } + + d.SetId(strconv.Itoa(*svcs[0].Id)) + + log.Printf("[INFO] Load Balancer Service ID: %s", d.Id()) + + return resourceIBMLbServiceRead(d, meta) +} + +func resourceIBMLbServiceUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + // Using the ID stored in the config, find the IDs of the respective + // serviceGroup, virtualServer and virtualIpAddress + svcID, _ := strconv.Atoi(d.Id()) + svc, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess). + Id(svcID). + Mask("id,serviceGroup[id,routingTypeId,routingMethodId,virtualServer[id,allocation,port,virtualIpAddress[id]]]"). 
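+		// A single masked fetch recovers the whole ancestry (service group,
+		// virtual server, VIP) so the edit below can be issued from the VIP root.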
+ GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving load balancer service group from SoftLayer, %s", err) + } + + // Store the IDs for later use + sgID := *svc.ServiceGroup.Id + vsID := *svc.ServiceGroup.VirtualServer.Id + vipID := *svc.ServiceGroup.VirtualServer.VirtualIpAddress.Id + + // Convert the health check type name to an ID + healthCheckTypeId, err := getHealthCheckTypeId(sess, d.Get("health_check_type").(string)) + if err != nil { + return err + } + + // The API only exposes edit capability at the root of the tree (virtualIpAddress), + // so need to send the full structure from the root down to the node to be added or + // modified + vip := datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{ + + VirtualServers: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer{{ + Id: &vsID, + Allocation: svc.ServiceGroup.VirtualServer.Allocation, + Port: svc.ServiceGroup.VirtualServer.Port, + + ServiceGroups: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group{{ + Id: &sgID, + RoutingMethodId: svc.ServiceGroup.RoutingMethodId, + RoutingTypeId: svc.ServiceGroup.RoutingTypeId, + + Services: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service{{ + Id: &svcID, + Enabled: sl.Int(1), + Port: sl.Int(d.Get("port").(int)), + IpAddressId: sl.Int(d.Get("ip_address_id").(int)), + + HealthChecks: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check{{ + HealthCheckTypeId: &healthCheckTypeId, + }}, + + GroupReferences: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference{{ + Weight: sl.Int(d.Get("weight").(int)), + }}, + }}, + }}, + }}, + } + + log.Println("[INFO] Updating load balancer service") + + err = updateLoadBalancerService(sess, vipID, &vip) + + if err != nil { + return fmt.Errorf("Error updating load balancer service: %s", err) + } + + return resourceIBMLbServiceRead(d, meta) +} + +func resourceIBMLbServiceRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + svcID, _ := strconv.Atoi(d.Id()) + + svc, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess). + Id(svcID). + Mask("ipAddressId,enabled,port,healthChecks[type[keyname]],groupReferences[weight]"). + GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving service: %s", err) + } + + d.Set("ip_address_id", svc.IpAddressId) + d.Set("port", svc.Port) + d.Set("health_check_type", svc.HealthChecks[0].Type.Keyname) + d.Set("weight", svc.GroupReferences[0].Weight) + d.Set("enabled", (*svc.Enabled == 1)) + + return nil +} + +func resourceIBMLbServiceDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + svcID, _ := strconv.Atoi(d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess). + Id(svcID). 
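+				// DeleteObject is wrapped in a StateChangeConf because the LB rejects
+				// changes while another transaction is in flight; busy errors below
+				// are retried rather than surfaced.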
+ DeleteObject() + + if apiErr, ok := err.(sl.Error); ok { + switch { + case apiErr.Exception == "SoftLayer_Exception_Network_Timeout" || + strings.Contains(apiErr.Message, "There was a problem saving your configuration to the load balancer.") || + strings.Contains(apiErr.Message, "The selected group could not be removed from the load balancer.") || + strings.Contains(apiErr.Message, "The resource '480' is already in use."): + // The LB is busy with another transaction. Retry + return false, "pending", nil + case apiErr.StatusCode == 404 || // 404 - service was deleted on the previous attempt + strings.Contains(apiErr.Message, "Unable to find object with id"): // xmlrpc returns 200 instead of 404 + return true, "complete", nil + default: + // Any other error is unexpected. Abort + return false, "", err + } + } + + return true, "complete", nil + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + + if err != nil { + return fmt.Errorf("Error deleting service: %s", err) + } + + return nil +} + +func resourceIBMLbServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + svcID, _ := strconv.Atoi(d.Id()) + + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess). + Id(svcID). + Mask("id"). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return true, nil +} + +func getHealthCheckTypeId(sess *session.Session, healthCheckTypeName string) (int, error) { + healthCheckTypes, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerHealthCheckTypeService(sess). + Mask("id"). + Filter(filter.Build( + filter.Path("keyname").Eq(healthCheckTypeName))). + Limit(1). + GetAllObjects() + + if err != nil { + return -1, err + } + + if len(healthCheckTypes) < 1 { + return -1, fmt.Errorf("Invalid health check type: %s", healthCheckTypeName) + } + + return *healthCheckTypes[0].Id, nil +} + +func updateLoadBalancerService(sess *session.Session, vipID int, vip *datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID). + EditObject(vip) + + if apiErr, ok := err.(sl.Error); ok { + // The LB is busy with another transaction. Retry + if apiErr.Exception == "SoftLayer_Exception_Network_Timeout" || + strings.Contains(apiErr.Message, "There was a problem saving your configuration to the load balancer.") || + strings.Contains(apiErr.Message, "The selected group could not be removed from the load balancer.") || + strings.Contains(apiErr.Message, "The resource '480' is already in use.") { + return false, "pending", nil + } + + // Any other error is unexpected. 
Abort + return false, "", err + } + + return true, "complete", nil + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} diff --git a/ibm/resource_ibm_lb_service_group.go b/ibm/resource_ibm_lb_service_group.go new file mode 100644 index 0000000000..7c729a410a --- /dev/null +++ b/ibm/resource_ibm_lb_service_group.go @@ -0,0 +1,291 @@ +package ibm + +import ( + "fmt" + "log" + + "strconv" + + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMLbServiceGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbServiceGroupCreate, + Read: resourceIBMLbServiceGroupRead, + Update: resourceIBMLbServiceGroupUpdate, + Delete: resourceIBMLbServiceGroupDelete, + Exists: resourceIBMLbServiceGroupExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "virtual_server_id": { + Type: schema.TypeInt, + Computed: true, + }, + "service_group_id": { + Type: schema.TypeInt, + Computed: true, + }, + "load_balancer_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "allocation": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "port": { + Type: schema.TypeInt, + Required: true, + }, + "routing_method": { + Type: schema.TypeString, + Required: true, + }, + "routing_type": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceIBMLbServiceGroupCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vipID := d.Get("load_balancer_id").(int) + + routingMethodId, err := getRoutingMethodId(sess, d.Get("routing_method").(string)) + if err != nil { + return err + } + + routingTypeId, err := getRoutingTypeId(sess, d.Get("routing_type").(string)) + if err != nil { + return err + } + + vip := datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{ + + VirtualServers: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer{{ + Allocation: sl.Int(d.Get("allocation").(int)), + Port: sl.Int(d.Get("port").(int)), + + ServiceGroups: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group{{ + RoutingMethodId: &routingMethodId, + RoutingTypeId: &routingTypeId, + }}, + }}, + } + + log.Println("[INFO] Creating load balancer service group") + + err = updateLoadBalancerService(sess, vipID, &vip) + + if err != nil { + return fmt.Errorf("Error creating load balancer service group: %s", err) + } + + // Retrieve the newly created object, to obtain its ID + vs, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess). + Id(vipID). + Filter(filter.New(filter.Path("virtualServers.port").Eq(d.Get("port"))).Build()). + Mask("id,serviceGroups[id]"). 
+		GetVirtualServers()
+
+	if err != nil {
+		return fmt.Errorf("Error retrieving load balancer: %s", err)
+	}
+
+	if len(vs) == 0 {
+		return fmt.Errorf("Error retrieving load balancer: no virtual server found for port %d", d.Get("port").(int))
+	}
+
+	d.SetId(strconv.Itoa(*vs[0].Id))
+	d.Set("service_group_id", vs[0].ServiceGroups[0].Id)
+
+	log.Printf("[INFO] Load Balancer Service Group ID: %s", d.Id())
+
+	return resourceIBMLbServiceGroupRead(d, meta)
+}
+
+func resourceIBMLbServiceGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	vipID := d.Get("load_balancer_id").(int)
+	vsID, _ := strconv.Atoi(d.Id())
+	sgID := d.Get("service_group_id").(int)
+
+	routingMethodId, err := getRoutingMethodId(sess, d.Get("routing_method").(string))
+	if err != nil {
+		return err
+	}
+
+	routingTypeId, err := getRoutingTypeId(sess, d.Get("routing_type").(string))
+	if err != nil {
+		return err
+	}
+
+	vip := datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{
+
+		VirtualServers: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer{{
+			Id:         &vsID,
+			Allocation: sl.Int(d.Get("allocation").(int)),
+			Port:       sl.Int(d.Get("port").(int)),
+
+			ServiceGroups: []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group{{
+				Id:              &sgID,
+				RoutingMethodId: &routingMethodId,
+				RoutingTypeId:   &routingTypeId,
+			}},
+		}},
+	}
+
+	log.Println("[INFO] Updating load balancer service group")
+
+	err = updateLoadBalancerService(sess, vipID, &vip)
+
+	if err != nil {
+		return fmt.Errorf("Error updating load balancer service group: %s", err)
+	}
+
+	return resourceIBMLbServiceGroupRead(d, meta)
+}
+
+func resourceIBMLbServiceGroupRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	vsID, _ := strconv.Atoi(d.Id())
+
+	vs, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualServerService(sess).
+		Id(vsID).
+		Mask("allocation,port,serviceGroups[routingMethod[keyname],routingType[keyname]]").
+		GetObject()
+
+	if err != nil {
+		return fmt.Errorf("Error retrieving load balancer: %s", err)
+	}
+
+	d.Set("allocation", vs.Allocation)
+	d.Set("port", vs.Port)
+
+	d.Set("routing_method", vs.ServiceGroups[0].RoutingMethod.Keyname)
+	d.Set("routing_type", vs.ServiceGroups[0].RoutingType.Keyname)
+
+	return nil
+}
+
+func resourceIBMLbServiceGroupDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	vsID, _ := strconv.Atoi(d.Id())
+
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"pending"},
+		Target:  []string{"complete"},
+		Refresh: func() (interface{}, string, error) {
+			err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualServerService(sess).
+				Id(vsID).
+				DeleteObject()
+
+			if apiErr, ok := err.(sl.Error); ok {
+				switch {
+				case apiErr.Exception == "SoftLayer_Exception_Network_Timeout" ||
+					strings.Contains(apiErr.Message, "There was a problem saving your configuration to the load balancer.") ||
+					strings.Contains(apiErr.Message, "The selected group could not be removed from the load balancer.") ||
+					strings.Contains(apiErr.Message, "An error has occurred while processing your request.") ||
+					strings.Contains(apiErr.Message, "The resource '480' is already in use."):
+					// The LB is busy with another transaction. Retry
+					return false, "pending", nil
+				case apiErr.StatusCode == 404:
+					// 404 - service was deleted on the previous attempt
+					return true, "complete", nil
+				default:
+					// Any other error is unexpected.
Abort + return false, "", err + } + } + + return true, "complete", nil + }, + Timeout: 10 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + + if err != nil { + return fmt.Errorf("Error deleting service: %s", err) + } + + return nil +} + +func resourceIBMLbServiceGroupExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + vsID, _ := strconv.Atoi(d.Id()) + + _, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerVirtualServerService(sess). + Id(vsID). + Mask("id"). + GetObject() + + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return true, nil +} + +func getRoutingTypeId(sess *session.Session, routingTypeName string) (int, error) { + routingTypes, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerRoutingTypeService(sess). + Mask("id"). + Filter(filter.Build( + filter.Path("keyname").Eq(routingTypeName))). + Limit(1). + GetAllObjects() + + if err != nil { + return -1, err + } + + if len(routingTypes) < 1 { + return -1, fmt.Errorf("Invalid routing type: %s", routingTypeName) + } + + return *routingTypes[0].Id, nil +} + +func getRoutingMethodId(sess *session.Session, routingMethodName string) (int, error) { + routingMethods, err := services.GetNetworkApplicationDeliveryControllerLoadBalancerRoutingMethodService(sess). + Mask("id"). + Filter(filter.Build( + filter.Path("keyname").Eq(routingMethodName))). + Limit(1). + GetAllObjects() + + if err != nil { + return -1, err + } + + if len(routingMethods) < 1 { + return -1, fmt.Errorf("Invalid routing method: %s", routingMethodName) + } + + return *routingMethods[0].Id, nil +} diff --git a/ibm/resource_ibm_lb_service_group_test.go b/ibm/resource_ibm_lb_service_group_test.go new file mode 100644 index 0000000000..8ac47b2555 --- /dev/null +++ b/ibm/resource_ibm_lb_service_group_test.go @@ -0,0 +1,60 @@ +package ibm + +import ( + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccIBMLbServiceGroup_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMLbServiceGroupConfig_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_lb_service_group.test_service_group1", "port", "82"), + resource.TestCheckResourceAttr( + "ibm_lb_service_group.test_service_group1", "routing_method", "CONSISTENT_HASH_IP"), + resource.TestCheckResourceAttr( + "ibm_lb_service_group.test_service_group1", "routing_type", "HTTP"), + resource.TestCheckResourceAttr( + "ibm_lb_service_group.test_service_group1", "allocation", "50"), + resource.TestCheckResourceAttr( + "ibm_lb_service_group.test_service_group2", "port", "83"), + resource.TestCheckResourceAttr( + "ibm_lb_service_group.test_service_group2", "routing_method", "CONSISTENT_HASH_IP"), + resource.TestCheckResourceAttr( + "ibm_lb_service_group.test_service_group2", "routing_type", "TCP"), + resource.TestCheckResourceAttr( + "ibm_lb_service_group.test_service_group2", "allocation", "50"), + ), + }, + }, + }) +} + +const testAccCheckIBMLbServiceGroupConfig_basic = ` +resource "ibm_lb" "testacc_foobar_lb" { + connections = 250 + datacenter = "tor01" + ha_enabled = false +} + +resource "ibm_lb_service_group" 
"test_service_group1" { + port = 82 + routing_method = "CONSISTENT_HASH_IP" + routing_type = "HTTP" + load_balancer_id = "${ibm_lb.testacc_foobar_lb.id}" + allocation = 50 +} + +resource "ibm_lb_service_group" "test_service_group2" { + port = 83 + routing_method = "CONSISTENT_HASH_IP" + routing_type = "TCP" + load_balancer_id = "${ibm_lb.testacc_foobar_lb.id}" + allocation = 50 +} +` diff --git a/ibm/resource_ibm_lb_service_test.go b/ibm/resource_ibm_lb_service_test.go new file mode 100644 index 0000000000..b1d064d19e --- /dev/null +++ b/ibm/resource_ibm_lb_service_test.go @@ -0,0 +1,74 @@ +package ibm + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccBluemixIBMLbService_Basic(t *testing.T) { + hostname := acctest.RandString(16) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckBluemixIBMLbServiceConfig_basic(hostname), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_lb_service.test_service", "port", "80"), + resource.TestCheckResourceAttr( + "ibm_lb_service.test_service", "enabled", "true"), + resource.TestCheckResourceAttr( + "ibm_lb_service.test_service", "weight", "1"), + resource.TestCheckResourceAttr( + "ibm_lb_service.test_service", "health_check_type", "DNS"), + ), + }, + }, + }) +} + +func testAccCheckBluemixIBMLbServiceConfig_basic(hostname string) string { + return fmt.Sprintf(` +resource "ibm_compute_vm_instance" "test_server_1" { + hostname = "%s" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25, 10, 20] + user_metadata = "{\"value\":\"newvalue\"}" + dedicated_acct_host_only = true + local_disk = false +} + +resource "ibm_lb" "testacc_foobar_lb" { + connections = 250 + datacenter = "dal06" + ha_enabled = false + dedicated = false +} + +resource "ibm_lb_service_group" "test_service_group" { + port = 82 + routing_method = "CONSISTENT_HASH_IP" + routing_type = "HTTP" + load_balancer_id = "${ibm_lb.testacc_foobar_lb.id}" + allocation = 100 +} + +resource "ibm_lb_service" "test_service" { + port = 80 + enabled = true + service_group_id = "${ibm_lb_service_group.test_service_group.service_group_id}" + weight = 1 + health_check_type = "DNS" + ip_address_id = "${ibm_compute_vm_instance.test_server_1.ip_address_id}" +}`, hostname) +} diff --git a/ibm/resource_ibm_lb_test.go b/ibm/resource_ibm_lb_test.go new file mode 100644 index 0000000000..400f0cc3f0 --- /dev/null +++ b/ibm/resource_ibm_lb_test.go @@ -0,0 +1,70 @@ +package ibm + +import ( + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccIBMLbShared_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMLbSharedConfig_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_lb.testacc_foobar_lb", "connections", "250"), + resource.TestCheckResourceAttr( + "ibm_lb.testacc_foobar_lb", "datacenter", "dal09"), + resource.TestCheckResourceAttr( + "ibm_lb.testacc_foobar_lb", "ha_enabled", "false"), + resource.TestCheckResourceAttr( + "ibm_lb.testacc_foobar_lb", "dedicated", "false"), + 
resource.TestCheckResourceAttr( + "ibm_lb.testacc_foobar_lb", "ssl_enabled", "false"), + ), + }, + }, + }) +} + +func TestAccIBMLbDedicated_Basic(t *testing.T) { + t.SkipNow() + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMLbDedicatedConfig_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_lb.testacc_foobar_lb", "connections", "15000"), + resource.TestCheckResourceAttr( + "ibm_lb.testacc_foobar_lb", "datacenter", "dal09"), + resource.TestCheckResourceAttr( + "ibm_lb.testacc_foobar_lb", "ha_enabled", "false"), + resource.TestCheckResourceAttr( + "ibm_lb.testacc_foobar_lb", "dedicated", "true"), + resource.TestCheckResourceAttr( + "ibm_lb.testacc_foobar_lb", "ssl_enabled", "true"), + ), + }, + }, + }) +} + +const testAccCheckIBMLbSharedConfig_basic = ` +resource "ibm_lb" "testacc_foobar_lb" { + connections = 250 + datacenter = "dal09" + ha_enabled = false +}` + +const testAccCheckIBMLbDedicatedConfig_basic = ` +resource "ibm_lb" "testacc_foobar_lb" { + connections = 15000 + datacenter = "dal09" + ha_enabled = false + dedicated = true +}` diff --git a/ibm/resource_ibm_lb_vpx.go b/ibm/resource_ibm_lb_vpx.go new file mode 100644 index 0000000000..08eadf0387 --- /dev/null +++ b/ibm/resource_ibm_lb_vpx.go @@ -0,0 +1,606 @@ +package ibm + +import ( + "fmt" + "log" + "regexp" + "strconv" + "strings" + "time" + + "errors" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/location" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + PACKAGE_ID_APPLICATION_DELIVERY_CONTROLLER = 192 + DELIMITER = "_" +) + +func resourceIBMLbVpx() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbVpxCreate, + Read: resourceIBMLbVpxRead, + Delete: resourceIBMLbVpxDelete, + Exists: resourceIBMLbVpxExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + + "type": { + Type: schema.TypeString, + Computed: true, + }, + + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "speed": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "plan": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ip_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "public_vlan_id": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "front_end_vlan": { + Type: schema.TypeMap, + Optional: true, + Removed: "Please use 'public_vlan_id'", + ForceNew: true, + }, + + "public_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "front_end_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Removed: "Renamed as 'public_subnet'", + }, + + "private_vlan_id": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "back_end_vlan": { + Type: schema.TypeMap, + Optional: true, + Removed: 
"Please use 'private_vlan_id'", + ForceNew: true, + }, + + "private_subnet": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "back_end_subnet": { + Type: schema.TypeString, + Optional: true, + Removed: "Renamed as 'private_subnet'", + ForceNew: true, + }, + + "vip_pool": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func getSubnetId(subnet string, meta interface{}) (int, error) { + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + + subnetInfo := strings.Split(subnet, "/") + if len(subnetInfo) != 2 { + return 0, fmt.Errorf( + "Unable to parse the provided subnet: %s", subnet) + } + + networkIdentifier := subnetInfo[0] + cidr := subnetInfo[1] + + subnets, err := service. + Mask("id"). + Filter( + filter.Build( + filter.Path("subnets.cidr").Eq(cidr), + filter.Path("subnets.networkIdentifier").Eq(networkIdentifier), + ), + ). + GetSubnets() + + if err != nil { + return 0, fmt.Errorf("Error looking up Subnet: %s", err) + } + + if len(subnets) < 1 { + return 0, fmt.Errorf( + "Unable to locate a subnet matching the provided subnet: %s", subnet) + } + + return *subnets[0].Id, nil +} + +func getVPXVersion(id int, sess *session.Session) (string, error) { + service := services.GetNetworkApplicationDeliveryControllerService(sess) + getObjectResult, err := service.Id(id).Mask("description").GetObject() + + if err != nil { + return "", fmt.Errorf("Error retrieving VPX version: %s", err) + } + + return strings.Split(*getObjectResult.Description, " ")[3], nil +} + +func getVPXPriceItemKeyName(version string, speed int, plan string) string { + name := "CITRIX_NETSCALER_VPX" + speedMeasurements := "MBPS" + versionReplaced := strings.Replace(version, ".", DELIMITER, -1) + speedString := strconv.Itoa(speed) + speedMeasurements + + return strings.Join([]string{name, versionReplaced, speedString, strings.ToUpper(plan)}, DELIMITER) +} + +func getPublicIpItemKeyName(ipCount int) string { + name := "STATIC_PUBLIC_IP_ADDRESSES" + ipCountString := strconv.Itoa(ipCount) + + return strings.Join([]string{ipCountString, name}, DELIMITER) +} + +func findVPXPriceItems(version string, speed int, plan string, ipCount int, meta interface{}) ([]datatypes.Product_Item_Price, error) { + sess := meta.(ClientSession).SoftLayerSession() + + // Get VPX package type. 
+ productPackage, err := product.GetPackageByType(sess, "ADDITIONAL_SERVICES_APPLICATION_DELIVERY_APPLIANCE") + if err != nil { + return []datatypes.Product_Item_Price{}, err + } + + // Get VPX product items + items, err := product.GetPackageProducts(sess, *productPackage.Id) + if err != nil { + return []datatypes.Product_Item_Price{}, err + } + + // Get VPX and static IP items + nadcKey := getVPXPriceItemKeyName(version, speed, plan) + ipKey := getPublicIpItemKeyName(ipCount) + + var nadcItemPrice, ipItemPrice datatypes.Product_Item_Price + + for _, item := range items { + itemKey := item.KeyName + if *itemKey == nadcKey { + nadcItemPrice = item.Prices[0] + } + if *itemKey == ipKey { + ipItemPrice = item.Prices[0] + } + } + + var errorMessages []string + + if nadcItemPrice.Id == nil { + errorMessages = append(errorMessages, "VPX version, speed or plan have incorrect values") + } + + if ipItemPrice.Id == nil { + errorMessages = append(errorMessages, "IP quantity value is incorrect") + } + + if len(errorMessages) > 0 { + err = errors.New(strings.Join(errorMessages, "\n")) + return []datatypes.Product_Item_Price{}, err + } + + return []datatypes.Product_Item_Price{ + { + Id: nadcItemPrice.Id, + }, + { + Id: ipItemPrice.Id, + }, + }, nil +} + +func findVPXByOrderId(orderId int, meta interface{}) (datatypes.Network_Application_Delivery_Controller, error) { + service := services.GetAccountService(meta.(ClientSession).SoftLayerSession()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + vpxs, err := service. + Filter( + filter.Build( + filter.Path("applicationDeliveryControllers.billingItem.orderItem.order.id").Eq(orderId), + ), + ).GetApplicationDeliveryControllers() + if err != nil { + return datatypes.Network_Application_Delivery_Controller{}, "", err + } + + if len(vpxs) == 1 { + return vpxs[0], "complete", nil + } else if len(vpxs) == 0 { + return nil, "pending", nil + } else { + return nil, "", fmt.Errorf("Expected one VPX: %s", err) + } + }, + Timeout: 45 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Network_Application_Delivery_Controller{}, err + } + + var result, ok = pendingResult.(datatypes.Network_Application_Delivery_Controller) + + if ok { + return result, nil + } + + return datatypes.Network_Application_Delivery_Controller{}, + fmt.Errorf("Cannot find Application Delivery Controller with order id '%d'", orderId) +} + +func prepareHardwareOptions(d *schema.ResourceData, meta interface{}) ([]datatypes.Hardware, error) { + hardwareOpts := make([]datatypes.Hardware, 1) + publicVlanId := d.Get("public_vlan_id").(int) + publicSubnet := d.Get("public_subnet").(string) + + if publicVlanId > 0 || len(publicSubnet) > 0 { + hardwareOpts[0].PrimaryNetworkComponent = &datatypes.Network_Component{} + } + + if publicVlanId > 0 { + hardwareOpts[0].PrimaryNetworkComponent.NetworkVlanId = &publicVlanId + } + + if len(publicSubnet) > 0 { + primarySubnetId, err := getSubnetId(publicSubnet, meta) + if err != nil { + return nil, fmt.Errorf("Error creating network application delivery controller: %s", err) + } + hardwareOpts[0].PrimaryNetworkComponent.NetworkVlan = &datatypes.Network_Vlan{ + PrimarySubnetId: &primarySubnetId, + } + } + + privateVlanId := d.Get("private_vlan_id").(int) + privateSubnet := d.Get("private_subnet").(string) + if privateVlanId > 0 || 
len(privateSubnet) > 0 { + hardwareOpts[0].PrimaryBackendNetworkComponent = &datatypes.Network_Component{} + } + + if privateVlanId > 0 { + hardwareOpts[0].PrimaryBackendNetworkComponent.NetworkVlanId = &privateVlanId + } + + if len(privateSubnet) > 0 { + primarySubnetId, err := getSubnetId(privateSubnet, meta) + if err != nil { + return nil, fmt.Errorf("Error creating network application delivery controller: %s", err) + } + hardwareOpts[0].PrimaryBackendNetworkComponent.NetworkVlan = &datatypes.Network_Vlan{ + PrimarySubnetId: &primarySubnetId, + } + } + return hardwareOpts, nil +} + +func resourceIBMLbVpxCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + productOrderService := services.GetProductOrderService(sess) + NADCService := services.GetNetworkApplicationDeliveryControllerService(sess) + var err error + + opts := datatypes.Container_Product_Order{ + PackageId: sl.Int(PACKAGE_ID_APPLICATION_DELIVERY_CONTROLLER), + Quantity: sl.Int(1), + } + + opts.Prices, err = findVPXPriceItems( + d.Get("version").(string), + d.Get("speed").(int), + d.Get("plan").(string), + d.Get("ip_count").(int), + meta) + + if err != nil { + return fmt.Errorf("Error Cannot find Application Delivery Controller prices '%s'.", err) + } + + datacenter := d.Get("datacenter").(string) + + if len(datacenter) > 0 { + datacenter, err := location.GetDatacenterByName(sess, datacenter, "id") + if err != nil { + return fmt.Errorf("Error creating network application delivery controller: %s", err) + } + opts.Location = sl.String(strconv.Itoa(*datacenter.Id)) + } + + opts.Hardware, err = prepareHardwareOptions(d, meta) + if err != nil { + return fmt.Errorf("Error Cannot get hardware options '%s'.", err) + } + + log.Println("[INFO] Creating network application delivery controller") + + receipt, err := productOrderService.PlaceOrder(&opts, sl.Bool(false)) + + if err != nil { + return fmt.Errorf("Error creating network application delivery controller: %s", err) + } + + // Wait VPX provisioning + VPX, err := findVPXByOrderId(*receipt.OrderId, meta) + + if err != nil { + return fmt.Errorf("Error creating network application delivery controller: %s", err) + } + + d.SetId(fmt.Sprintf("%d", *VPX.Id)) + + log.Printf("[INFO] Netscaler VPX ID: %s", d.Id()) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + // Wait Virtual IP provisioning + IsVipReady := false + + for vipWaitCount := 0; vipWaitCount < 270; vipWaitCount++ { + getObjectResult, err := NADCService.Id(id).Mask("subnets[ipAddresses],password[password]").GetObject() + if err != nil { + return fmt.Errorf("Error retrieving network application delivery controller: %s", err) + } + + ipCount := 0 + if getObjectResult.Password != nil && getObjectResult.Password.Password != nil && len(*getObjectResult.Password.Password) > 0 && + getObjectResult.Subnets != nil && len(getObjectResult.Subnets) > 0 && getObjectResult.Subnets[0].IpAddresses != nil { + ipCount = len(getObjectResult.Subnets[0].IpAddresses) + } + if ipCount > 0 { + IsVipReady = true + break + } + log.Printf("[INFO] Wait 10 seconds for Virtual IP provisioning on Netscaler VPX ID: %d", id) + time.Sleep(time.Second * 10) + } + + if !IsVipReady { + return fmt.Errorf("Failed to create VIPs for Netscaler VPX ID: %d", id) + } + + // Wait while VPX service is initializing. 
GetLoadBalancers() internally calls the VPX REST API and returns
+	// an error "Could not connect to host" if the REST API is not available.
+	IsRESTReady := false
+
+	for restWaitCount := 0; restWaitCount < 270; restWaitCount++ {
+		_, err := NADCService.Id(id).GetLoadBalancers()
+		// GetLoadBalancers returns an error "There was a problem processing the reply from the
+		// application tier. Please contact development." if the VPX version is 10.5.
+		if err == nil || !strings.Contains(err.Error(), "Could not connect to host") {
+			IsRESTReady = true
+			break
+		}
+		log.Printf("[INFO] Wait 10 seconds for the REST service of Netscaler VPX ID: %d", id)
+		time.Sleep(time.Second * 10)
+	}
+
+	if !IsRESTReady {
+		return fmt.Errorf("Failed to initialize the VPX REST service for Netscaler VPX ID: %d", id)
+	}
+
+	// Wait additional buffer time for the VPX service.
+	time.Sleep(time.Minute)
+
+	return resourceIBMLbVpxRead(d, meta)
+}
+
+func resourceIBMLbVpxRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	service := services.GetNetworkApplicationDeliveryControllerService(sess)
+	id, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	getObjectResult, err := service.
+		Id(id).
+		Mask("id,name,type[name],datacenter,networkVlans[primaryRouter],networkVlans[primarySubnets],subnets[ipAddresses],description").
+		GetObject()
+
+	if err != nil {
+		return fmt.Errorf("Error retrieving network application delivery controller: %s", err)
+	}
+
+	d.Set("name", *getObjectResult.Name)
+	d.Set("type", *getObjectResult.Type.Name)
+	if getObjectResult.Datacenter != nil {
+		d.Set("datacenter", *getObjectResult.Datacenter.Name)
+	}
+
+	for _, vlan := range getObjectResult.NetworkVlans {
+		if vlan.PrimaryRouter != nil && *vlan.PrimaryRouter.Hostname != "" {
+			isFcr := strings.HasPrefix(*vlan.PrimaryRouter.Hostname, "fcr")
+			isBcr := strings.HasPrefix(*vlan.PrimaryRouter.Hostname, "bcr")
+			if isFcr {
+				d.Set("public_vlan_id", *vlan.Id)
+				if vlan.PrimarySubnets != nil && len(vlan.PrimarySubnets) > 0 {
+					ipAddress := *vlan.PrimarySubnets[0].NetworkIdentifier
+					d.Set(
+						"public_subnet",
+						fmt.Sprintf("%s/%d", ipAddress, *vlan.PrimarySubnets[0].Cidr),
+					)
+				}
+			}
+
+			if isBcr {
+				d.Set("private_vlan_id", *vlan.Id)
+				if vlan.PrimarySubnets != nil && len(vlan.PrimarySubnets) > 0 {
+					ipAddress := *vlan.PrimarySubnets[0].NetworkIdentifier
+					d.Set(
+						"private_subnet",
+						fmt.Sprintf("%s/%d", ipAddress, *vlan.PrimarySubnets[0].Cidr),
+					)
+				}
+			}
+		}
+	}
+
+	vips := make([]string, 0)
+	ipCount := 0
+	for i, subnet := range getObjectResult.Subnets {
+		for _, ipAddressObj := range subnet.IpAddresses {
+			vips = append(vips, *ipAddressObj.IpAddress)
+			if i == 0 {
+				ipCount++
+			}
+		}
+	}
+
+	d.Set("vip_pool", vips)
+	d.Set("ip_count", ipCount)
+
+	description := *getObjectResult.Description
+	r, _ := regexp.Compile(" [0-9]+Mbps")
+	speedStr := r.FindString(description)
+	r, _ = regexp.Compile("[0-9]+")
+	speed, err := strconv.Atoi(r.FindString(speedStr))
+	if err == nil && speed > 0 {
+		d.Set("speed", speed)
+	}
+
+	r, _ = regexp.Compile(" VPX [0-9]+\\.[0-9]+ ")
+	versionStr := r.FindString(description)
+	r, _ = regexp.Compile("[0-9]+\\.[0-9]+")
+	version := r.FindString(versionStr)
+	if version != "" {
+		d.Set("version", version)
+	}
+
+	r, _ = regexp.Compile(" [A-Za-z]+$")
+	planStr := r.FindString(description)
+	r, _ = regexp.Compile("[A-Za-z]+$")
+	plan := r.FindString(planStr)
+	if plan != "" {
+		d.Set("plan", plan)
+	}
+
+	return nil
+}
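+
+// Note: the regular expressions in resourceIBMLbVpxRead assume the device
+// description has a form like "Citrix NetScaler VPX 10.5 10Mbps Standard",
+// from which the speed (10), version ("10.5"), and plan ("Standard") are
+// recovered; getVPXVersion above relies on the same layout.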
+ +func resourceIBMLbVpxDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkApplicationDeliveryControllerService(sess) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + billingItem, err := service.Id(id).GetBillingItem() + if err != nil { + return fmt.Errorf("Error deleting network application delivery controller: %s", err) + } + + if *billingItem.Id > 0 { + billingItemService := services.GetBillingItemService(sess) + deleted, err := billingItemService.Id(*billingItem.Id).CancelService() + if err != nil { + return fmt.Errorf("Error deleting network application delivery controller: %s", err) + } + + if deleted { + return nil + } + } + + return nil +} + +func resourceIBMLbVpxExists(d *schema.ResourceData, meta interface{}) (bool, error) { + service := services.GetNetworkApplicationDeliveryControllerService(meta.(ClientSession).SoftLayerSession()) + + id, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + nadc, err := service.Mask("id").Id(id).GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return nadc.Id != nil && *nadc.Id == id, nil +} diff --git a/ibm/resource_ibm_lb_vpx_ha.go b/ibm/resource_ibm_lb_vpx_ha.go new file mode 100644 index 0000000000..70255a9ebb --- /dev/null +++ b/ibm/resource_ibm_lb_vpx_ha.go @@ -0,0 +1,346 @@ +package ibm + +import ( + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/minsikl/netscaler-nitro-go/client" + dt "github.com/minsikl/netscaler-nitro-go/datatypes" + "github.com/minsikl/netscaler-nitro-go/op" +) + +func resourceIBMLbVpxHa() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbVpxHaCreate, + Read: resourceIBMLbVpxHaRead, + Update: resourceIBMLbVpxHaUpdate, + Delete: resourceIBMLbVpxHaDelete, + Exists: resourceIBMLbVpxHaExists, + Importer: &schema.ResourceImporter{}, + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + }, + "primary_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "secondary_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "stay_secondary": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + }, + } +} + +func configureHA(nClient1 *client.NitroClient, nClient2 *client.NitroClient, staySecondary bool) error { + // 1. VPX2 : Sync password + systemuserReq2 := dt.SystemuserReq{ + Systemuser: &dt.Systemuser{ + Username: op.String("root"), + Password: op.String(nClient1.Password), + }, + } + err := nClient2.Update(&systemuserReq2) + if err != nil { + return err + } + nClient2.Password = nClient1.Password + + // 2. VPX1 : Register hanode + hanodeReq1 := dt.HanodeReq{ + Hanode: &dt.Hanode{ + Id: op.String("2"), + Ipaddress: op.String(nClient2.IpAddress), + }, + } + + err = nClient1.Add(&hanodeReq1) + if err != nil { + return err + } + + // Wait 5 secs to make VPX1 a primary node. + time.Sleep(time.Second * 5) + + // 3. VPX2 : Register hanode + hanodeReq2 := dt.HanodeReq{ + Hanode: &dt.Hanode{ + Id: op.String("2"), + Ipaddress: op.String(nClient1.IpAddress), + }, + } + err = nClient2.Add(&hanodeReq2) + if err != nil { + return err + } + + // 4. 
VPX2 : Update STAYSECONDARY
+	stay := dt.HanodeReq{Hanode: &dt.Hanode{}}
+	if staySecondary {
+		stay.Hanode.Hastatus = op.String("STAYSECONDARY")
+	} else {
+		stay.Hanode.Hastatus = op.String("ENABLE")
+	}
+	err = nClient2.Update(&stay)
+	if err != nil {
+		return err
+	}
+
+	// 5. VPX1 : Register rpcnode
+	nsrpcnode1 := dt.NsrpcnodeReq{
+		Nsrpcnode: &dt.Nsrpcnode{
+			Ipaddress: op.String(nClient1.IpAddress),
+			Password:  op.String(nClient1.Password),
+		},
+	}
+	err = nClient1.Update(&nsrpcnode1)
+	if err != nil {
+		return err
+	}
+	nsrpcnode1.Nsrpcnode.Ipaddress = op.String(nClient2.IpAddress)
+	err = nClient1.Update(&nsrpcnode1)
+	if err != nil {
+		return err
+	}
+
+	// 6. VPX2 : Register rpcnode
+	nsrpcnode2 := dt.NsrpcnodeReq{
+		Nsrpcnode: &dt.Nsrpcnode{
+			Ipaddress: op.String(nClient1.IpAddress),
+			Password:  op.String(nClient1.Password),
+		},
+	}
+	err = nClient2.Update(&nsrpcnode2)
+	if err != nil {
+		return err
+	}
+	nsrpcnode2.Nsrpcnode.Ipaddress = op.String(nClient2.IpAddress)
+	err = nClient2.Update(&nsrpcnode2)
+	if err != nil {
+		return err
+	}
+
+	// 7. VPX1 : Sync files
+	hafiles := dt.HafilesReq{
+		Hafiles: &dt.Hafiles{
+			Mode: []string{"all"},
+		},
+	}
+	err = nClient1.Add(&hafiles, "action=sync")
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func deleteHA(nClient1 *client.NitroClient, nClient2 *client.NitroClient) error {
+	// 1. VPX2 : Delete hanode
+	err := nClient2.Delete(&dt.HanodeReq{}, "2")
+	if err != nil {
+		return err
+	}
+
+	// 2. VPX1 : Delete hanode
+	err = nClient1.Delete(&dt.HanodeReq{}, "2")
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func parseHAId(id string) (int, int, error) {
+	if len(id) < 1 {
+		return 0, 0, fmt.Errorf("Failed to parse id: unable to get netscaler IDs")
+	}
+	idList := strings.Split(id, ":")
+	if len(idList) != 2 || len(idList[0]) < 1 || len(idList[1]) < 1 {
+		return 0, 0, fmt.Errorf("Failed to parse id: invalid HA ID")
+	}
+	primaryId, err := strconv.Atoi(idList[0])
+	if err != nil {
+		return 0, 0, fmt.Errorf("Failed to parse id: unable to get a primaryId: %s", err)
+	}
+	secondaryId, err := strconv.Atoi(idList[1])
+	if err != nil {
+		return 0, 0, fmt.Errorf("Failed to parse id: unable to get a secondaryId: %s", err)
+	}
+	return primaryId, secondaryId, nil
+}
+
+func resourceIBMLbVpxHaCreate(d *schema.ResourceData, meta interface{}) error {
+	primaryId := d.Get("primary_id").(int)
+	secondaryId := d.Get("secondary_id").(int)
+	staySecondary := false
+	if stay, ok := d.GetOk("stay_secondary"); ok {
+		staySecondary = stay.(bool)
+	}
+
+	nClientPrimary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), primaryId)
+	if err != nil {
+		return fmt.Errorf("Error getting primary netscaler information ID: %d", primaryId)
+	}
+
+	nClientSecondary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), secondaryId)
+	if err != nil {
+		return fmt.Errorf("Error getting secondary netscaler information ID: %d", secondaryId)
+	}
+
+	err = configureHA(nClientPrimary, nClientSecondary, staySecondary)
+	if err != nil {
+		return fmt.Errorf("Error configuring HA: %s", err.Error())
+	}
+
+	d.SetId(fmt.Sprintf("%d:%d", primaryId, secondaryId))
+
+	log.Printf("[INFO] Netscaler HA ID: %s", d.Id())
+
+	return resourceIBMLbVpxHaRead(d, meta)
+}
+
+func resourceIBMLbVpxHaRead(d *schema.ResourceData, meta interface{}) error {
+	primaryId, secondaryId, err := parseHAId(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error reading HA: %s", err.Error())
+	}
+
+	nClientPrimary, err :=
getNitroClient(meta.(ClientSession).SoftLayerSession(), primaryId)
+	if err != nil {
+		return fmt.Errorf("Error getting primary netscaler information ID: %d", primaryId)
+	}
+
+	nClientSecondary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), secondaryId)
+	if err != nil {
+		return fmt.Errorf("Error getting secondary netscaler information ID: %d", secondaryId)
+	}
+
+	nClientSecondary.Password = nClientPrimary.Password
+
+	res := dt.HanodeRes{}
+	err = nClientSecondary.Get(&res, "")
+	if err != nil {
+		return fmt.Errorf("Error getting hanode information: %s", err.Error())
+	}
+	staySecondary := false
+	if *res.Hanode[0].Hastatus == "STAYSECONDARY" {
+		staySecondary = true
+	}
+
+	d.Set("primary_id", primaryId)
+	d.Set("secondary_id", secondaryId)
+	d.Set("stay_secondary", staySecondary)
+
+	return nil
+}
+
+func resourceIBMLbVpxHaUpdate(d *schema.ResourceData, meta interface{}) error {
+	primaryId, secondaryId, err := parseHAId(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error updating HA: %s", err.Error())
+	}
+
+	nClientPrimary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), primaryId)
+	if err != nil {
+		return fmt.Errorf("Error getting primary netscaler information ID: %d", primaryId)
+	}
+
+	nClientSecondary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), secondaryId)
+	if err != nil {
+		return fmt.Errorf("Error getting secondary netscaler information ID: %d", secondaryId)
+	}
+
+	nClientSecondary.Password = nClientPrimary.Password
+
+	staySecondary := false
+	if stay, ok := d.GetOk("stay_secondary"); ok {
+		staySecondary = stay.(bool)
+	}
+
+	stay := dt.HanodeReq{Hanode: &dt.Hanode{}}
+	if staySecondary {
+		stay.Hanode.Hastatus = op.String("STAYSECONDARY")
+	} else {
+		stay.Hanode.Hastatus = op.String("ENABLE")
+	}
+
+	err = nClientSecondary.Update(&stay)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resourceIBMLbVpxHaDelete(d *schema.ResourceData, meta interface{}) error {
+	primaryId, secondaryId, err := parseHAId(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error deleting HA: %s", err.Error())
+	}
+	nClientPrimary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), primaryId)
+	if err != nil {
+		return fmt.Errorf("Error getting primary netscaler information ID: %d", primaryId)
+	}
+	nClientSecondary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), secondaryId)
+	if err != nil {
+		return fmt.Errorf("Error getting secondary netscaler information ID: %d", secondaryId)
+	}
+
+	secondaryPassword := nClientSecondary.Password
+	nClientSecondary.Password = nClientPrimary.Password
+	err = deleteHA(nClientPrimary, nClientSecondary)
+	if err != nil {
+		return fmt.Errorf("Error deleting HA: %s", err.Error())
+	}
+
+	// Restore password of the secondary VPX
+	systemuserReq := dt.SystemuserReq{
+		Systemuser: &dt.Systemuser{
+			Username: op.String("root"),
+			Password: op.String(secondaryPassword),
+		},
+	}
+	err = nClientSecondary.Update(&systemuserReq)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resourceIBMLbVpxHaExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	primaryId, _, err := parseHAId(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Error reading HA: %s", err.Error())
+	}
+
+	nClientPrimary, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), primaryId)
+	if err != nil {
+		return false, fmt.Errorf("Error getting primary netscaler information ID in Exists: %d", primaryId)
+	}
+
+	res := dt.HanodeRes{}
+	err = nClientPrimary.Get(&res, "")
+	if err != nil {
return false, fmt.Errorf("Error getting hnode information in Exist: %s", err.Error()) + } + + if len(res.Hanode) < 2 { + return false, nil + } + + return true, nil +} diff --git a/ibm/resource_ibm_lb_vpx_ha_test.go b/ibm/resource_ibm_lb_vpx_ha_test.go new file mode 100644 index 0000000000..c1fe7d6ca0 --- /dev/null +++ b/ibm/resource_ibm_lb_vpx_ha_test.go @@ -0,0 +1,74 @@ +package ibm + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMLbVpxHa_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMLbVpxHaConfig_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_lb_vpx_ha.test_ha", "stay_secondary", "true"), + testAccCheckIBMResources("ibm_lb_vpx_ha.test_ha", "primary_id", + "ibm_lb_vpx.test_pri", "id"), + testAccCheckIBMResources("ibm_lb_vpx_ha.test_ha", "secondary_id", + "ibm_lb_vpx.test_sec", "id"), + ), + }, + }, + }) +} + +var testAccCheckIBMLbVpxHaConfig_basic = ` + +resource "ibm_compute_vm_instance" "vm1" { + hostname = "vm1" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_lb_vpx" "test_pri" { + datacenter = "dal06" + speed = 10 + version = "10.5" + plan = "Standard" + ip_count = 2 + public_vlan_id = "${ibm_compute_vm_instance.vm1.public_vlan_id}" + private_vlan_id = "${ibm_compute_vm_instance.vm1.private_vlan_id}" + public_subnet = "${ibm_compute_vm_instance.vm1.public_subnet}" + private_subnet = "${ibm_compute_vm_instance.vm1.private_subnet}" +} + +resource "ibm_lb_vpx" "test_sec" { + datacenter = "dal06" + speed = 10 + version = "10.5" + plan = "Standard" + ip_count = 2 + public_vlan_id = "${ibm_compute_vm_instance.vm1.public_vlan_id}" + private_vlan_id = "${ibm_compute_vm_instance.vm1.private_vlan_id}" + public_subnet = "${ibm_compute_vm_instance.vm1.public_subnet}" + private_subnet = "${ibm_compute_vm_instance.vm1.private_subnet}" +} + +resource "ibm_lb_vpx_ha" "test_ha" { + primary_id = "${ibm_lb_vpx.test_pri.id}" + secondary_id = "${ibm_lb_vpx.test_sec.id}" + stay_secondary = true +} +` diff --git a/ibm/resource_ibm_lb_vpx_service.go b/ibm/resource_ibm_lb_vpx_service.go new file mode 100644 index 0000000000..a9805aa59f --- /dev/null +++ b/ibm/resource_ibm_lb_vpx_service.go @@ -0,0 +1,669 @@ +package ibm + +import ( + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + dt "github.com/minsikl/netscaler-nitro-go/datatypes" + "github.com/minsikl/netscaler-nitro-go/op" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/helpers/network" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +var ( + // Healthcheck mapping tables + healthCheckMapFromSLtoVPX105 = map[string]string{ + "HTTP": "http", + "TCP": "tcp", + "ICMP": "ping", + "icmp": "ping", + "DNS": "dns", + } + + healthCheckMapFromVPX105toSL = map[string]string{ + "http": "HTTP", + "tcp": "TCP", + "ping": "ICMP", + "dns": "DNS", + } +) + +func resourceIBMLbVpxService() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbVpxServiceCreate, + Read: resourceIBMLbVpxServiceRead, + Update: resourceIBMLbVpxServiceUpdate, + 
Delete: resourceIBMLbVpxServiceDelete,
+		Exists: resourceIBMLbVpxServiceExists,
+		Importer: &schema.ResourceImporter{},
+
+		Schema: map[string]*schema.Schema{
+			"id": {
+				Type:     schema.TypeString,
+				Computed: true,
+				ForceNew: true,
+			},
+
+			"vip_id": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"name": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"destination_ip_address": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"destination_port": {
+				Type:     schema.TypeInt,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"weight": {
+				Type:     schema.TypeInt,
+				Required: true,
+			},
+
+			"connection_limit": {
+				Type:     schema.TypeInt,
+				Required: true,
+			},
+
+			"health_check": {
+				Type:     schema.TypeString,
+				Required: true,
+				DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {
+					return strings.EqualFold(o, n)
+				},
+			},
+		},
+	}
+}
+
+func parseServiceId(id string) (string, int, string, error) {
+	parts := strings.Split(id, ":")
+	if len(parts) < 2 {
+		return "", -1, "", fmt.Errorf("Error parsing vip id: expected 'nadcId:vipName[:serviceName]', got '%s'", id)
+	}
+
+	nadcId, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return "", -1, "", fmt.Errorf("Error parsing vip id: %s", err)
+	}
+	vipId := parts[1]
+
+	serviceName := ""
+	if len(parts) > 2 {
+		serviceName = parts[2]
+	}
+
+	return vipId, nadcId, serviceName, nil
+}
+
+func updateVpxService(sess *session.Session, nadcId int, lbVip *datatypes.Network_LoadBalancer_VirtualIpAddress) (bool, error) {
+	service := services.GetNetworkApplicationDeliveryControllerService(sess)
+	serviceName := *lbVip.Services[0].Name
+	successFlag := true
+	var err error
+	for count := 0; count < 10; count++ {
+		successFlag, err = service.Id(nadcId).UpdateLiveLoadBalancer(lbVip)
+		log.Printf("[INFO] Updating LoadBalancer Service %s successFlag : %t", serviceName, successFlag)
+
+		if err != nil && strings.Contains(err.Error(), "Operation already in progress") {
+			log.Printf("[INFO] Updating LoadBalancer Service %s Error : %s.
Retry in 10 secs", serviceName, err.Error()) + time.Sleep(time.Second * 10) + continue + } + + break + } + return successFlag, err +} + +func resourceIBMLbVpxServiceCreate(d *schema.ResourceData, meta interface{}) error { + vipId := d.Get("vip_id").(string) + _, nadcId, _, err := parseServiceId(vipId) + + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error creating Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxServiceCreate101(d, meta) + } + + return resourceIBMLbVpxServiceCreate105(d, meta) +} + +func resourceIBMLbVpxServiceRead(d *schema.ResourceData, meta interface{}) error { + _, nadcId, _, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error Reading Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxServiceRead101(d, meta) + } + + return resourceIBMLbVpxServiceRead105(d, meta) +} + +func resourceIBMLbVpxServiceUpdate(d *schema.ResourceData, meta interface{}) error { + _, nadcId, _, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error updating Virtual IP Address: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error updating Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxServiceUpdate101(d, meta) + } + + return resourceIBMLbVpxServiceUpdate105(d, meta) +} + +func resourceIBMLbVpxServiceDelete(d *schema.ResourceData, meta interface{}) error { + _, nadcId, _, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxServiceDelete101(d, meta) + } + + return resourceIBMLbVpxServiceDelete105(d, meta) +} + +func resourceIBMLbVpxServiceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + _, nadcId, _, err := parseServiceId(d.Id()) + if err != nil { + return false, fmt.Errorf("Error in exists: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return false, fmt.Errorf("Error in exists: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxServiceExists101(d, meta) + } + + return resourceIBMLbVpxServiceExists105(d, meta) +} + +func resourceIBMLbVpxServiceCreate101(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vipId := d.Get("vip_id").(string) + vipName, nadcId, _, err := parseServiceId(vipId) + serviceName := d.Get("name").(string) + + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + lb_services := []datatypes.Network_LoadBalancer_Service{ + { + Name: sl.String(d.Get("name").(string)), + DestinationIpAddress: sl.String(d.Get("destination_ip_address").(string)), + DestinationPort: sl.Int(d.Get("destination_port").(int)), + Weight: sl.Int(d.Get("weight").(int)), + HealthCheck: sl.String(d.Get("health_check").(string)), + ConnectionLimit: 
sl.Int(d.Get("connection_limit").(int)), + }, + } + + lbVip := &datatypes.Network_LoadBalancer_VirtualIpAddress{ + Name: sl.String(vipName), + Services: lb_services, + } + + // Check if there is an existed loadbalancer service which has same name. + log.Printf("[INFO] Creating LoadBalancer Service Name %s validation", serviceName) + + _, err = network.GetNadcLbVipServiceByName(sess, nadcId, vipName, serviceName) + if err == nil { + return fmt.Errorf("Error creating LoadBalancer Service: The service name '%s' is already used.", + serviceName) + } + + log.Printf("[INFO] Creating LoadBalancer Service %s", serviceName) + + successFlag, err := updateVpxService(sess, nadcId, lbVip) + + if err != nil { + return fmt.Errorf("Error creating LoadBalancer Service: %s", err) + } + + if !successFlag { + return errors.New("Error creating LoadBalancer Service") + } + + d.SetId(fmt.Sprintf("%s:%s", vipId, serviceName)) + + return resourceIBMLbVpxServiceRead(d, meta) +} + +func resourceIBMLbVpxServiceCreate105(d *schema.ResourceData, meta interface{}) error { + vipId := d.Get("vip_id").(string) + vipName, nadcId, _, err := parseServiceId(vipId) + serviceName := d.Get("name").(string) + + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + // Create a service + svcReq := dt.ServiceReq{ + Service: &dt.Service{ + Name: op.String(d.Get("name").(string)), + Ip: op.String(d.Get("destination_ip_address").(string)), + Port: op.Int(d.Get("destination_port").(int)), + Maxclient: op.String(strconv.Itoa(d.Get("connection_limit").(int))), + }, + } + + // Get serviceType of a virtual server + vip := dt.LbvserverRes{} + err = nClient.Get(&vip, vipName) + if err != nil { + return fmt.Errorf("Error creating LoadBalancer Service : %s", err) + } + + if vip.Lbvserver[0].ServiceType != nil { + svcReq.Service.ServiceType = vip.Lbvserver[0].ServiceType + } else { + return fmt.Errorf("Error creating LoadBalancer : type of VIP '%s' is null.", vipName) + } + + // SSL offload + if *svcReq.Service.ServiceType == "SSL" { + *svcReq.Service.ServiceType = "HTTP" + } + + log.Printf("[INFO] Creating LoadBalancer Service %s", serviceName) + + // Add the service + err = nClient.Add(&svcReq) + if err != nil { + return fmt.Errorf("Error creating LoadBalancer Service: %s", err) + } + + // Bind the virtual server and the service + lbvserverServiceBindingReq := dt.LbvserverServiceBindingReq{ + LbvserverServiceBinding: &dt.LbvserverServiceBinding{ + Name: op.String(vipName), + ServiceName: op.String(serviceName), + }, + } + + err = nClient.Add(&lbvserverServiceBindingReq) + if err != nil { + return fmt.Errorf("Error creating LoadBalancer Service: %s", err) + } + + // Bind Health_check monitor + healthCheck := d.Get("health_check").(string) + if len(healthCheckMapFromSLtoVPX105[healthCheck]) > 0 { + healthCheck = healthCheckMapFromSLtoVPX105[healthCheck] + } + + serviceLbmonitorBindingReq := dt.ServiceLbmonitorBindingReq{ + ServiceLbmonitorBinding: &dt.ServiceLbmonitorBinding{ + Name: op.String(serviceName), + MonitorName: op.String(healthCheck), + }, + } + + err = nClient.Add(&serviceLbmonitorBindingReq) + if err != nil { + return fmt.Errorf("Error creating LoadBalancer Service: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", vipId, serviceName)) + + return resourceIBMLbVpxServiceRead(d, meta) +} + +func resourceIBMLbVpxServiceRead101(d 
*schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vipName, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + lbService, err := network.GetNadcLbVipServiceByName(sess, nadcId, vipName, serviceName) + if err != nil { + return fmt.Errorf("Unable to get load balancer service %s: %s", serviceName, err) + } + + d.Set("vip_id", strconv.Itoa(nadcId)+":"+vipName) + d.Set("name", *lbService.Name) + d.Set("destination_ip_address", *lbService.DestinationIpAddress) + d.Set("destination_port", *lbService.DestinationPort) + d.Set("weight", *lbService.Weight) + d.Set("health_check", *lbService.HealthCheck) + d.Set("connection_limit", *lbService.ConnectionLimit) + + return nil +} + +func resourceIBMLbVpxServiceRead105(d *schema.ResourceData, meta interface{}) error { + vipName, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + // Read a service + + svc := dt.ServiceRes{} + err = nClient.Get(&svc, serviceName) + if err != nil { + fmt.Printf("Error getting service information : %s", err.Error()) + } + d.Set("vip_id", strconv.Itoa(nadcId)+":"+vipName) + d.Set("name", *svc.Service[0].Name) + d.Set("destination_ip_address", *svc.Service[0].Ipaddress) + d.Set("destination_port", *svc.Service[0].Port) + + maxClientStr, err := strconv.Atoi(*svc.Service[0].Maxclient) + if err == nil { + d.Set("connection_limit", maxClientStr) + } + + // Read a monitor information + healthCheck := dt.ServiceLbmonitorBindingRes{} + err = nClient.Get(&healthCheck, serviceName) + if err != nil { + fmt.Printf("Error getting service information : %s", err.Error()) + } + if healthCheck.ServiceLbmonitorBinding[0].MonitorName != nil { + healthCheck := *healthCheck.ServiceLbmonitorBinding[0].MonitorName + if len(healthCheckMapFromVPX105toSL[healthCheck]) > 0 { + healthCheck = healthCheckMapFromVPX105toSL[healthCheck] + } + d.Set("health_check", healthCheck) + } + + return nil +} + +func resourceIBMLbVpxServiceUpdate101(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + vipName, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + lbService, err := network.GetNadcLbVipServiceByName(sess, nadcId, vipName, serviceName) + if err != nil { + return fmt.Errorf("Unable to get load balancer service: %s", err) + } + + // copy current service + template := datatypes.Network_LoadBalancer_Service(*lbService) + + if data, ok := d.GetOk("name"); ok { + template.Name = sl.String(data.(string)) + } + if data, ok := d.GetOk("destination_ip_address"); ok { + template.DestinationIpAddress = sl.String(data.(string)) + } + if data, ok := d.GetOk("destination_port"); ok { + template.DestinationPort = sl.Int(data.(int)) + } + if data, ok := d.GetOk("weight"); ok { + template.Weight = sl.Int(data.(int)) + } + if data, ok := d.GetOk("health_check"); ok { + template.HealthCheck = sl.String(data.(string)) + } + if data, ok := d.GetOk("connection_limit"); ok { + template.ConnectionLimit = sl.Int(data.(int)) + } + + lbVip := &datatypes.Network_LoadBalancer_VirtualIpAddress{ + Name: sl.String(vipName), + Services: []datatypes.Network_LoadBalancer_Service{ + 
+	}
+
+	successFlag, err := updateVpxService(sess, nadcId, lbVip)
+
+	if err != nil {
+		return fmt.Errorf("Error updating LoadBalancer Service: %s", err)
+	}
+
+	if !successFlag {
+		return errors.New("Error updating LoadBalancer Service")
+	}
+
+	return nil
+}
+
+func resourceIBMLbVpxServiceUpdate105(d *schema.ResourceData, meta interface{}) error {
+	_, nadcId, serviceName, err := parseServiceId(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error parsing vip id: %s", err)
+	}
+
+	nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId)
+	if err != nil {
+		return fmt.Errorf("Error getting netscaler information ID: %d", nadcId)
+	}
+
+	// Update a service
+	svcReq := dt.ServiceReq{
+		Service: &dt.Service{
+			Name: op.String(d.Get("name").(string)),
+		},
+	}
+
+	updateFlag := false
+
+	if d.HasChange("health_check") {
+		healthCheck := dt.ServiceLbmonitorBindingRes{}
+		err = nClient.Get(&healthCheck, serviceName)
+		if err != nil {
+			return fmt.Errorf("Error getting monitor information: %s", err)
+		}
+		monitorName := healthCheck.ServiceLbmonitorBinding[0].MonitorName
+		if monitorName != nil && *monitorName != "tcp-default" {
+			// Delete the previous health_check
+			err = nClient.Delete(&dt.ServiceLbmonitorBindingReq{}, serviceName, "args=monitor_name:"+*monitorName)
+			if err != nil {
+				return fmt.Errorf("Error deleting monitor %s: %s", *monitorName, err)
+			}
+		}
+
+		// Add a new health_check
+		monitor := d.Get("health_check").(string)
+		if len(healthCheckMapFromSLtoVPX105[monitor]) > 0 {
+			monitor = healthCheckMapFromSLtoVPX105[monitor]
+		}
+
+		serviceLbmonitorBindingReq := dt.ServiceLbmonitorBindingReq{
+			ServiceLbmonitorBinding: &dt.ServiceLbmonitorBinding{
+				Name:        op.String(serviceName),
+				MonitorName: op.String(monitor),
+			},
+		}
+
+		err = nClient.Add(&serviceLbmonitorBindingReq)
+		if err != nil {
+			return fmt.Errorf("Error adding a monitor: %s", err)
+		}
+	}
+
+	if d.HasChange("connection_limit") {
+		svcReq.Service.Maxclient = op.String(strconv.Itoa(d.Get("connection_limit").(int)))
+		updateFlag = true
+	}
+
+	log.Printf("[INFO] Updating LoadBalancer Service %s", serviceName)
+
+	if updateFlag {
+		err = nClient.Update(&svcReq)
+	}
+
+	if err != nil {
+		return fmt.Errorf("Error updating LoadBalancer Service: %s", err)
+	}
+
+	return nil
+}
+
+func resourceIBMLbVpxServiceDelete101(d *schema.ResourceData, meta interface{}) error {
+	vipName, nadcId, serviceName, err := parseServiceId(d.Id())
+	if err != nil {
+		return fmt.Errorf("Error parsing vip id: %s", err)
+	}
+
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkApplicationDeliveryControllerService(sess)
+
+	lbSvc := datatypes.Network_LoadBalancer_Service{
+		Name: sl.String(serviceName),
+		Vip: &datatypes.Network_LoadBalancer_VirtualIpAddress{
+			Name: sl.String(vipName),
+		},
+	}
+
+	for count := 0; count < 10; count++ {
+		err = service.Id(nadcId).DeleteLiveLoadBalancerService(&lbSvc)
+		log.Printf("[INFO] Deleting Loadbalancer service %s", serviceName)
+
+		if err != nil &&
+			(strings.Contains(err.Error(), "Operation already in progress") ||
+				strings.Contains(err.Error(), "Internal Error")) {
+			log.Printf("[INFO] Deleting Loadbalancer service Error : %s. 
Retry in 10 secs", err.Error()) + time.Sleep(time.Second * 10) + continue + } + + if err != nil && + (strings.Contains(err.Error(), "No Service") || + strings.Contains(err.Error(), "Unable to find object with unknown identifier of")) { + log.Printf("[INFO] Deleting Loadbalancer service %s Error : %s ", serviceName, err.Error()) + err = nil + } + + break + } + + if err != nil { + return fmt.Errorf("Error deleting LoadBalancer Service %s: %s", serviceName, err) + } + + return nil +} + +func resourceIBMLbVpxServiceDelete105(d *schema.ResourceData, meta interface{}) error { + _, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return fmt.Errorf("Error parsing vip id: %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + // Delete a service + err = nClient.Delete(&dt.ServiceReq{}, serviceName) + if err != nil { + return fmt.Errorf("Error deleting service %s: %s", serviceName, err) + } + + return nil +} + +func resourceIBMLbVpxServiceExists101(d *schema.ResourceData, meta interface{}) (bool, error) { + vipName, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return false, fmt.Errorf("Error parsing vip id: %s", err) + } + + sess := meta.(ClientSession).SoftLayerSession() + lbService, err := network.GetNadcLbVipServiceByName(sess, nadcId, vipName, serviceName) + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return *lbService.Name == serviceName, nil +} + +func resourceIBMLbVpxServiceExists105(d *schema.ResourceData, meta interface{}) (bool, error) { + _, nadcId, serviceName, err := parseServiceId(d.Id()) + if err != nil { + return false, fmt.Errorf("Error parsing vip id: %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return false, fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + svc := dt.ServiceRes{} + err = nClient.Get(&svc, serviceName) + if err != nil && strings.Contains(err.Error(), "No Service") { + return false, nil + } else if err != nil { + return false, fmt.Errorf("Unable to get load balancer service %s: %s", serviceName, err) + } + + return *svc.Service[0].Name == serviceName, nil +} diff --git a/ibm/resource_ibm_lb_vpx_service_test.go b/ibm/resource_ibm_lb_vpx_service_test.go new file mode 100644 index 0000000000..fbc04b7bee --- /dev/null +++ b/ibm/resource_ibm_lb_vpx_service_test.go @@ -0,0 +1,225 @@ +package ibm + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccIBMLbVpxService_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMLbVpxServiceConfig_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_lb_vpx_service.testacc_service1", "name", "test_load_balancer_service1"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_service.testacc_service1", "destination_port", "89"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_service.testacc_service1", "weight", "55"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_service.testacc_service2", "name", "test_load_balancer_service2"), + resource.TestCheckResourceAttr( + 
"ibm_lb_vpx_service.testacc_service2", "destination_port", "89"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_service.testacc_service2", "weight", "55"), + ), + }, + }, + }) +} + +func TestAccIBMLbVpxService_Basic105(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMLbVpxServiceConfig_basic105, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_lb_vpx_service.testacc_service3", "name", "testacc_service3"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_service.testacc_service3", "destination_port", "89"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_service.testacc_service4", "name", "testacc_service4"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_service.testacc_service4", "destination_port", "89"), + ), + }, + }, + }) +} + +var testAccCheckIBMLbVpxServiceConfig_basic = ` + +resource "ibm_compute_vm_instance" "vm1" { + hostname = "vm1" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal09" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_compute_vm_instance" "vm2" { + hostname = "vm2" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal09" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_lb_vpx" "testacc_vpx" { + datacenter = "dal09" + speed = 10 + version = "10.1" + plan = "Standard" + ip_count = 2 +} + +resource "ibm_lb_vpx_vip" "testacc_vip" { + name = "test_load_balancer_vip" + nad_controller_id = "${ibm_lb_vpx.testacc_vpx.id}" + load_balancing_method = "lc" + source_port = 80 + type = "HTTP" + virtual_ip_address = "${ibm_lb_vpx.testacc_vpx.vip_pool[0]}" +} + +resource "ibm_lb_vpx_service" "testacc_service1" { + name = "test_load_balancer_service1" + vip_id = "${ibm_lb_vpx_vip.testacc_vip.id}" + destination_ip_address = "${ibm_compute_vm_instance.vm1.ipv4_address}" + destination_port = 89 + weight = 55 + connection_limit = 5000 + health_check = "HTTP" +} + +resource "ibm_lb_vpx_service" "testacc_service2" { + name = "test_load_balancer_service2" + vip_id = "${ibm_lb_vpx_vip.testacc_vip.id}" + destination_ip_address = "${ibm_compute_vm_instance.vm2.ipv4_address}" + destination_port = 89 + weight = 55 + connection_limit = 5000 + health_check = "HTTP" +} +` + +var testAccCheckIBMLbVpxServiceConfig_basic105 = ` + +resource "ibm_compute_vm_instance" "vm3" { + hostname = "vm3" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal09" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_compute_vm_instance" "vm4" { + hostname = "vm4" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal09" + network_speed = 10 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_lb_vpx" "testacc_vpx2" { + datacenter = "dal09" + speed = 10 + version = "10.5" + plan = "Standard" + ip_count = 2 +} + +resource "ibm_lb_vpx_vip" "testacc_vip2" { + name = "testacc_vip2" + nad_controller_id = "${ibm_lb_vpx.testacc_vpx2.id}" + load_balancing_method = "lc" + source_port = 80 + type = "HTTP" + 
virtual_ip_address = "${ibm_lb_vpx.testacc_vpx2.vip_pool[0]}" +} + +resource "ibm_lb_vpx_service" "testacc_service3" { + name = "testacc_service3" + vip_id = "${ibm_lb_vpx_vip.testacc_vip2.id}" + destination_ip_address = "${ibm_compute_vm_instance.vm3.ipv4_address}" + destination_port = 89 + weight = 55 + connection_limit = 5000 + health_check = "HTTP" +} + +resource "ibm_lb_vpx_service" "testacc_service4" { + name = "testacc_service4" + vip_id = "${ibm_lb_vpx_vip.testacc_vip2.id}" + destination_ip_address = "${ibm_compute_vm_instance.vm4.ipv4_address}" + destination_port = 89 + weight = 55 + connection_limit = 5000 + health_check = "HTTP" +} +` + +/* +var testAccCheckIBMLbVpxServiceConfig_basic105 = ` + +resource "ibm_lb_vpx_vip" "testacc_vip2" { + name = "testacc_vip2" + nad_controller_id = 21641 + load_balancing_method = "lc" + source_port = 80 + type = "HTTP" + virtual_ip_address = "169.54.227.96" +} + +resource "ibm_lb_vpx_service" "testacc_service3" { + name = "testacc_service3" + vip_id = "${ibm_lb_vpx_vip.testacc_vip2.id}" + destination_ip_address = "10.10.10.10" + destination_port = 89 + weight = 55 + connection_limit = 5000 + health_check = "HTTP" +} + +resource "ibm_lb_vpx_service" "testacc_service4" { + name = "testacc_service4" + vip_id = "${ibm_lb_vpx_vip.testacc_vip2.id}" + destination_ip_address = "20.20.20.20" + destination_port = 89 + weight = 55 + connection_limit = 5000 + health_check = "HTTP" +} +` +*/ diff --git a/ibm/resource_ibm_lb_vpx_test.go b/ibm/resource_ibm_lb_vpx_test.go new file mode 100644 index 0000000000..7d047e81d4 --- /dev/null +++ b/ibm/resource_ibm_lb_vpx_test.go @@ -0,0 +1,82 @@ +package ibm + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" +) + +func TestAccIBMLbVpx_Basic(t *testing.T) { + var nadc datatypes.Network_Application_Delivery_Controller + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMLbVpxConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMLbVpxExists("ibm_lb_vpx.testacc_foobar_vpx", &nadc), + resource.TestCheckResourceAttr( + "ibm_lb_vpx.testacc_foobar_vpx", "type", "NetScaler VPX"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx.testacc_foobar_vpx", "datacenter", "dal09"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx.testacc_foobar_vpx", "speed", "10"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx.testacc_foobar_vpx", "plan", "Standard"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx.testacc_foobar_vpx", "ip_count", "2"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx.testacc_foobar_vpx", "version", "10.1"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx.testacc_foobar_vpx", "vip_pool.#", "2"), + ), + }, + }, + }) +} + +func testAccCheckIBMLbVpxExists(n string, nadc *datatypes.Network_Application_Delivery_Controller) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Record ID is set") + } + + nadcId, _ := strconv.Atoi(rs.Primary.ID) + + service := services.GetNetworkApplicationDeliveryControllerService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + found, err := service.Id(nadcId).GetObject() + if err != nil 
{ + return err + } + + if strconv.Itoa(int(*found.Id)) != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + *nadc = found + + return nil + } +} + +const testAccCheckIBMLbVpxConfig_basic = ` +resource "ibm_lb_vpx" "testacc_foobar_vpx" { + datacenter = "dal09" + speed = 10 + version = "10.1" + plan = "Standard" + ip_count = 2 +}` diff --git a/ibm/resource_ibm_lb_vpx_vip.go b/ibm/resource_ibm_lb_vpx_vip.go new file mode 100644 index 0000000000..bbba466652 --- /dev/null +++ b/ibm/resource_ibm_lb_vpx_vip.go @@ -0,0 +1,788 @@ +package ibm + +import ( + "encoding/base64" + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/helpers/network" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" + + "github.com/minsikl/netscaler-nitro-go/client" + dt "github.com/minsikl/netscaler-nitro-go/datatypes" + "github.com/minsikl/netscaler-nitro-go/op" +) + +const ( + VPX_VERSION_10_1 = "10.1" +) + +var ( + // Load balancing algorithm mapping tables + + lbMethodMapFromSLtoVPX105 = map[string][2]string{ + "rr": {"NONE", "ROUNDROBIN"}, + "sr": {"NONE", "LEASTRESPONSETIME"}, + "lc": {"NONE", "LEASTCONNECTION"}, + "pi": {"SOURCEIP", "ROUNDROBIN"}, + "pi-sr": {"SOURCEIP", "LEASTRESPONSETIME"}, + "pi-lc": {"SOURCEIP", "LEASTCONNECTION"}, + "ic": {"COOKIEINSERT", "ROUNDROBIN"}, + "ic-sr": {"COOKIEINSERT", "LEASTRESPONSETIME"}, + "ic-lc": {"COOKIEINSERT", "LEASTCONNECTION"}, + } + + lbMethodMapFromVPX105toSL = map[[2]string]string{ + {"NONE", "ROUNDROBIN"}: "rr", + {"NONE", "LEASTRESPONSETIME"}: "sr", + {"NONE", "LEASTCONNECTION"}: "lc", + {"SOURCEIP", "ROUNDROBIN"}: "pi", + {"SOURCEIP", "LEASTRESPONSETIME"}: "pi-sr", + {"SOURCEIP", "LEASTCONNECTION"}: "pi-lc", + {"COOKIEINSERT", "ROUNDROBIN"}: "ic", + {"COOKIEINSERT", "LEASTRESPONSETIME"}: "ic-sr", + {"COOKIEINSERT", "LEASTCONNECTION"}: "ic-lc", + } +) + +func resourceIBMLbVpxVip() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMLbVpxVipCreate, + Read: resourceIBMLbVpxVipRead, + Update: resourceIBMLbVpxVipUpdate, + Delete: resourceIBMLbVpxVipDelete, + Exists: resourceIBMLbVpxVipExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "nad_controller_id": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "load_balancing_method": { + Type: schema.TypeString, + Required: true, + }, + + "persistence": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + // name field is actually used as an ID in SoftLayer + // http://sldn.softlayer.com/reference/services/SoftLayer_Network_Application_Delivery_Controller/updateLiveLoadBalancer + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "source_port": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // security_certificate_id is only acceptable with SSL type + "security_certificate_id": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "virtual_ip_address": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceIBMLbVpxVipCreate(d *schema.ResourceData, meta interface{}) error { + version, err := getVPXVersion(d.Get("nad_controller_id").(int), meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error 
creating Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxVipCreate101(d, meta) + } + + return resourceIBMLbVpxVipCreate105(d, meta) +} + +func resourceIBMLbVpxVipRead(d *schema.ResourceData, meta interface{}) error { + nadcId, _, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("Error Reading Virtual IP Address: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error Reading Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxVipRead101(d, meta) + } + + return resourceIBMLbVpxVipRead105(d, meta) +} + +func resourceIBMLbVpxVipUpdate(d *schema.ResourceData, meta interface{}) error { + nadcId, _, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("Error updating Virtual IP Address: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error updating Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxVipUpdate101(d, meta) + } + + return resourceIBMLbVpxVipUpdate105(d, meta) +} + +func resourceIBMLbVpxVipDelete(d *schema.ResourceData, meta interface{}) error { + nadcId, _, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxVipDelete101(d, meta) + } + + return resourceIBMLbVpxVipDelete105(d, meta) +} + +func resourceIBMLbVpxVipExists(d *schema.ResourceData, meta interface{}) (bool, error) { + nadcId, _, err := parseId(d.Id()) + if err != nil { + return false, fmt.Errorf("Error in exists: %s", err) + } + + version, err := getVPXVersion(nadcId, meta.(ClientSession).SoftLayerSession()) + if err != nil { + return false, fmt.Errorf("Error in exists: %s", err) + } + + if version == VPX_VERSION_10_1 { + return resourceIBMLbVpxVipExists101(d, meta) + } + + return resourceIBMLbVpxVipExists105(d, meta) +} + +func parseId(id string) (int, string, error) { + if len(id) < 1 { + return 0, "", fmt.Errorf("Failed to parse id %s: Unable to get a VIP ID", id) + } + + idList := strings.Split(id, ":") + if len(idList) != 2 || len(idList[0]) < 1 || len(idList[1]) < 1 { + return 0, "", fmt.Errorf("Failed to parse id %s: Invalid VIP ID", id) + } + + nadcId, err := strconv.Atoi(idList[0]) + if err != nil { + return 0, "", fmt.Errorf("Failed to parse id : Unable to get a VIP ID %s", err) + } + + vipName := idList[1] + return nadcId, vipName, nil +} + +func resourceIBMLbVpxVipCreate101(d *schema.ResourceData, meta interface{}) error { + if _, ok := d.GetOk("security_certificate_id"); ok { + return fmt.Errorf("Error creating Virtual Ip Address: security_certificate_id is not supported with VPX 10.1.") + } + + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkApplicationDeliveryControllerService(sess) + + nadcId := d.Get("nad_controller_id").(int) + vipName := d.Get("name").(string) + + template := datatypes.Network_LoadBalancer_VirtualIpAddress{ + LoadBalancingMethod: sl.String(d.Get("load_balancing_method").(string)), + Name: sl.String(vipName), + SourcePort: sl.Int(d.Get("source_port").(int)), + Type: sl.String(d.Get("type").(string)), + VirtualIpAddress: 
sl.String(d.Get("virtual_ip_address").(string)), + } + + log.Printf("[INFO] Creating Virtual Ip Address %s", *template.VirtualIpAddress) + + var err error + var successFlag bool + + for count := 0; count < 10; count++ { + successFlag, err = service.Id(nadcId).CreateLiveLoadBalancer(&template) + log.Printf("[INFO] Creating Virtual Ip Address %s successFlag : %t", *template.VirtualIpAddress, successFlag) + + if err != nil && strings.Contains(err.Error(), "already exists") { + log.Printf("[INFO] Creating Virtual Ip Address %s error : %s. Ingore the error.", *template.VirtualIpAddress, err.Error()) + successFlag = true + err = nil + break + } + + if err != nil && strings.Contains(err.Error(), "Operation already in progress") { + log.Printf("[INFO] Creating Virtual Ip Address %s error : %s. Retry in 10 secs", *template.VirtualIpAddress, err.Error()) + time.Sleep(time.Second * 10) + continue + } + + break + } + + if err != nil { + return fmt.Errorf("Error creating Virtual Ip Address: %s", err) + } + + if !successFlag { + return errors.New("Error creating Virtual Ip Address") + } + + d.SetId(fmt.Sprintf("%d:%s", nadcId, vipName)) + + log.Printf("[INFO] Netscaler VPX VIP ID: %s", d.Id()) + + return resourceIBMLbVpxVipRead(d, meta) +} + +func resourceIBMLbVpxVipCreate105(d *schema.ResourceData, meta interface{}) error { + nadcId := d.Get("nad_controller_id").(int) + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + vipName := d.Get("name").(string) + vipType := d.Get("type").(string) + securityCertificateId := d.Get("security_certificate_id").(int) + + lbvserverReq := dt.LbvserverReq{ + Lbvserver: &dt.Lbvserver{ + Name: op.String(vipName), + Ipv46: op.String(d.Get("virtual_ip_address").(string)), + Port: op.Int(d.Get("source_port").(int)), + ServiceType: op.String(vipType), + }, + } + + if len(d.Get("persistence").(string)) > 0 { + lbvserverReq.Lbvserver.Lbmethod = op.String(d.Get("persistence").(string)) + } + lbMethodPair := lbMethodMapFromSLtoVPX105[d.Get("load_balancing_method").(string)] + if len(lbMethodPair[1]) > 0 { + if len(lbMethodPair[0]) > 0 { + lbvserverReq.Lbvserver.Persistencetype = &lbMethodPair[0] + } else { + lbvserverReq.Lbvserver.Persistencetype = op.String("NONE") + } + lbvserverReq.Lbvserver.Lbmethod = &lbMethodPair[1] + } + + log.Printf("[INFO] Creating Virtual Ip Address %s", *lbvserverReq.Lbvserver.Ipv46) + + // security_certificated_id is only available when type is 'SSL' + if securityCertificateId > 0 && vipType != "SSL" { + return fmt.Errorf("Error creating VIP : security_certificated_id is only available when type is 'SSL'") + } else if securityCertificateId == 0 && vipType == "SSL" { + return fmt.Errorf("Error creating VIP : 'SSL' type requires security_certificated_id.") + + } + + // Create a virtual server + err = nClient.Add(&lbvserverReq) + if err != nil { + return err + } + + // Configure security_certificate for SSL Offload. + if vipType == "SSL" { + // Delete the previous security certificate. + deleteSecurityCertificate(nClient, vipName, securityCertificateId) + + err = configureSecurityCertificate(nClient, meta.(ClientSession).SoftLayerSession(), vipName, securityCertificateId) + + if err != nil { + // Rollback VIP creation and return an error. 
+			resourceIBMLbVpxVipDelete105(d, meta)
+			return err
+		}
+	}
+
+	d.SetId(fmt.Sprintf("%d:%s", nadcId, vipName))
+
+	log.Printf("[INFO] Netscaler VPX VIP ID: %s", d.Id())
+
+	return resourceIBMLbVpxVipRead(d, meta)
+}
+
+func resourceIBMLbVpxVipRead101(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+
+	nadcId, vipName, err := parseId(d.Id())
+	if err != nil {
+		return fmt.Errorf("ibm_lb_vpx : %s", err)
+	}
+
+	vip, err := network.GetNadcLbVipByName(sess, nadcId, vipName)
+	if err != nil {
+		return fmt.Errorf("ibm_lb_vpx : while looking up a virtual ip address : %s", err)
+	}
+
+	d.Set("nad_controller_id", nadcId)
+	if vip.LoadBalancingMethod != nil {
+		d.Set("load_balancing_method", *vip.LoadBalancingMethod)
+	}
+
+	if vip.Name != nil {
+		d.Set("name", *vip.Name)
+	}
+
+	if vip.SourcePort != nil {
+		d.Set("source_port", *vip.SourcePort)
+	}
+
+	if vip.Type != nil {
+		d.Set("type", *vip.Type)
+	}
+
+	if vip.VirtualIpAddress != nil {
+		d.Set("virtual_ip_address", *vip.VirtualIpAddress)
+	}
+
+	return nil
+}
+
+func resourceIBMLbVpxVipRead105(d *schema.ResourceData, meta interface{}) error {
+	nadcId, vipName, err := parseId(d.Id())
+	if err != nil {
+		return fmt.Errorf("ibm_lb_vpx : %s", err)
+	}
+
+	nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId)
+	if err != nil {
+		return fmt.Errorf("Error getting netscaler information ID: %d", nadcId)
+	}
+
+	// Read a virtual server
+	vip := dt.LbvserverRes{}
+	err = nClient.Get(&vip, vipName)
+	if err != nil {
+		return fmt.Errorf("Error getting VIP information: %s", err)
+	}
+
+	d.Set("nad_controller_id", nadcId)
+	if vip.Lbvserver[0].Lbmethod != nil {
+		d.Set("load_balancing_method", *vip.Lbvserver[0].Lbmethod)
+	}
+
+	if vip.Lbvserver[0].Name != nil {
+		d.Set("name", *vip.Lbvserver[0].Name)
+	}
+
+	if vip.Lbvserver[0].Port != nil {
+		d.Set("source_port", *vip.Lbvserver[0].Port)
+	}
+
+	if vip.Lbvserver[0].ServiceType != nil {
+		d.Set("type", *vip.Lbvserver[0].ServiceType)
+	}
+
+	if vip.Lbvserver[0].Persistencetype != nil {
+		if *vip.Lbvserver[0].Persistencetype == "NONE" {
+			d.Set("persistence", nil)
+		} else {
+			d.Set("persistence", *vip.Lbvserver[0].Persistencetype)
+		}
+	}
+
+	if vip.Lbvserver[0].Persistencetype != nil && vip.Lbvserver[0].Lbmethod != nil {
+		lbMethod := lbMethodMapFromVPX105toSL[[2]string{*vip.Lbvserver[0].Persistencetype, *vip.Lbvserver[0].Lbmethod}]
+		if len(lbMethod) > 0 {
+			d.Set("load_balancing_method", lbMethod)
+		}
+	}
+
+	if vip.Lbvserver[0].Ipv46 != nil {
+		d.Set("virtual_ip_address", *vip.Lbvserver[0].Ipv46)
+	}
+
+	// Read the security certificate information
+	securityCertificateId, err := getSecurityCertificateId(nClient, vipName)
+	if err == nil {
+		d.Set("security_certificate_id", securityCertificateId)
+	} else {
+		if _, ok := d.GetOk("security_certificate_id"); ok {
+			d.Set("security_certificate_id", 0)
+		}
+	}
+
+	return nil
+}
+
+func resourceIBMLbVpxVipUpdate101(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkApplicationDeliveryControllerService(sess)
+
+	nadcId := d.Get("nad_controller_id").(int)
+	template := datatypes.Network_LoadBalancer_VirtualIpAddress{
+		Name: sl.String(d.Get("name").(string)),
+	}
+
+	if d.HasChange("load_balancing_method") {
+		template.LoadBalancingMethod = sl.String(d.Get("load_balancing_method").(string))
+	}
+
+	if d.HasChange("virtual_ip_address") {
+		template.VirtualIpAddress = sl.String(d.Get("virtual_ip_address").(string))
+	}
+
+	var err error
+
+	for count := 0; count < 10; count++ {
+		var 
successFlag bool + successFlag, err = service.Id(nadcId).UpdateLiveLoadBalancer(&template) + log.Printf("[INFO] Updating Virtual Ip Address successFlag : %t", successFlag) + + if err != nil && strings.Contains(err.Error(), "Operation already in progress") { + log.Printf("[INFO] Updating Virtual Ip Address error : %s. Retry in 10 secs", err.Error()) + time.Sleep(time.Second * 10) + continue + } + + break + } + + if err != nil { + return fmt.Errorf("Error updating Virtual Ip Address: %s", err) + } + + return resourceIBMLbVpxVipRead(d, meta) +} + +func resourceIBMLbVpxVipUpdate105(d *schema.ResourceData, meta interface{}) error { + nadcId := d.Get("nad_controller_id").(int) + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error getting netscaler information ID: %d", nadcId) + } + + lbvserverReq := dt.LbvserverReq{ + Lbvserver: &dt.Lbvserver{ + Name: op.String(d.Get("name").(string)), + }, + } + + if d.HasChange("load_balancing_method") || d.HasChange("persistence") { + lbvserverReq.Lbvserver.Persistencetype = op.String(d.Get("persistence").(string)) + lbvserverReq.Lbvserver.Lbmethod = op.String(d.Get("load_balancing_method").(string)) + + lbMethodPair := lbMethodMapFromSLtoVPX105[d.Get("load_balancing_method").(string)] + if len(lbMethodPair[1]) > 0 { + if len(lbMethodPair[0]) > 0 { + lbvserverReq.Lbvserver.Persistencetype = &lbMethodPair[0] + } else { + lbvserverReq.Lbvserver.Persistencetype = op.String("NONE") + } + lbvserverReq.Lbvserver.Lbmethod = &lbMethodPair[1] + } + } + + if d.HasChange("virtual_ip_address") { + lbvserverReq.Lbvserver.Ipv46 = sl.String(d.Get("virtual_ip_address").(string)) + } + + // Update the virtual server + err = nClient.Update(&lbvserverReq) + if err != nil { + return fmt.Errorf("Error updating Virtual Ip Address: " + err.Error()) + } + + return resourceIBMLbVpxVipRead(d, meta) +} + +func resourceIBMLbVpxVipDelete101(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkApplicationDeliveryControllerService(sess) + + nadcId, vipName, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("ibm_lb_vpx : %s", err) + } + + for count := 0; count < 10; count++ { + var successFlag bool + successFlag, err = service.Id(nadcId).DeleteLiveLoadBalancer( + &datatypes.Network_LoadBalancer_VirtualIpAddress{Name: sl.String(vipName)}, + ) + log.Printf("[INFO] Deleting Virtual Ip Address %s successFlag : %t", vipName, successFlag) + + if err != nil && + (strings.Contains(err.Error(), "Operation already in progress") || + strings.Contains(err.Error(), "No Service")) { + log.Printf("[INFO] Deleting Virtual Ip Address %s Error : %s Retry in 10 secs", vipName, err.Error()) + time.Sleep(time.Second * 10) + continue + } + + // Check if the resource is already deleted. + if err != nil && strings.Contains(err.Error(), "Unable to find object with unknown identifier of") { + log.Printf("[INFO] Deleting Virtual Ip Address %s Error : %s . 
Ignore the error.", vipName, err.Error()) + err = nil + } + + break + } + + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address %s: %s", vipName, err) + } + + return nil +} + +func resourceIBMLbVpxVipDelete105(d *schema.ResourceData, meta interface{}) error { + nadcId, vipName, err := parseId(d.Id()) + if err != nil { + return fmt.Errorf("ibm_lb_vpx : %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address %s: %s", vipName, err) + } + + // Delete a virtual server + err = nClient.Delete(&dt.LbvserverReq{}, vipName) + if err != nil { + return fmt.Errorf("Error deleting Virtual Ip Address %s: %s", vipName, err) + } + + // Delete a security certificate + securityCertificateId, err := getSecurityCertificateId(nClient, vipName) + if err == nil { + deleteSecurityCertificate(nClient, vipName, securityCertificateId) + } + + return nil +} + +func resourceIBMLbVpxVipExists101(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + nadcId, vipName, err := parseId(d.Id()) + if err != nil { + return false, fmt.Errorf("ibm_lb_vpx : %s", err) + } + + vip, err := network.GetNadcLbVipByName(sess, nadcId, vipName) + if err != nil { + if apiErr, ok := err.(sl.Error); ok { + if apiErr.StatusCode == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + return vip != nil && *vip.Name == vipName, nil +} + +func resourceIBMLbVpxVipExists105(d *schema.ResourceData, meta interface{}) (bool, error) { + nadcId, vipName, err := parseId(d.Id()) + if err != nil { + return false, fmt.Errorf("ibm_lb_vpx : %s", err) + } + + nClient, err := getNitroClient(meta.(ClientSession).SoftLayerSession(), nadcId) + if err != nil { + return false, err + } + + // Read a virtual server + vip := dt.LbvserverRes{} + err = nClient.Get(&vip, vipName) + + if err != nil && strings.Contains(err.Error(), "No such resource") { + return false, nil + } else if err != nil { + return false, err + } + + return true, nil +} + +func getNitroClient(sess *session.Session, nadcId int) (*client.NitroClient, error) { + service := services.GetNetworkApplicationDeliveryControllerService(sess) + nadc, err := service.Id(nadcId).Mask("managementIpAddress,password[password]").GetObject() + if err != nil { + return nil, fmt.Errorf("Error retrieving netscaler: %s", err) + } + return client.NewNitroClient("http", *nadc.ManagementIpAddress, dt.CONFIG, + "root", *nadc.Password.Password, true), nil +} + +func configureSecurityCertificate(nClient *client.NitroClient, sess *session.Session, vipName string, securityCertificateId int) error { + // Read security_certificate + service := services.GetSecurityCertificateService(sess) + cert, err := service.Id(securityCertificateId).GetObject() + if err != nil { + return fmt.Errorf("Unable to get Security Certificate: %s", err) + } + + certName := vipName + "_" + strconv.Itoa(securityCertificateId) + certFileName := certName + ".cert" + keyFileName := certName + ".key" + + // Delete previous security certificate + deleteSecurityCertificate(nClient, vipName, securityCertificateId) + + // Upload security_certificate + certReq := dt.SystemfileReq{ + Systemfile: &dt.Systemfile{ + Filename: op.String(certFileName), + Filecontent: op.String(base64.StdEncoding.EncodeToString([]byte(*cert.Certificate))), + Filelocation: op.String("/nsconfig/ssl/"), + Fileencoding: op.String("BASE64"), + }, + } 
+	err = nClient.Add(&certReq)
+	if err != nil {
+		deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+		return err
+	}
+
+	keyReq := dt.SystemfileReq{
+		Systemfile: &dt.Systemfile{
+			Filename:     op.String(keyFileName),
+			Filecontent:  op.String(base64.StdEncoding.EncodeToString([]byte(*cert.PrivateKey))),
+			Filelocation: op.String("/nsconfig/ssl/"),
+			Fileencoding: op.String("BASE64"),
+		},
+	}
+
+	err = nClient.Add(&keyReq)
+	if err != nil {
+		deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+		return err
+	}
+
+	// Enable SSL
+
+	sslFeature := dt.NsfeatureReq{
+		Nsfeature: &dt.Nsfeature{
+			Feature: []string{"ssl"},
+		},
+	}
+
+	err = nClient.Enable(&sslFeature, true)
+	if err != nil {
+		deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+		return err
+	}
+
+	// Register SSL
+
+	sslCertKey := dt.SslcertkeyReq{
+		Sslcertkey: &dt.Sslcertkey{
+			Certkey: op.String(certName),
+			Cert:    op.String(certFileName),
+			Key:     op.String(keyFileName),
+		},
+	}
+
+	err = nClient.Add(&sslCertKey)
+	if err != nil {
+		deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+		return err
+	}
+
+	// Bind security_certificate
+
+	sslBind := dt.SslvserverSslcertkeyBindingReq{
+		SslvserverSslcertkeyBinding: &dt.SslvserverSslcertkeyBinding{
+			Vservername: op.String(vipName),
+			Certkeyname: op.String(certName),
+		},
+	}
+
+	err = nClient.Add(&sslBind)
+	if err != nil {
+		deleteSecurityCertificate(nClient, vipName, securityCertificateId)
+		return err
+	}
+	return nil
+}
+
+func deleteSecurityCertificate(nClient *client.NitroClient, vipName string, securityCertificateId int) {
+	certName := vipName + "_" + strconv.Itoa(securityCertificateId)
+	certFileName := certName + ".cert"
+	keyFileName := certName + ".key"
+
+	// Delete sslvserversslcertkeybinding
+	nClient.Delete(&dt.SslvserverSslcertkeyBindingReq{}, vipName, "args=certkeyname:"+certName)
+
+	// Delete sslcertkey
+	nClient.Delete(&dt.SslcertkeyReq{}, certName)
+
+	// Delete cert
+	nClient.Delete(&dt.SystemfileReq{}, certFileName, "args=fileLocation:"+"%2Fnsconfig%2Fssl%2F")
+
+	// Delete key
+	nClient.Delete(&dt.SystemfileReq{}, keyFileName, "args=fileLocation:"+"%2Fnsconfig%2Fssl%2F")
+}
+
+func getSecurityCertificateId(nClient *client.NitroClient, vipName string) (int, error) {
+	securityCertificateId := 0
+	res := dt.SslcertkeyRes{}
+	err := nClient.Get(&res, "")
+	if err != nil {
+		return 0, fmt.Errorf("Error getting securityCertificateId information : %s", err.Error())
+	}
+
+	// The CertKey name consists of `vipName`_`securityCertificateId`.
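+	// For example, a VIP named "myvip" bound to certificate ID 12345 yields the
+	// certkey "myvip_12345"; the numeric suffix is parsed back out below.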
+ for _, sslCertKey := range res.Sslcertkey { + sslCertKeyArr := strings.Split(*sslCertKey.Certkey, "_") + if len(sslCertKeyArr) < 2 || !strings.HasPrefix(*sslCertKey.Certkey, vipName+"_") { + continue + } + + securityCertificateId, err = strconv.Atoi(sslCertKeyArr[len(sslCertKeyArr)-1]) + if err != nil { + continue + } else { + return securityCertificateId, nil + } + } + return 0, fmt.Errorf("Error getting securityCertificateId information : No security certificate for %s", vipName) +} diff --git a/ibm/resource_ibm_lb_vpx_vip_test.go b/ibm/resource_ibm_lb_vpx_vip_test.go new file mode 100644 index 0000000000..6f69fe4795 --- /dev/null +++ b/ibm/resource_ibm_lb_vpx_vip_test.go @@ -0,0 +1,102 @@ +package ibm + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/softlayer/softlayer-go/helpers/network" +) + +func TestAccIBMLbVpxVip_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMLbVpxVipDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCheckIBMLbVpxVipConfig_basic, + Check: resource.ComposeTestCheckFunc( + // Test VPX 10.1 + resource.TestCheckResourceAttr( + "ibm_lb_vpx_vip.testacc_vip", "load_balancing_method", "lc"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_vip.testacc_vip", "name", "test_load_balancer_vip"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_vip.testacc_vip", "source_port", "80"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_vip.testacc_vip", "type", "HTTP"), + // Test VPX 10.5 + resource.TestCheckResourceAttr( + "ibm_lb_vpx_vip.testacc_vip105", "load_balancing_method", "lc"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_vip.testacc_vip105", "name", "test_load_balancer_vip105"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_vip.testacc_vip105", "source_port", "80"), + resource.TestCheckResourceAttr( + "ibm_lb_vpx_vip.testacc_vip105", "type", "HTTP"), + ), + }, + }, + }) +} + +func testAccCheckIBMLbVpxVipDestroy(s *terraform.State) error { + sess := testAccProvider.Meta().(ClientSession).SoftLayerSession() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_lb_vpx_vip" { + continue + } + + nadcId, _ := strconv.Atoi(rs.Primary.Attributes["nad_controller_id"]) + vipName, _ := rs.Primary.Attributes["name"] + + vip, _ := network.GetNadcLbVipByName(sess, nadcId, vipName) + + if vip != nil { + return fmt.Errorf("Netscaler VPX VIP still exists") + } + } + + return nil +} + +var testAccCheckIBMLbVpxVipConfig_basic = ` +resource "ibm_lb_vpx" "testacc_foobar_nadc" { + datacenter = "dal09" + speed = 10 + version = "10.1" + plan = "Standard" + ip_count = 2 +} + +resource "ibm_lb_vpx_vip" "testacc_vip" { + name = "test_load_balancer_vip" + nad_controller_id = "${ibm_lb_vpx.testacc_foobar_nadc.id}" + load_balancing_method = "lc" + source_port = 80 + type = "HTTP" + virtual_ip_address = "${ibm_lb_vpx.testacc_foobar_nadc.vip_pool[0]}" +} + +resource "ibm_lb_vpx" "testacc_foobar_nadc105" { + datacenter = "dal09" + speed = 10 + version = "10.5" + plan = "Standard" + ip_count = 2 +} + +resource "ibm_lb_vpx_vip" "testacc_vip105" { + name = "test_load_balancer_vip105" + nad_controller_id = "${ibm_lb_vpx.testacc_foobar_nadc105.id}" + load_balancing_method = "lc" + source_port = 80 + type = "HTTP" + virtual_ip_address = "${ibm_lb_vpx.testacc_foobar_nadc105.vip_pool[0]}" +} +` diff --git a/ibm/resource_ibm_network_public_ip.go 
b/ibm/resource_ibm_network_public_ip.go new file mode 100644 index 0000000000..04793a859b --- /dev/null +++ b/ibm/resource_ibm_network_public_ip.go @@ -0,0 +1,308 @@ +package ibm + +import ( + "fmt" + "log" + "net" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + AdditionalServicesGlobalIpAddressesPackageType = "ADDITIONAL_SERVICES_GLOBAL_IP_ADDRESSES" + + GlobalIpMask = "id,ipAddress[ipAddress],destinationIpAddress[ipAddress]" +) + +func resourceIBMNetworkPublicIp() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMNetworkPublicIpCreate, + Read: resourceIBMNetworkPublicIpRead, + Update: resourceIBMNetworkPublicIpUpdate, + Delete: resourceIBMNetworkPublicIpDelete, + Exists: resourceIBMNetworkPublicIpExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "routes_to": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + address := v.(string) + if net.ParseIP(address) == nil { + errors = append(errors, fmt.Errorf("Invalid IP format: %s", address)) + } + return + }, + DiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool { + newRoutesTo := net.ParseIP(n) + // Return true when n has the appropriate IPv6 format and + // the compressed value of n equals the compressed value of o. + return newRoutesTo != nil && (newRoutesTo.String() == net.ParseIP(o).String()) + }, + }, + }, + } +} + +func resourceIBMNetworkPublicIpCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + // Find price items with AdditionalServicesGlobalIpAddresses + productOrderContainer, err := buildGlobalIpProductOrderContainer(d, sess, AdditionalServicesGlobalIpAddressesPackageType) + if err != nil { + // Find price items with AdditionalServices + productOrderContainer, err = buildGlobalIpProductOrderContainer(d, sess, AdditionalServicesPackageType) + if err != nil { + return fmt.Errorf("Error creating global ip: %s", err) + } + } + + log.Println("[INFO] Creating global ip") + + receipt, err := services.GetProductOrderService(sess). 
+		PlaceOrder(productOrderContainer, sl.Bool(false))
+	if err != nil {
+		return fmt.Errorf("Error during creation of global ip: %s", err)
+	}
+
+	globalIp, err := findGlobalIpByOrderId(sess, *receipt.OrderId)
+	if err != nil {
+		return fmt.Errorf("Error during creation of global ip: %s", err)
+	}
+
+	d.SetId(fmt.Sprintf("%d", *globalIp.Id))
+	d.Set("ip_address", *globalIp.IpAddress.IpAddress)
+
+	return resourceIBMNetworkPublicIpUpdate(d, meta)
+}
+
+func resourceIBMNetworkPublicIpRead(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSubnetIpAddressGlobalService(sess)
+
+	globalIpId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid global ip ID, must be an integer: %s", err)
+	}
+
+	globalIp, err := service.Id(globalIpId).Mask(GlobalIpMask).GetObject()
+	if err != nil {
+		return fmt.Errorf("Error retrieving Global Ip: %s", err)
+	}
+
+	d.Set("id", *globalIp.Id)
+	d.Set("ip_address", *globalIp.IpAddress.IpAddress)
+	if globalIp.DestinationIpAddress != nil {
+		d.Set("routes_to", *globalIp.DestinationIpAddress.IpAddress)
+	}
+	return nil
+}
+
+func resourceIBMNetworkPublicIpUpdate(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSubnetIpAddressGlobalService(sess)
+
+	globalIpId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid global ip ID, must be an integer: %s", err)
+	}
+
+	// Expand a compressed IPv6 address ("::" form) to the full 39-character
+	// representation (eight 4-digit groups separated by colons) before passing
+	// it to Route. This logic assumes a single "::" gap that is not at the very
+	// start or end of the address.
+	routes_to := d.Get("routes_to").(string)
+	if strings.Contains(routes_to, ":") && len(routes_to) != 39 {
+		parts := strings.Split(routes_to, ":")
+		for x, s := range parts {
+			if s == "" {
+				// Splitting "a::b" leaves one empty element for the gap, so the
+				// number of missing zero groups is 9-len(parts).
+				zeroes := 9 - len(parts)
+				parts[x] = strings.Repeat("0000:", zeroes)[:(zeroes*4)+(zeroes-1)]
+			} else {
+				// Left-pad each group to four hex digits.
+				parts[x] = fmt.Sprintf("%04s", s)
+			}
+		}
+
+		routes_to = strings.Join(parts, ":")
+		d.Set("routes_to", routes_to)
+	}
+
+	_, err = service.Id(globalIpId).Route(sl.String(routes_to))
+	if err != nil {
+		return fmt.Errorf("Error editing Global Ip: %s", err)
+	}
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"pending"},
+		Target:  []string{"complete"},
+		Refresh: func() (interface{}, string, error) {
+			transaction, err := service.Id(globalIpId).GetActiveTransaction()
+			if err != nil {
+				return datatypes.Network_Subnet_IpAddress_Global{}, "pending", err
+			}
+			if transaction.Id == nil {
+				return datatypes.Network_Subnet_IpAddress_Global{}, "complete", nil
+			}
+			return datatypes.Network_Subnet_IpAddress_Global{}, "pending", nil
+		},
+		Timeout:    10 * time.Minute,
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for global ip destination ip address to become active: %s", err)
+	}
+
+	return nil
+}
+
+func resourceIBMNetworkPublicIpDelete(d *schema.ResourceData, meta interface{}) error {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSubnetIpAddressGlobalService(sess)
+
+	globalIpId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return fmt.Errorf("Not a valid global ip ID, must be an integer: %s", err)
+	}
+
+	billingItem, err := service.Id(globalIpId).GetBillingItem()
+	if err != nil {
+		return fmt.Errorf("Error deleting global ip: %s", err)
+	}
+
+	if billingItem.Id == nil {
+		return nil
+	}
+
+	_, err = services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService()
+
+	return err
+}
+
+func resourceIBMNetworkPublicIpExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+	sess := meta.(ClientSession).SoftLayerSession()
+	service := services.GetNetworkSubnetIpAddressGlobalService(sess)
+
+	globalIpId, err := strconv.Atoi(d.Id())
+	if err != nil {
+		return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err)
+	}
+
+	result, err := service.Id(globalIpId).GetObject()
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 {
+			return false, nil
+		}
+		return false, fmt.Errorf("Error retrieving global ip: %s", err)
+	}
+	return result.Id != nil && *result.Id == globalIpId, nil
+}
+
+func findGlobalIpByOrderId(sess *session.Session, orderId int) (datatypes.Network_Subnet_IpAddress_Global, error) {
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"pending"},
+		Target:  []string{"complete"},
+		Refresh: func() (interface{}, string, error) {
+			globalIps, err := services.GetAccountService(sess).
+				Filter(filter.Path("globalIpRecords.billingItem.orderItem.order.id").
+					Eq(strconv.Itoa(orderId)).Build()).
+				Mask("id,ipAddress[ipAddress]").
+				GetGlobalIpRecords()
+			if err != nil {
+				return datatypes.Network_Subnet_IpAddress_Global{}, "", err
+			}
+
+			if len(globalIps) == 1 && globalIps[0].IpAddress != nil {
+				return globalIps[0], "complete", nil
+			} else if len(globalIps) == 0 || len(globalIps) == 1 {
+				return nil, "pending", nil
+			} else {
+				return nil, "", fmt.Errorf("Expected one global ip, but found %d", len(globalIps))
+			}
+		},
+		Timeout:    10 * time.Minute,
+		Delay:      5 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	pendingResult, err := stateConf.WaitForState()
+
+	if err != nil {
+		return datatypes.Network_Subnet_IpAddress_Global{}, err
+	}
+
+	if result, ok := pendingResult.(datatypes.Network_Subnet_IpAddress_Global); ok {
+		return result, nil
+	}
+
+	return datatypes.Network_Subnet_IpAddress_Global{},
+		fmt.Errorf("Cannot find global ip with order id '%d'", orderId)
+}
+
+func buildGlobalIpProductOrderContainer(d *schema.ResourceData, sess *session.Session, packageType string) (
+	*datatypes.Container_Product_Order_Network_Subnet, error) {
+
+	// 1. Get a package
+	pkg, err := product.GetPackageByType(sess, packageType)
+	if err != nil {
+		return &datatypes.Container_Product_Order_Network_Subnet{}, err
+	}
+
+	// 2. Get all prices for the package
+	productItems, err := product.GetPackageProducts(sess, *pkg.Id)
+	if err != nil {
+		return &datatypes.Container_Product_Order_Network_Subnet{}, err
+	}
+
+	// 3. Find global ip prices
+	// Default to IPv4; an IPv6 routes_to address selects the IPv6 item instead.
+	globalIpKeyname := "GLOBAL_IPV4"
+	if strings.Contains(d.Get("routes_to").(string), ":") {
+		globalIpKeyname = "GLOBAL_IPV6"
+	}
+
+	// 4. 
Select items with a matching keyname + globalIpItems := []datatypes.Product_Item{} + for _, item := range productItems { + if *item.KeyName == globalIpKeyname { + globalIpItems = append(globalIpItems, item) + } + } + + if len(globalIpItems) == 0 { + return &datatypes.Container_Product_Order_Network_Subnet{}, + fmt.Errorf("No product items matching %s could be found", globalIpKeyname) + } + + productOrderContainer := datatypes.Container_Product_Order_Network_Subnet{ + Container_Product_Order: datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Prices: []datatypes.Product_Item_Price{ + { + Id: globalIpItems[0].Prices[0].Id, + }, + }, + Quantity: sl.Int(1), + }, + } + + return &productOrderContainer, nil +} diff --git a/ibm/resource_ibm_network_public_ip_test.go b/ibm/resource_ibm_network_public_ip_test.go new file mode 100644 index 0000000000..f964c8ed7e --- /dev/null +++ b/ibm/resource_ibm_network_public_ip_test.go @@ -0,0 +1,255 @@ +package ibm + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/services" + "regexp" +) + +func TestAccIBMNetworkPublicIp_Basic(t *testing.T) { + hostname1 := acctest.RandString(16) + hostname2 := acctest.RandString(16) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMNetworkPublicIpConfig_basic(hostname1, hostname2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMNetworkPublicIpExists("ibm_network_public_ip.test-global-ip"), + resource.TestMatchResourceAttr("ibm_network_public_ip.test-global-ip", "ip_address", + regexp.MustCompile(`^(([01]?[0-9]?[0-9]|2([0-4][0-9]|5[0-5]))\.){3}([01]?[0-9]?[0-9]|2([0-4][0-9]|5[0-5]))$`)), + testAccCheckIBMResources("ibm_network_public_ip.test-global-ip", "routes_to", + "ibm_compute_vm_instance.vm1", "ipv4_address"), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMNetworkPublicIpConfig_updated(hostname1, hostname2), + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMResources("ibm_network_public_ip.test-global-ip", "routes_to", + "ibm_compute_vm_instance.vm2", "ipv4_address"), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMNetworkPublicIpConfig_Ipv6Basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMNetworkPublicIpExists("ibm_network_public_ip.test-global-ip-3"), + resource.TestMatchResourceAttr("ibm_network_public_ip.test-global-ip-3", "ip_address", + regexp.MustCompile(`^(([[:xdigit:]]{4}:){7})([[:xdigit:]]{4})$`)), + testAccCheckIBMResources("ibm_network_public_ip.test-global-ip-3", "routes_to", + "ibm_compute_vm_instance.vm3", "ipv6_address"), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMNetworkPublicIpConfig_Ipv6Updated, + Check: resource.ComposeTestCheckFunc( + testAccCheckIBMResources("ibm_network_public_ip.test-global-ip-3", "routes_to", + "ibm_compute_vm_instance.vm4", "ipv6_address"), + ), + }, + }, + }) +} + +func testAccCheckIBMNetworkPublicIpExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Record ID is set") + } + + globalIpId, _ := strconv.Atoi(rs.Primary.ID) + + service := 
services.GetNetworkSubnetIpAddressGlobalService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + foundGlobalIp, err := service.Id(globalIpId).GetObject() + + if err != nil { + return err + } + + if strconv.Itoa(int(*foundGlobalIp.Id)) != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + return nil + } +} + +func testAccCheckIBMResources(srcResource, srcKey, tgtResource, tgtKey string) resource.TestCheckFunc { + return func(s *terraform.State) error { + sourceResource, ok := s.RootModule().Resources[srcResource] + if !ok { + return fmt.Errorf("Not found: %s", srcResource) + } + + targetResource, ok := s.RootModule().Resources[tgtResource] + if !ok { + return fmt.Errorf("Not found: %s", tgtResource) + } + + if sourceResource.Primary.Attributes[srcKey] != targetResource.Primary.Attributes[tgtKey] { + return fmt.Errorf("Different values : Source : %s %s %s , Target : %s %s %s", + srcResource, srcKey, sourceResource.Primary.Attributes[srcKey], + tgtResource, tgtKey, targetResource.Primary.Attributes[tgtKey]) + } + + return nil + } +} + +func testAccCheckIBMNetworkPublicIpConfig_basic(hostname1 string, hostname2 string) string { + return fmt.Sprintf(` +resource "ibm_compute_vm_instance" "vm1" { + hostname = "%s" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_compute_vm_instance" "vm2" { + hostname = "%s" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "tor01" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_network_public_ip" "test-global-ip" { + routes_to = "${ibm_compute_vm_instance.vm1.ipv4_address}" +} +`, hostname1, hostname2) +} + +func testAccCheckIBMNetworkPublicIpConfig_updated(hostname1 string, hostname2 string) string { + return fmt.Sprintf(` +resource "ibm_compute_vm_instance" "vm1" { + hostname = "%s" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_compute_vm_instance" "vm2" { + hostname = "%s" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "tor01" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_network_public_ip" "test-global-ip" { + routes_to = "${ibm_compute_vm_instance.vm2.ipv4_address}" +} +`, hostname1, hostname2) +} + +const testAccCheckIBMNetworkPublicIpConfig_Ipv6Basic = ` +resource "ibm_compute_vm_instance" "vm3" { + hostname = "vm3" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "che01" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false + ipv6_enabled = true +} + +resource "ibm_compute_vm_instance" "vm4" { + hostname = "vm4" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "che01" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false + ipv6_enabled = true +} + +resource "ibm_network_public_ip" 
"test-global-ip-3" { + routes_to = "${ibm_compute_vm_instance.vm3.ipv6_address}" +}` + +const testAccCheckIBMNetworkPublicIpConfig_Ipv6Updated = ` +resource "ibm_compute_vm_instance" "vm3" { + hostname = "vm3" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "che01" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false + ipv6_enabled = true +} + +resource "ibm_compute_vm_instance" "vm4" { + hostname = "vm4" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "che01" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false + ipv6_enabled = true +} + +resource "ibm_network_public_ip" "test-global-ip-3" { + routes_to = "${ibm_compute_vm_instance.vm4.ipv6_address}" +}` diff --git a/ibm/resource_ibm_network_vlan.go b/ibm/resource_ibm_network_vlan.go new file mode 100644 index 0000000000..010230f504 --- /dev/null +++ b/ibm/resource_ibm_network_vlan.go @@ -0,0 +1,409 @@ +package ibm + +import ( + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/hardware" + "github.com/softlayer/softlayer-go/helpers/location" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + AdditionalServicesPackageType = "ADDITIONAL_SERVICES" + AdditionalServicesNetworkVlanPackageType = "ADDITIONAL_SERVICES_NETWORK_VLAN" + + VlanMask = "id,name,primaryRouter[datacenter[name]],primaryRouter[hostname],vlanNumber," + + "billingItem[recurringFee],guestNetworkComponentCount,subnets[networkIdentifier,cidr,subnetType]" +) + +func resourceIBMNetworkVlan() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMNetworkVlanCreate, + Read: resourceIBMNetworkVlanRead, + Update: resourceIBMNetworkVlanUpdate, + Delete: resourceIBMNetworkVlanDelete, + Exists: resourceIBMNetworkVlanExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errs []error) { + vlanType := v.(string) + if vlanType != "PRIVATE" && vlanType != "PUBLIC" { + errs = append(errs, errors.New( + "vlan type should be either 'PRIVATE' or 'PUBLIC'")) + } + return + }, + }, + "subnet_size": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + }, + + "router_hostname": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + + "vlan_number": { + Type: schema.TypeInt, + Computed: true, + }, + "softlayer_managed": { + Type: schema.TypeBool, + Computed: true, + }, + "child_resource_count": { + Type: schema.TypeInt, + Computed: true, + }, + "subnets": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subnet": { + Type: schema.TypeString, + Required: true, + }, + 
"subnet_type": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceIBMNetworkVlanCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + router := d.Get("router_hostname").(string) + name := d.Get("name").(string) + + vlanType := d.Get("type").(string) + if (vlanType == "PRIVATE" && len(router) > 0 && strings.Contains(router, "fcr")) || + (vlanType == "PUBLIC" && len(router) > 0 && strings.Contains(router, "bcr")) { + return fmt.Errorf("Error creating vlan: mismatch between vlan_type '%s' and router_hostname '%s'", vlanType, router) + } + + // Find price items with AdditionalServicesNetworkVlan + productOrderContainer, err := buildVlanProductOrderContainer(d, sess, AdditionalServicesNetworkVlanPackageType) + if err != nil { + // Find price items with AdditionalServices + productOrderContainer, err = buildVlanProductOrderContainer(d, sess, AdditionalServicesPackageType) + if err != nil { + return fmt.Errorf("Error creating vlan: %s", err) + } + } + + log.Println("[INFO] Creating vlan") + + receipt, err := services.GetProductOrderService(sess). + PlaceOrder(productOrderContainer, sl.Bool(false)) + if err != nil { + return fmt.Errorf("Error during creation of vlan: %s", err) + } + + vlan, err := findVlanByOrderId(sess, *receipt.OrderId) + + if len(name) > 0 { + _, err = services.GetNetworkVlanService(sess). + Id(*vlan.Id).EditObject(&datatypes.Network_Vlan{Name: sl.String(name)}) + if err != nil { + return fmt.Errorf("Error updating vlan: %s", err) + } + } + + d.SetId(fmt.Sprintf("%d", *vlan.Id)) + return resourceIBMNetworkVlanRead(d, meta) +} + +func resourceIBMNetworkVlanRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkVlanService(sess) + + vlanId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid vlan ID, must be an integer: %s", err) + } + + vlan, err := service.Id(vlanId).Mask(VlanMask).GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving vlan: %s", err) + } + + d.Set("id", *vlan.Id) + d.Set("vlan_number", *vlan.VlanNumber) + d.Set("child_resource_count", *vlan.GuestNetworkComponentCount) + d.Set("name", sl.Get(vlan.Name, "")) + + if vlan.PrimaryRouter != nil { + d.Set("router_hostname", *vlan.PrimaryRouter.Hostname) + if strings.HasPrefix(*vlan.PrimaryRouter.Hostname, "fcr") { + d.Set("type", "PUBLIC") + } else { + d.Set("type", "PRIVATE") + } + if vlan.PrimaryRouter.Datacenter != nil { + d.Set("datacenter", *vlan.PrimaryRouter.Datacenter.Name) + } + } + + d.Set("softlayer_managed", vlan.BillingItem == nil) + + // Subnets + subnets := make([]map[string]interface{}, 0) + + for _, elem := range vlan.Subnets { + subnet := make(map[string]interface{}) + subnet["subnet"] = fmt.Sprintf("%s/%s", *elem.NetworkIdentifier, strconv.Itoa(*elem.Cidr)) + subnet["subnet_type"] = *elem.SubnetType + subnets = append(subnets, subnet) + } + d.Set("subnets", subnets) + + if vlan.Subnets != nil && len(vlan.Subnets) > 0 { + d.Set("subnet_size", 1<<(uint)(32-*vlan.Subnets[0].Cidr)) + } else { + d.Set("subnet_size", 0) + } + + return nil +} + +func resourceIBMNetworkVlanUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + service := services.GetNetworkVlanService(sess) + + vlanId, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid vlan ID, must be an integer: %s", err) + } + + opts := 
datatypes.Network_Vlan{}
+
+ if d.HasChange("name") {
+ opts.Name = sl.String(d.Get("name").(string))
+ }
+
+ _, err = service.Id(vlanId).EditObject(&opts)
+ if err != nil {
+ return fmt.Errorf("Error updating vlan: %s", err)
+ }
+ return resourceIBMNetworkVlanRead(d, meta)
+}
+
+func resourceIBMNetworkVlanDelete(d *schema.ResourceData, meta interface{}) error {
+ sess := meta.(ClientSession).SoftLayerSession()
+ service := services.GetNetworkVlanService(sess)
+
+ vlanId, err := strconv.Atoi(d.Id())
+ if err != nil {
+ return fmt.Errorf("Not a valid vlan ID, must be an integer: %s", err)
+ }
+
+ billingItem, err := service.Id(vlanId).GetBillingItem()
+ if err != nil {
+ return fmt.Errorf("Error deleting vlan: %s", err)
+ }
+
+ // VLANs without a billing item are managed by SoftLayer and cannot be cancelled
+ // by users. Return nil without an error so that only the VLAN entry in the
+ // Terraform state file is removed.
+ if billingItem.Id == nil {
+ return nil
+ }
+
+ // Otherwise cancel the billing item so that the VLAN entry in the Terraform
+ // state file can be removed. The physical VLAN itself is reclaimed automatically
+ // once it no longer has any child resources.
+ _, err = services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService()
+
+ return err
+}
+
+func resourceIBMNetworkVlanExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ sess := meta.(ClientSession).SoftLayerSession()
+ service := services.GetNetworkVlanService(sess)
+
+ vlanID, err := strconv.Atoi(d.Id())
+ if err != nil {
+ return false, fmt.Errorf("Not a valid vlan ID, must be an integer: %s", err)
+ }
+
+ result, err := service.Id(vlanID).Mask("id").GetObject()
+ if err != nil {
+ if apiErr, ok := err.(sl.Error); ok {
+ if apiErr.StatusCode == 404 {
+ return false, nil
+ }
+ }
+ return false, fmt.Errorf("Error communicating with the API: %s", err)
+ }
+ return result.Id != nil && *result.Id == vlanID, nil
+}
+
+func findVlanByOrderId(sess *session.Session, orderId int) (datatypes.Network_Vlan, error) {
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"pending"},
+ Target: []string{"complete"},
+ Refresh: func() (interface{}, string, error) {
+ vlans, err := services.GetAccountService(sess).
+ Filter(filter.Path("networkVlans.billingItem.orderItem.order.id").
+ Eq(strconv.Itoa(orderId)).Build()).
+ Mask("id").
+ GetNetworkVlans()
+ if err != nil {
+ return datatypes.Network_Vlan{}, "", err
+ }
+
+ if len(vlans) == 1 {
+ return vlans[0], "complete", nil
+ } else if len(vlans) == 0 {
+ return nil, "pending", nil
+ } else {
+ return nil, "", fmt.Errorf("Expected one vlan for order %d, got %d", orderId, len(vlans))
+ }
+ },
+ Timeout: 10 * time.Minute,
+ Delay: 5 * time.Second,
+ MinTimeout: 3 * time.Second,
+ }
+
+ pendingResult, err := stateConf.WaitForState()
+ if err != nil {
+ return datatypes.Network_Vlan{}, err
+ }
+
+ result, ok := pendingResult.(datatypes.Network_Vlan)
+ if ok {
+ return result, nil
+ }
+
+ return datatypes.Network_Vlan{},
+ fmt.Errorf("Cannot find vlan with order id '%d'", orderId)
+}
+
+func buildVlanProductOrderContainer(d *schema.ResourceData, sess *session.Session, packageType string) (
+ *datatypes.Container_Product_Order_Network_Vlan, error) {
+ var rt datatypes.Hardware
+ router := d.Get("router_hostname").(string)
+
+ vlanType := d.Get("type").(string)
+ datacenter := d.Get("datacenter").(string)
+
+ if datacenter == "" {
+ return &datatypes.Container_Product_Order_Network_Vlan{},
+ errors.New("datacenter name is empty")
+ }
+
+ dc, err := location.GetDatacenterByName(sess, datacenter, "id")
+ if err != nil {
+ return &datatypes.Container_Product_Order_Network_Vlan{}, err
+ }
+
+ // 1. Get a package
+ pkg, err := product.GetPackageByType(sess, packageType)
+ if err != nil {
+ return &datatypes.Container_Product_Order_Network_Vlan{}, err
+ }
+
+ // 2. Get all prices for the package
+ productItems, err := product.GetPackageProducts(sess, *pkg.Id)
+ if err != nil {
+ return &datatypes.Container_Product_Order_Network_Vlan{}, err
+ }
+
+ // 3. Find vlan and subnet prices
+ vlanKeyname := vlanType + "_NETWORK_VLAN"
+ subnetKeyname := strconv.Itoa(d.Get("subnet_size").(int)) + "_STATIC_PUBLIC_IP_ADDRESSES"
+
+ // 4. Select items with a matching keyname
+ vlanItems := []datatypes.Product_Item{}
+ subnetItems := []datatypes.Product_Item{}
+ for _, item := range productItems {
+ if *item.KeyName == vlanKeyname {
+ vlanItems = append(vlanItems, item)
+ }
+ if strings.Contains(*item.KeyName, subnetKeyname) {
+ subnetItems = append(subnetItems, item)
+ }
+ }
+
+ if len(vlanItems) == 0 {
+ return &datatypes.Container_Product_Order_Network_Vlan{},
+ fmt.Errorf("No product items matching %s could be found", vlanKeyname)
+ }
+
+ if len(subnetItems) == 0 {
+ return &datatypes.Container_Product_Order_Network_Vlan{},
+ fmt.Errorf("No product items matching %s could be found", subnetKeyname)
+ }
+
+ productOrderContainer := datatypes.Container_Product_Order_Network_Vlan{
+ Container_Product_Order: datatypes.Container_Product_Order{
+ PackageId: pkg.Id,
+ Location: sl.String(strconv.Itoa(*dc.Id)),
+ Prices: []datatypes.Product_Item_Price{
+ {
+ Id: vlanItems[0].Prices[0].Id,
+ },
+ {
+ Id: subnetItems[0].Prices[0].Id,
+ },
+ },
+ Quantity: sl.Int(1),
+ },
+ }
+
+ if len(router) > 0 {
+ rt, err = hardware.GetRouterByName(sess, router, "id")
+ if err != nil {
+ return &datatypes.Container_Product_Order_Network_Vlan{},
+ fmt.Errorf("Error creating vlan: %s", err)
+ }
+ productOrderContainer.RouterId = rt.Id
+ }
+
+ return &productOrderContainer, nil
+} diff --git a/ibm/resource_ibm_network_vlan_test.go b/ibm/resource_ibm_network_vlan_test.go new file mode 100644 index 0000000000..4e49ec21a9 --- /dev/null +++ b/ibm/resource_ibm_network_vlan_test.go @@ -0,0 +1,66 @@ +/*
+* Licensed Materials - Property of IBM
+* (C) Copyright IBM Corp. 2017. All Rights Reserved.
+* US Government Users Restricted Rights - Use, duplication or +* disclosure restricted by GSA ADP Schedule Contract with IBM Corp. + */ + +package ibm + +import ( + "github.com/hashicorp/terraform/helper/resource" + "testing" +) + +func TestAccIBMNetworkVlan_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMNetworkVlanConfig_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_network_vlan.test_vlan", "name", "test_vlan"), + resource.TestCheckResourceAttr( + "ibm_network_vlan.test_vlan", "datacenter", "lon02"), + resource.TestCheckResourceAttr( + "ibm_network_vlan.test_vlan", "type", "PUBLIC"), + resource.TestCheckResourceAttr( + "ibm_network_vlan.test_vlan", "softlayer_managed", "false"), + resource.TestCheckResourceAttr( + "ibm_network_vlan.test_vlan", "router_hostname", "fcr01a.lon02"), + resource.TestCheckResourceAttr( + "ibm_network_vlan.test_vlan", "subnet_size", "8"), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMNetworkVlanConfig_name_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "ibm_network_vlan.test_vlan", "name", "test_vlan_update"), + ), + }, + }, + }) +} + +const testAccCheckIBMNetworkVlanConfig_basic = ` +resource "ibm_network_vlan" "test_vlan" { + name = "test_vlan" + datacenter = "lon02" + type = "PUBLIC" + subnet_size = 8 + router_hostname = "fcr01a.lon02" +}` + +const testAccCheckIBMNetworkVlanConfig_name_update = ` +resource "ibm_network_vlan" "test_vlan" { + name = "test_vlan_update" + datacenter = "lon02" + type = "PUBLIC" + subnet_size = 8 + router_hostname = "fcr01a.lon02" +}` diff --git a/ibm/resource_ibm_object_storage_account.go b/ibm/resource_ibm_object_storage_account.go new file mode 100644 index 0000000000..1e05e95df3 --- /dev/null +++ b/ibm/resource_ibm_object_storage_account.go @@ -0,0 +1,170 @@ +package ibm + +import ( + "fmt" + "log" + "strings" + + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/order" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMObjectStorageAccount() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMObjectStorageAccountCreate, + Read: resourceIBMObjectStorageAccountRead, + Update: resourceIBMObjectStorageAccountUpdate, + Delete: resourceIBMObjectStorageAccountDelete, + Exists: resourceIBMObjectStorageAccountExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "local_note": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceIBMObjectStorageAccountCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + accountService := services.GetAccountService(sess) + + // Check if an object storage account exists + objectStorageAccounts, err := accountService.GetHubNetworkStorage() + if err != nil { + return fmt.Errorf("resource_ibm_object_storage_account: Error on create: %s", err) + } + + if len(objectStorageAccounts) == 0 { + // Order the account + productOrderService := services.GetProductOrderService(sess) + + receipt, err 
:= productOrderService.PlaceOrder(&datatypes.Container_Product_Order{
+ Quantity: sl.Int(1),
+ PackageId: sl.Int(0),
+ Prices: []datatypes.Product_Item_Price{
+ {Id: sl.Int(30920)},
+ },
+ }, sl.Bool(false))
+ if err != nil {
+ return fmt.Errorf(
+ "resource_ibm_object_storage_account: Error ordering account: %s", err)
+ }
+
+ // Wait for the object storage account order to complete.
+ billingOrderItem, err := WaitForOrderCompletion(&receipt, meta)
+ if err != nil {
+ return fmt.Errorf(
+ "Error waiting for object storage account order (%d) to complete: %s", *receipt.OrderId, err)
+ }
+
+ // Get the account name using a filter on hub network storage
+ objectStorageAccounts, err = accountService.Filter(
+ filter.Path("billingItem.id").Eq(billingOrderItem.BillingItem.Id).Build(),
+ ).GetNetworkStorage()
+ if err != nil {
+ return fmt.Errorf("resource_ibm_object_storage_account: Error retrieving the new account: %s", err)
+ }
+
+ if len(objectStorageAccounts) == 0 {
+ return fmt.Errorf("resource_ibm_object_storage_account: Failed to create object storage account")
+ }
+ }
+
+ // Get the account name and set it as the Id
+ d.SetId(*objectStorageAccounts[0].Username)
+ d.Set("name", *objectStorageAccounts[0].Username)
+
+ return nil
+}
+
+func WaitForOrderCompletion(
+ receipt *datatypes.Container_Product_Order_Receipt, meta interface{}) (datatypes.Billing_Order_Item, error) {
+
+ log.Printf("Waiting for billing order %d to have zero active transactions", *receipt.OrderId)
+ var billingOrderItem *datatypes.Billing_Order_Item
+
+ stateConf := &resource.StateChangeConf{
+ Pending: []string{"", "in progress"},
+ Target: []string{"complete"},
+ Refresh: func() (interface{}, string, error) {
+ var err error
+ var completed bool
+
+ sess := meta.(ClientSession).SoftLayerSession()
+ completed, billingOrderItem, err = order.CheckBillingOrderComplete(sess, receipt)
+ if err != nil {
+ return nil, "", err
+ }
+
+ if completed {
+ return billingOrderItem, "complete", nil
+ }
+ return billingOrderItem, "in progress", nil
+ },
+ Timeout: 10 * time.Minute,
+ Delay: 10 * time.Second,
+ MinTimeout: 10 * time.Second,
+ }
+
+ _, err := stateConf.WaitForState()
+ if err != nil {
+ return datatypes.Billing_Order_Item{}, err
+ }
+ return *billingOrderItem, nil
+}
+
+func resourceIBMObjectStorageAccountRead(d *schema.ResourceData, meta interface{}) error {
+ sess := meta.(ClientSession).SoftLayerSession()
+ accountService := services.GetAccountService(sess)
+ accountName := d.Id()
+ d.Set("name", accountName)
+
+ // Check if an object storage account exists
+ objectStorageAccounts, err := accountService.Filter(
+ filter.Path("username").Eq(accountName).Build(),
+ ).GetHubNetworkStorage()
+ if err != nil {
+ return fmt.Errorf("resource_ibm_object_storage_account: Error on Read: %s", err)
+ }
+
+ for _, objectStorageAccount := range objectStorageAccounts {
+ if *objectStorageAccount.Username == accountName {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("resource_ibm_object_storage_account: Could not find account %s", accountName)
+}
+
+func resourceIBMObjectStorageAccountUpdate(d *schema.ResourceData, meta interface{}) error {
+ // Nothing to update for now. Not supported.
+ return nil
+}
+
+func resourceIBMObjectStorageAccountDelete(d *schema.ResourceData, meta interface{}) error {
+ // Delete is not supported for now.
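+ // Returning nil only removes the account from the Terraform state; the
+ // underlying object storage account, if any, is left untouched.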
+ return nil
+}
+
+func resourceIBMObjectStorageAccountExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ err := resourceIBMObjectStorageAccountRead(d, meta)
+ if err != nil {
+ if strings.Contains(err.Error(), "Could not find account") {
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+} diff --git a/ibm/resource_ibm_object_storage_account_test.go b/ibm/resource_ibm_object_storage_account_test.go new file mode 100644 index 0000000000..cab964c77c --- /dev/null +++ b/ibm/resource_ibm_object_storage_account_test.go @@ -0,0 +1,65 @@ +package ibm
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccIBMObjectStorageAccount_Basic(t *testing.T) {
+ var accountName string
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckIBMObjectStorageAccountDestroy,
+ Steps: []resource.TestStep{
+ resource.TestStep{
+ Config: testAccCheckIBMObjectStorageAccountConfig_basic,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckIBMObjectStorageAccountExists("ibm_object_storage_account.testacc_foobar", &accountName),
+ testAccCheckIBMObjectStorageAccountAttributes(&accountName),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckIBMObjectStorageAccountDestroy(s *terraform.State) error {
+ return nil
+}
+
+func testAccCheckIBMObjectStorageAccountExists(n string, accountName *string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No Record ID is set")
+ }
+
+ *accountName = rs.Primary.ID
+
+ return nil
+ }
+}
+
+func testAccCheckIBMObjectStorageAccountAttributes(accountName *string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+
+ if *accountName == "" {
+ return fmt.Errorf("No object storage account name")
+ }
+
+ return nil
+ }
+}
+
+var testAccCheckIBMObjectStorageAccountConfig_basic = `
+resource "ibm_object_storage_account" "testacc_foobar" {
+}` diff --git a/ibm/resource_ibm_service_instance.go b/ibm/resource_ibm_service_instance.go new file mode 100644 index 0000000000..466a075d2f --- /dev/null +++ b/ibm/resource_ibm_service_instance.go @@ -0,0 +1,250 @@ +package ibm
+
+import (
+ "fmt"
+
+ "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2"
+ "github.com/IBM-Bluemix/bluemix-go/bmxerror"
+ "github.com/IBM-Bluemix/bluemix-go/helpers"
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceIBMServiceInstance() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceIBMServiceInstanceCreate,
+ Read: resourceIBMServiceInstanceRead,
+ Update: resourceIBMServiceInstanceUpdate,
+ Delete: resourceIBMServiceInstanceDelete,
+ Exists: resourceIBMServiceInstanceExists,
+ Importer: &schema.ResourceImporter{},
+
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "A name for the service instance",
+ },
+
+ "space_guid": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "The guid of the space in which the instance will be created",
+ },
+
+ "service": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "The name of the service offering, e.g. cleardb or cloudantNoSQLDB",
+ },
+
+ "credentials": {
+ Description: "Credentials associated with the service instance",
+ Computed: true,
+ Type: schema.TypeMap,
+ },
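+ // Note: these credentials are stored in the Terraform state file, so the
+ // state should be treated as sensitive.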
+ + "service_plan_guid": { + Description: "The uniquie identifier of the service offering plan type", + Computed: true, + Type: schema.TypeString, + }, + + "parameters": { + Type: schema.TypeMap, + Optional: true, + Description: "Arbitrary parameters to pass along to the service broker. Must be a JSON object", + }, + + "plan": { + Type: schema.TypeString, + Required: true, + Description: "The plan type of the service", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceIBMServiceInstanceCreate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + serviceName := d.Get("service").(string) + plan := d.Get("plan").(string) + name := d.Get("name").(string) + spaceGUID := d.Get("space_guid").(string) + + svcInst := mccpv2.ServiceInstanceCreateRequest{ + Name: name, + SpaceGUID: spaceGUID, + } + + serviceOff, err := cfClient.ServiceOfferings().FindByLabel(serviceName) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + servicePlan, err := cfClient.ServicePlans().FindPlanInServiceOffering(serviceOff.GUID, plan) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + svcInst.PlanGUID = servicePlan.GUID + + if parameters, ok := d.GetOk("parameters"); ok { + svcInst.Params = parameters.(map[string]interface{}) + } + + if _, ok := d.GetOk("tags"); ok { + svcInst.Tags = getServiceTags(d) + } + + service, err := cfClient.ServiceInstances().Create(svcInst) + if err != nil { + return fmt.Errorf("Error creating service: %s", err) + } + + d.SetId(service.Metadata.GUID) + + return resourceIBMServiceInstanceRead(d, meta) +} + +func resourceIBMServiceInstanceRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + + serviceGUID := d.Id() + + service, err := cfClient.ServiceInstances().Get(serviceGUID) + if err != nil { + return fmt.Errorf("Error retrieving service: %s", err) + } + + servicePlanGUID := service.Entity.ServicePlanGUID + d.Set("service_plan_guid", servicePlanGUID) + d.Set("credentials", service.Entity.Credentials) + d.Set("tags", service.Entity.Tags) + d.Set("name", service.Entity.Name) + + p, err := cfClient.ServicePlans().Get(servicePlanGUID) + if err != nil { + return err + } + d.Set("plan", p.Entity.Name) + + svcOff, err := cfClient.ServiceOfferings().Get(p.Entity.ServiceGUID) + if err != nil { + return err + } + d.Set("service", svcOff.Entity.Label) + + return nil +} + +func resourceIBMServiceInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + + serviceGUID := d.Id() + + updateReq := mccpv2.ServiceInstanceUpdateRequest{} + if d.HasChange("name") { + updateReq.Name = helpers.String(d.Get("name").(string)) + } + + if d.HasChange("plan") { + plan := d.Get("plan").(string) + service := d.Get("service").(string) + serviceOff, err := cfClient.ServiceOfferings().FindByLabel(service) + if err != nil { + return fmt.Errorf("Error retrieving service offering: %s", err) + } + + servicePlan, err := cfClient.ServicePlans().FindPlanInServiceOffering(serviceOff.GUID, plan) + if err != nil { + return fmt.Errorf("Error retrieving plan: %s", err) + } + updateReq.PlanGUID = helpers.String(servicePlan.GUID) + + } + + if d.HasChange("parameters") { + updateReq.Params = 
d.Get("parameters").(map[string]interface{}) + } + + if d.HasChange("tags") { + tags := getServiceTags(d) + updateReq.Tags = &tags + } + + _, err = cfClient.ServiceInstances().Update(serviceGUID, updateReq) + if err != nil { + return fmt.Errorf("Error updating service: %s", err) + } + + return resourceIBMServiceInstanceRead(d, meta) +} + +func resourceIBMServiceInstanceDelete(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + id := d.Id() + + err = cfClient.ServiceInstances().Delete(id) + if err != nil { + return fmt.Errorf("Error deleting service: %s", err) + } + + d.SetId("") + + return nil +} +func resourceIBMServiceInstanceExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return false, err + } + serviceGUID := d.Id() + + service, err := cfClient.ServiceInstances().Get(serviceGUID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with the API: %s", err) + } + + return service.Metadata.GUID == serviceGUID, nil +} + +func getServiceTags(d *schema.ResourceData) []string { + tagSet := d.Get("tags").(*schema.Set) + + if tagSet.Len() == 0 { + empty := []string{} + return empty + } + + tags := make([]string, 0, tagSet.Len()) + for _, elem := range tagSet.List() { + tag := elem.(string) + tags = append(tags, tag) + } + return tags +} diff --git a/ibm/resource_ibm_service_instance_test.go b/ibm/resource_ibm_service_instance_test.go new file mode 100644 index 0000000000..538177bd16 --- /dev/null +++ b/ibm/resource_ibm_service_instance_test.go @@ -0,0 +1,182 @@ +package ibm + +import ( + "fmt" + "testing" + + "strings" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" +) + +func TestAccIBMServiceInstance_Basic(t *testing.T) { + var conf mccpv2.ServiceInstanceFields + serviceName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + updateName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMServiceInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMServiceInstance_basic(serviceName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMServiceInstanceExists("ibm_service_instance.service", &conf), + resource.TestCheckResourceAttr("ibm_service_instance.service", "name", serviceName), + resource.TestCheckResourceAttr("ibm_service_instance.service", "service", "cleardb"), + resource.TestCheckResourceAttr("ibm_service_instance.service", "plan", "cb5"), + resource.TestCheckResourceAttr("ibm_service_instance.service", "tags.#", "2"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMServiceInstance_updateWithSameName(serviceName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMServiceInstanceExists("ibm_service_instance.service", &conf), + resource.TestCheckResourceAttr("ibm_service_instance.service", "name", serviceName), + resource.TestCheckResourceAttr("ibm_service_instance.service", "service", "cleardb"), + resource.TestCheckResourceAttr("ibm_service_instance.service", "plan", "cb5"), + 
resource.TestCheckResourceAttr("ibm_service_instance.service", "tags.#", "3"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMServiceInstance_update(updateName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_service_instance.service", "name", updateName), + resource.TestCheckResourceAttr("ibm_service_instance.service", "service", "cleardb"), + resource.TestCheckResourceAttr("ibm_service_instance.service", "plan", "cb5"), + resource.TestCheckResourceAttr("ibm_service_instance.service", "tags.#", "1"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMServiceInstance_newServiceType(updateName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_service_instance.service", "name", updateName), + resource.TestCheckResourceAttr("ibm_service_instance.service", "service", "cloudantNoSQLDB"), + resource.TestCheckResourceAttr("ibm_service_instance.service", "plan", "Lite"), + resource.TestCheckResourceAttr("ibm_service_instance.service", "tags.#", "1"), + ), + }, + }, + }) +} + +func testAccCheckIBMServiceInstanceDestroy(s *terraform.State) error { + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_service_instance" { + continue + } + + serviceGuid := rs.Primary.ID + + // Try to find the key + _, err := cfClient.ServiceInstances().Get(serviceGuid) + + if err != nil && !strings.Contains(err.Error(), "404") { + return fmt.Errorf("Error waiting for CF service (%s) to be destroyed: %s", rs.Primary.ID, err) + } + } + + return nil +} + +func testAccCheckIBMServiceInstanceExists(n string, obj *mccpv2.ServiceInstanceFields) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + serviceGuid := rs.Primary.ID + + service, err := cfClient.ServiceInstances().Get(serviceGuid) + + if err != nil { + return err + } + + *obj = *service + return nil + } +} + +func testAccCheckIBMServiceInstance_basic(serviceName string) string { + return fmt.Sprintf(` + data "ibm_space" "spacedata" { + space = "%s" + org = "%s" + } + + resource "ibm_service_instance" "service" { + name = "%s" + space_guid = "${data.ibm_space.spacedata.id}" + service = "cleardb" + plan = "cb5" + tags = ["cluster-service","cluster-bind"] + } + `, cfSpace, cfOrganization, serviceName) +} + +func testAccCheckIBMServiceInstance_updateWithSameName(serviceName string) string { + return fmt.Sprintf(` + data "ibm_space" "spacedata" { + space = "%s" + org = "%s" + } + + resource "ibm_service_instance" "service" { + name = "%s" + space_guid = "${data.ibm_space.spacedata.id}" + service = "cleardb" + plan = "cb5" + tags = ["cluster-service","cluster-bind","db"] + } + `, cfSpace, cfOrganization, serviceName) +} + +func testAccCheckIBMServiceInstance_update(updateName string) string { + return fmt.Sprintf(` + data "ibm_space" "spacedata" { + space = "%s" + org = "%s" + } + + resource "ibm_service_instance" "service" { + name = "%s" + space_guid = "${data.ibm_space.spacedata.id}" + service = "cleardb" + plan = "cb5" + tags = ["cluster-service"] + } + `, cfSpace, cfOrganization, updateName) +} + +func testAccCheckIBMServiceInstance_newServiceType(updateName string) string { + return fmt.Sprintf(` + data "ibm_space" "spacedata" { + space = "%s" + 
org = "%s" + } + + resource "ibm_service_instance" "service" { + name = "%s" + space_guid = "${data.ibm_space.spacedata.id}" + service = "cloudantNoSQLDB" + plan = "Lite" + tags = ["cluster-service"] + } + `, cfSpace, cfOrganization, updateName) +} diff --git a/ibm/resource_ibm_service_key.go b/ibm/resource_ibm_service_key.go new file mode 100644 index 0000000000..e25fd80010 --- /dev/null +++ b/ibm/resource_ibm_service_key.go @@ -0,0 +1,124 @@ +package ibm + +import ( + "fmt" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceIBMServiceKey() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMServiceKeyCreate, + Read: resourceIBMServiceKeyRead, + Delete: resourceIBMServiceKeyDelete, + Exists: resourceIBMServiceKeyExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the service key", + }, + + "service_instance_guid": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The guid of the service instance for which to create service key", + }, + "parameters": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Arbitrary parameters to pass along to the service broker. Must be a JSON object", + }, + "credentials": { + Description: "Credentials asociated with the key", + Type: schema.TypeMap, + Computed: true, + }, + }, + } +} + +func resourceIBMServiceKeyCreate(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + name := d.Get("name").(string) + serviceInstanceGUID := d.Get("service_instance_guid").(string) + var parameters map[string]interface{} + + if parameters, ok := d.GetOk("parameters"); ok { + parameters = parameters.(map[string]interface{}) + } + + serviceKey, err := cfClient.ServiceKeys().Create(serviceInstanceGUID, name, parameters) + if err != nil { + return fmt.Errorf("Error creating service key: %s", err) + } + + d.SetId(serviceKey.Metadata.GUID) + + return resourceIBMServiceKeyRead(d, meta) +} + +func resourceIBMServiceKeyRead(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + serviceKeyGUID := d.Id() + + serviceKey, err := cfClient.ServiceKeys().Get(serviceKeyGUID) + if err != nil { + return fmt.Errorf("Error retrieving service key: %s", err) + } + d.Set("credentials", serviceKey.Entity.Credentials) + d.Set("service_instance_guid", serviceKey.Entity.ServiceInstanceGUID) + d.Set("name", serviceKey.Entity.Name) + + return nil +} + +func resourceIBMServiceKeyDelete(d *schema.ResourceData, meta interface{}) error { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return err + } + + serviceKeyGUID := d.Id() + + err = cfClient.ServiceKeys().Delete(serviceKeyGUID) + if err != nil { + return fmt.Errorf("Error deleting service key: %s", err) + } + + d.SetId("") + + return nil +} + +func resourceIBMServiceKeyExists(d *schema.ResourceData, meta interface{}) (bool, error) { + cfClient, err := meta.(ClientSession).MccpAPI() + if err != nil { + return false, err + } + serviceKeyGUID := d.Id() + + serviceKey, err := cfClient.ServiceKeys().Get(serviceKeyGUID) + if err != nil { + if apiErr, ok := err.(bmxerror.RequestFailure); ok { + if apiErr.StatusCode() == 404 { + return false, nil + } + } + return false, fmt.Errorf("Error communicating with 
the API: %s", err) + } + + return serviceKey.Metadata.GUID == serviceKeyGUID, nil +} diff --git a/ibm/resource_ibm_service_key_test.go b/ibm/resource_ibm_service_key_test.go new file mode 100644 index 0000000000..b69a9626fb --- /dev/null +++ b/ibm/resource_ibm_service_key_test.go @@ -0,0 +1,106 @@ +package ibm + +import ( + "fmt" + "testing" + + "strings" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" +) + +func TestAccIBMServiceKey_Basic(t *testing.T) { + var conf mccpv2.ServiceKeyFields + serviceName := fmt.Sprintf("terraform_%d", acctest.RandInt()) + serviceKey := fmt.Sprintf("terraform_%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIBMServiceKeyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMServiceKey_basic(serviceName, serviceKey), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckIBMServiceKeyExists("ibm_service_key.serviceKey", &conf), + resource.TestCheckResourceAttr("ibm_service_key.serviceKey", "name", serviceKey), + ), + }, + }, + }) +} + +func testAccCheckIBMServiceKeyExists(n string, obj *mccpv2.ServiceKeyFields) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + serviceKeyGuid := rs.Primary.ID + + serviceKey, err := cfClient.ServiceKeys().Get(serviceKeyGuid) + if err != nil { + return err + } + + *obj = *serviceKey + return nil + } +} + +func testAccCheckIBMServiceKeyDestroy(s *terraform.State) error { + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_service_key" { + continue + } + + serviceKeyGuid := rs.Primary.ID + + // Try to find the key + _, err := cfClient.ServiceKeys().Get(serviceKeyGuid) + + if err != nil && !strings.Contains(err.Error(), "404") { + return fmt.Errorf("Error waiting for CF service key (%s) to be destroyed: %s", rs.Primary.ID, err) + } + } + + return nil +} + +func testAccCheckIBMServiceKey_basic(serviceName, serviceKey string) string { + return fmt.Sprintf(` + + data "ibm_space" "spacedata" { + space = "%s" + org = "%s" + } + + resource "ibm_service_instance" "service" { + name = "%s" + space_guid = "${data.ibm_space.spacedata.id}" + service = "cleardb" + plan = "cb5" + tags = ["cluster-service","cluster-bind"] + } + + resource "ibm_service_key" "serviceKey" { + name = "%s" + service_instance_guid = "${ibm_service_instance.service.id}" + } + `, cfSpace, cfOrganization, serviceName, serviceKey) +} diff --git a/ibm/resource_ibm_space.go b/ibm/resource_ibm_space.go new file mode 100644 index 0000000000..36f145ffb1 --- /dev/null +++ b/ibm/resource_ibm_space.go @@ -0,0 +1,332 @@ +package ibm + +import ( + "fmt" + + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/helpers" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceIBMSpace() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMSpaceCreate, + Read: resourceIBMSpaceRead, + Update: resourceIBMSpaceUpdate, + Delete: 
resourceIBMSpaceDelete,
+ Exists: resourceIBMSpaceExists,
+ Importer: &schema.ResourceImporter{},
+
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The name for the space",
+ },
+ "org": {
+ Description: "The org this space belongs to",
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "auditors": {
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Description: "The IBMIDs of the users who will have the auditor role in this space, e.g. user@example.com",
+ },
+ "managers": {
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Description: "The IBMIDs of the users who will have the manager role in this space, e.g. user@example.com",
+ },
+ "developers": {
+ Type: schema.TypeSet,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Description: "The IBMIDs of the users who will have the developer role in this space, e.g. user@example.com",
+ },
+ "space_quota": {
+ Description: "The name of the Space Quota Definition",
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceIBMSpaceCreate(d *schema.ResourceData, meta interface{}) error {
+ cfClient, err := meta.(ClientSession).MccpAPI()
+ if err != nil {
+ return err
+ }
+ org := d.Get("org").(string)
+ name := d.Get("name").(string)
+
+ req := mccpv2.SpaceCreateRequest{
+ Name: name,
+ }
+
+ orgFields, err := cfClient.Organizations().FindByName(org, BluemixRegion)
+ if err != nil {
+ return fmt.Errorf("Error retrieving org: %s", err)
+ }
+ req.OrgGUID = orgFields.GUID
+
+ if spaceQuota, ok := d.GetOk("space_quota"); ok {
+ quota, err := cfClient.SpaceQuotas().FindByName(spaceQuota.(string), orgFields.GUID)
+ if err != nil {
+ return fmt.Errorf("Error retrieving space quota: %s", err)
+ }
+ req.SpaceQuotaGUID = quota.GUID
+ }
+
+ spaceAPI := cfClient.Spaces()
+ space, err := spaceAPI.Create(req)
+ if err != nil {
+ return fmt.Errorf("Error creating space: %s", err)
+ }
+
+ spaceGUID := space.Metadata.GUID
+ d.SetId(spaceGUID)
+
+ if developerSet := d.Get("developers").(*schema.Set); len(developerSet.List()) > 0 {
+ developers := expandStringList(developerSet.List())
+ for _, d := range developers {
+ _, err := spaceAPI.AssociateDeveloper(spaceGUID, d)
+ if err != nil {
+ return fmt.Errorf("Error associating developer %s with space %s : %s", d, spaceGUID, err)
+ }
+ }
+ }
+
+ if auditorSet := d.Get("auditors").(*schema.Set); len(auditorSet.List()) > 0 {
+ auditors := expandStringList(auditorSet.List())
+ for _, d := range auditors {
+ _, err := spaceAPI.AssociateAuditor(spaceGUID, d)
+ if err != nil {
+ return fmt.Errorf("Error associating auditor %s with space %s : %s", d, spaceGUID, err)
+ }
+ }
+ }
+
+ if managerSet := d.Get("managers").(*schema.Set); len(managerSet.List()) > 0 {
+ managers := expandStringList(managerSet.List())
+ for _, d := range managers {
+ _, err := spaceAPI.AssociateManager(spaceGUID, d)
+ if err != nil {
+ return fmt.Errorf("Error associating manager %s with space %s : %s", d, spaceGUID, err)
+ }
+ }
+ }
+
+ return resourceIBMSpaceRead(d, meta)
+}
+
+func resourceIBMSpaceRead(d *schema.ResourceData, meta interface{}) error {
+ cfClient, err := meta.(ClientSession).MccpAPI()
+ if err != nil {
+ return err
+ }
+ spaceGUID := d.Id()
+
+ spaceAPI := cfClient.Spaces()
+ spaceDetails, err := spaceAPI.Get(spaceGUID)
+ if err != nil {
+ return fmt.Errorf("Error retrieving space: %s", err)
+ }
+
+ auditors, err := 
spaceAPI.ListAuditors(spaceGUID)
+ if err != nil {
+ return fmt.Errorf("Error retrieving auditors in the space: %s", err)
+ }
+
+ managers, err := spaceAPI.ListManagers(spaceGUID)
+ if err != nil {
+ return fmt.Errorf("Error retrieving managers in the space: %s", err)
+ }
+
+ developers, err := spaceAPI.ListDevelopers(spaceGUID)
+ if err != nil {
+ return fmt.Errorf("Error retrieving developers in the space: %s", err)
+ }
+
+ d.Set("auditors", flattenSpaceRoleUsers(auditors))
+ d.Set("managers", flattenSpaceRoleUsers(managers))
+ d.Set("developers", flattenSpaceRoleUsers(developers))
+
+ if spaceDetails.Entity.SpaceQuotaGUID != "" {
+ sqAPI := cfClient.SpaceQuotas()
+ quota, err := sqAPI.Get(spaceDetails.Entity.SpaceQuotaGUID)
+ if err != nil {
+ return fmt.Errorf("Error retrieving quotas details for space: %s", err)
+ }
+ d.Set("space_quota", quota.Entity.Name)
+ }
+
+ return nil
+}
+
+func resourceIBMSpaceUpdate(d *schema.ResourceData, meta interface{}) error {
+ cfClient, err := meta.(ClientSession).MccpAPI()
+ if err != nil {
+ return err
+ }
+ id := d.Id()
+
+ req := mccpv2.SpaceUpdateRequest{}
+ if d.HasChange("name") {
+ req.Name = helpers.String(d.Get("name").(string))
+ }
+
+ api := cfClient.Spaces()
+ _, err = api.Update(id, req)
+ if err != nil {
+ return fmt.Errorf("Error updating space: %s", err)
+ }
+
+ err = updateAuditors(api, id, d)
+ if err != nil {
+ return err
+ }
+ err = updateManagers(api, id, d)
+ if err != nil {
+ return err
+ }
+ err = updateDevelopers(api, id, d)
+ if err != nil {
+ return err
+ }
+ return resourceIBMSpaceRead(d, meta)
+}
+
+func resourceIBMSpaceDelete(d *schema.ResourceData, meta interface{}) error {
+ cfClient, err := meta.(ClientSession).MccpAPI()
+ if err != nil {
+ return err
+ }
+ id := d.Id()
+
+ err = cfClient.Spaces().Delete(id)
+ if err != nil {
+ return fmt.Errorf("Error deleting space: %s", err)
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func resourceIBMSpaceExists(d *schema.ResourceData, meta interface{}) (bool, error) {
+ cfClient, err := meta.(ClientSession).MccpAPI()
+ if err != nil {
+ return false, err
+ }
+ id := d.Id()
+
+ space, err := cfClient.Spaces().Get(id)
+ if err != nil {
+ if apiErr, ok := err.(bmxerror.RequestFailure); ok {
+ if apiErr.StatusCode() == 404 {
+ return false, nil
+ }
+ }
+ return false, fmt.Errorf("Error communicating with the API: %s", err)
+ }
+
+ return space.Metadata.GUID == id, nil
+}
+
+func updateDevelopers(api mccpv2.Spaces, spaceGUID string, d *schema.ResourceData) error {
+ if !d.HasChange("developers") {
+ return nil
+ }
+ var remove, add []string
+ o, n := d.GetChange("developers")
+ os := o.(*schema.Set)
+ ns := n.(*schema.Set)
+
+ remove = expandStringList(os.Difference(ns).List())
+ add = expandStringList(ns.Difference(os).List())
+
+ if len(add) > 0 {
+ for _, d := range add {
+ _, err := api.AssociateDeveloper(spaceGUID, d)
+ if err != nil {
+ return fmt.Errorf("Error associating developer %s with space %s : %s", d, spaceGUID, err)
+ }
+ }
+ }
+ if len(remove) > 0 {
+ for _, d := range remove {
+ err := api.DisassociateDeveloper(spaceGUID, d)
+ if err != nil {
+ return fmt.Errorf("Error disassociating developer %s from space %s : %s", d, spaceGUID, err)
+ }
+ }
+ }
+ return nil
+}
+
+func updateManagers(api mccpv2.Spaces, spaceGUID string, d *schema.ResourceData) error {
+ if !d.HasChange("managers") {
+ return nil
+ }
+ var remove, add []string
+ o, n := d.GetChange("managers")
+ os := o.(*schema.Set)
+ ns := n.(*schema.Set)
+
+ remove = expandStringList(os.Difference(ns).List())
+ add = 
expandStringList(ns.Difference(os).List())
+
+ if len(add) > 0 {
+ for _, d := range add {
+ _, err := api.AssociateManager(spaceGUID, d)
+ if err != nil {
+ return fmt.Errorf("Error associating manager %s with space %s : %s", d, spaceGUID, err)
+ }
+ }
+ }
+ if len(remove) > 0 {
+ for _, d := range remove {
+ err := api.DisassociateManager(spaceGUID, d)
+ if err != nil {
+ return fmt.Errorf("Error disassociating manager %s from space %s : %s", d, spaceGUID, err)
+ }
+ }
+ }
+ return nil
+}
+
+func updateAuditors(api mccpv2.Spaces, spaceGUID string, d *schema.ResourceData) error {
+ if !d.HasChange("auditors") {
+ return nil
+ }
+ var remove, add []string
+ o, n := d.GetChange("auditors")
+ os := o.(*schema.Set)
+ ns := n.(*schema.Set)
+
+ remove = expandStringList(os.Difference(ns).List())
+ add = expandStringList(ns.Difference(os).List())
+
+ if len(add) > 0 {
+ for _, d := range add {
+ _, err := api.AssociateAuditor(spaceGUID, d)
+ if err != nil {
+ return fmt.Errorf("Error associating auditor %s with space %s : %s", d, spaceGUID, err)
+ }
+ }
+ }
+ if len(remove) > 0 {
+ for _, d := range remove {
+ err := api.DisassociateAuditor(spaceGUID, d)
+ if err != nil {
+ return fmt.Errorf("Error disassociating auditor %s from space %s : %s", d, spaceGUID, err)
+ }
+ }
+ }
+ return nil
+} diff --git a/ibm/resource_ibm_space_test.go b/ibm/resource_ibm_space_test.go new file mode 100644 index 0000000000..6ca571f6fe --- /dev/null +++ b/ibm/resource_ibm_space_test.go @@ -0,0 +1,173 @@ +package ibm
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+
+ "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2"
+ "github.com/IBM-Bluemix/bluemix-go/bmxerror"
+)
+
+func TestAccIBMSpace_Basic(t *testing.T) {
+ var conf mccpv2.SpaceFields
+ name := fmt.Sprintf("terraform_%d", acctest.RandInt())
+ updatedName := fmt.Sprintf("terraform_updated_%d", acctest.RandInt())
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckIBMSpaceDestroy,
+ Steps: []resource.TestStep{
+
+ resource.TestStep{
+ Config: testAccCheckIBMSpaceCreate(name),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ testAccCheckIBMSpaceExists("ibm_space.space", &conf),
+ resource.TestCheckResourceAttr("ibm_space.space", "org", cfOrganization),
+ resource.TestCheckResourceAttr("ibm_space.space", "name", name),
+ ),
+ },
+
+ resource.TestStep{
+ Config: testAccCheckIBMSpaceUpdate(updatedName),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("ibm_space.space", "org", cfOrganization),
+ resource.TestCheckResourceAttr("ibm_space.space", "name", updatedName),
+ ),
+ },
+ },
+ })
+}
+
+func TestAccIBMSpace_with_roles(t *testing.T) {
+ var conf mccpv2.SpaceFields
+ name := fmt.Sprintf("terraform_%d", acctest.RandInt())
+ updatedName := fmt.Sprintf("terraform_updated_%d", acctest.RandInt())
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckIBMSpaceDestroy,
+ Steps: []resource.TestStep{
+
+ resource.TestStep{
+ Config: testAccCheckIBMSpaceCreateWithRoles(name),
+ Check: resource.ComposeAggregateTestCheckFunc(
+ testAccCheckIBMSpaceExists("ibm_space.space", &conf),
+ resource.TestCheckResourceAttr("ibm_space.space", "org", cfOrganization),
+ resource.TestCheckResourceAttr("ibm_space.space", "name", name),
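+ // testAccCheckIBMSpaceCreateWithRoles seeds all three roles with the same single IBMID
+ 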
resource.TestCheckResourceAttr("ibm_space.space", "auditors.#", "1"), + resource.TestCheckResourceAttr("ibm_space.space", "managers.#", "1"), + resource.TestCheckResourceAttr("ibm_space.space", "developers.#", "1"), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMSpaceUpdateWithRoles(updatedName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("ibm_space.space", "org", cfOrganization), + resource.TestCheckResourceAttr("ibm_space.space", "name", updatedName), + resource.TestCheckResourceAttr("ibm_space.space", "auditors.#", "1"), + resource.TestCheckResourceAttr("ibm_space.space", "managers.#", "2"), + resource.TestCheckResourceAttr("ibm_space.space", "developers.#", "1"), + ), + }, + }, + }) +} + +func testAccCheckIBMSpaceExists(n string, obj *mccpv2.SpaceFields) resource.TestCheckFunc { + + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + spaceGUID := rs.Primary.ID + + space, err := cfClient.Spaces().Get(spaceGUID) + if err != nil { + return err + } + + *obj = *space + return nil + } +} + +func testAccCheckIBMSpaceDestroy(s *terraform.State) error { + cfClient, err := testAccProvider.Meta().(ClientSession).MccpAPI() + if err != nil { + return err + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "ibm_space" { + continue + } + + spaceGUID := rs.Primary.ID + _, err := cfClient.Spaces().Get(spaceGUID) + + if err != nil { + if apierr, ok := err.(bmxerror.RequestFailure); ok && apierr.StatusCode() != 404 { + return fmt.Errorf("Error waiting for Space (%s) to be destroyed: %s", rs.Primary.ID, err) + } + } + } + return nil +} + +func testAccCheckIBMSpaceCreate(name string) string { + return fmt.Sprintf(` + +resource "ibm_space" "space" { + org = "%s" + name = "%s" +}`, cfOrganization, name) + +} + +func testAccCheckIBMSpaceUpdate(updatedName string) string { + return fmt.Sprintf(` + +resource "ibm_space" "space" { + org = "%s" + name = "%s" +}`, cfOrganization, updatedName) + +} + +func testAccCheckIBMSpaceCreateWithRoles(name string) string { + return fmt.Sprintf(` + +resource "ibm_space" "space" { + org = "%s" + name = "%s" + auditors = ["%s"] + managers = ["%s"] + developers = ["%s"] +}`, cfOrganization, name, ibmid1, ibmid1, ibmid1) + +} + +func testAccCheckIBMSpaceUpdateWithRoles(updatedName string) string { + return fmt.Sprintf(` +resource "ibm_space" "space" { + org = "%s" + name = "%s" + auditors = ["%s"] + managers = ["%s", "%s"] + developers = ["%s"] +}`, cfOrganization, updatedName, ibmid2, ibmid2, ibmid1, ibmid2) + +} diff --git a/ibm/resource_ibm_storage_block.go b/ibm/resource_ibm_storage_block.go new file mode 100644 index 0000000000..ab6918dd93 --- /dev/null +++ b/ibm/resource_ibm_storage_block.go @@ -0,0 +1,382 @@ +package ibm + +import ( + "fmt" + "log" + "strconv" + + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/helpers/network" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/sl" +) + +func resourceIBMStorageBlock() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMStorageBlockCreate, + Read: resourceIBMStorageBlockRead, + Update: resourceIBMStorageBlockUpdate, + Delete: resourceIBMStorageBlockDelete, + Exists: resourceIBMStorageBlockExists, + Importer: 
&schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "iops": { + Type: schema.TypeFloat, + Required: true, + ForceNew: true, + }, + + "volumename": { + Type: schema.TypeString, + Computed: true, + }, + + "hostname": { + Type: schema.TypeString, + Computed: true, + }, + + "snapshot_capacity": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "os_format_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "allowed_virtual_guest_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + + "allowed_virtual_guest_info": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "username": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: schema.TypeString, + Computed: true, + }, + "hostIQN": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: func(v interface{}) int { + virtualGuest := v.(map[string]interface{}) + return virtualGuest["id"].(int) + }, + }, + + "allowed_hardware_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + + "allowed_hardware_info": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + "username": { + Type: schema.TypeString, + Computed: true, + }, + "password": { + Type: schema.TypeString, + Computed: true, + }, + "hostIQN": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: func(v interface{}) int { + baremetal := v.(map[string]interface{}) + return baremetal["id"].(int) + }, + }, + + "allowed_ip_addresses": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceIBMStorageBlockCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + storageType := d.Get("type").(string) + iops := d.Get("iops").(float64) + datacenter := d.Get("datacenter").(string) + capacity := d.Get("capacity").(int) + snapshotCapacity := d.Get("snapshot_capacity").(int) + osFormatType := d.Get("os_format_type").(string) + osType, err := network.GetOsTypeByName(sess, osFormatType) + + if err != nil { + return err + } + + storageOrderContainer, err := buildStorageProductOrderContainer(sess, storageType, iops, capacity, snapshotCapacity, blockStorage, datacenter) + if err != nil { + return fmt.Errorf("Error while creating storage:%s", err) + } + + log.Println("[INFO] Creating storage") + + var receipt datatypes.Container_Product_Order_Receipt + + switch storageType { + case enduranceType: + receipt, err = services.GetProductOrderService(sess).PlaceOrder( + &datatypes.Container_Product_Order_Network_Storage_Enterprise{ + Container_Product_Order: storageOrderContainer, + OsFormatType: &datatypes.Network_Storage_Iscsi_OS_Type{ + Id: osType.Id, + KeyName: osType.KeyName, + }, + }, sl.Bool(false)) + case performanceType: + receipt, err = 
services.GetProductOrderService(sess).PlaceOrder( + &datatypes.Container_Product_Order_Network_PerformanceStorage_Iscsi{ + Container_Product_Order_Network_PerformanceStorage: datatypes.Container_Product_Order_Network_PerformanceStorage{ + Container_Product_Order: storageOrderContainer, + }, + OsFormatType: &datatypes.Network_Storage_Iscsi_OS_Type{ + Id: osType.Id, + KeyName: osType.KeyName, + }, + }, sl.Bool(false)) + default: + return fmt.Errorf("Error during creation of storage: Invalid storageType %s", storageType) + } + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + + // Find the storage device + blockStorage, err := findStorageByOrderId(sess, *receipt.OrderId) + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + d.SetId(fmt.Sprintf("%d", *blockStorage.Id)) + + // Wait for storage availability + _, err = WaitForStorageAvailable(d, meta) + + if err != nil { + return fmt.Errorf( + "Error waiting for storage (%s) to become ready: %s", d.Id(), err) + } + + // SoftLayer changes the device ID after completion of provisioning. It is necessary to refresh device ID. + blockStorage, err = findStorageByOrderId(sess, *receipt.OrderId) + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + d.SetId(fmt.Sprintf("%d", *blockStorage.Id)) + + log.Printf("[INFO] Storage ID: %s", d.Id()) + + return resourceIBMStorageBlockUpdate(d, meta) +} + +func resourceIBMStorageBlockRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + storageId, _ := strconv.Atoi(d.Id()) + + storage, err := services.GetNetworkStorageService(sess). + Id(storageId). + Mask(storageDetailMask). + GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + + storageType := strings.Fields(*storage.StorageType.Description)[0] + + // Calculate IOPS + iops, err := getIops(storage, storageType) + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + + d.Set("type", storageType) + d.Set("capacity", *storage.CapacityGb) + d.Set("volumename", *storage.Username) + d.Set("hostname", *storage.ServiceResourceBackendIpAddress) + d.Set("iops", iops) + if storage.SnapshotCapacityGb != nil { + snapshotCapacity, _ := strconv.Atoi(*storage.SnapshotCapacityGb) + d.Set("snapshot_capacity", snapshotCapacity) + } + + // Parse data center short name from ServiceResourceName. For example, + // if SoftLayer API returns "'serviceResourceName': 'PerfStor Aggr aggr_staasdal0601_p01'", + // the data center short name is "dal06". 
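+ // The pattern matches three letters followed by two digits (e.g. "dal06",
+ // "lon02") anywhere in the service resource name.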
+ r, _ := regexp.Compile("[a-zA-Z]{3}[0-9]{2}") + d.Set("datacenter", r.FindString(*storage.ServiceResourceName)) + + // Read allowed_ip_addresses + allowedIpaddressesList := make([]string, 0, len(storage.AllowedIpAddresses)) + for _, allowedIpaddress := range storage.AllowedIpAddresses { + allowedIpaddressesList = append(allowedIpaddressesList, *allowedIpaddress.IpAddress) + } + d.Set("allowed_ip_addresses", allowedIpaddressesList) + + // Read allowed_virtual_guest_ids and allowed_virtual_guest_info + allowedVirtualGuestInfoList := make([]map[string]interface{}, 0) + allowedVirtualGuestIdsList := make([]int, 0, len(storage.AllowedVirtualGuests)) + + for _, allowedVirtualGuest := range storage.AllowedVirtualGuests { + singleVirtualGuest := make(map[string]interface{}) + singleVirtualGuest["id"] = *allowedVirtualGuest.Id + singleVirtualGuest["username"] = *allowedVirtualGuest.AllowedHost.Credential.Username + singleVirtualGuest["password"] = *allowedVirtualGuest.AllowedHost.Credential.Password + singleVirtualGuest["hostIQN"] = *allowedVirtualGuest.AllowedHost.Name + allowedVirtualGuestInfoList = append(allowedVirtualGuestInfoList, singleVirtualGuest) + allowedVirtualGuestIdsList = append(allowedVirtualGuestIdsList, *allowedVirtualGuest.Id) + } + d.Set("allowed_virtual_guest_ids", allowedVirtualGuestIdsList) + d.Set("allowed_virtual_guest_info", allowedVirtualGuestInfoList) + + // Read allowed_hardware_ids and allowed_hardware_info + allowedHardwareInfoList := make([]map[string]interface{}, 0) + allowedHardwareIdsList := make([]int, 0, len(storage.AllowedHardware)) + for _, allowedHW := range storage.AllowedHardware { + singleHardware := make(map[string]interface{}) + singleHardware["id"] = *allowedHW.Id + singleHardware["username"] = *allowedHW.AllowedHost.Credential.Username + singleHardware["password"] = *allowedHW.AllowedHost.Credential.Password + singleHardware["hostIQN"] = *allowedHW.AllowedHost.Name + allowedHardwareInfoList = append(allowedHardwareInfoList, singleHardware) + allowedHardwareIdsList = append(allowedHardwareIdsList, *allowedHW.Id) + } + d.Set("allowed_hardware_ids", allowedHardwareIdsList) + d.Set("allowed_hardware_info", allowedHardwareInfoList) + + if storage.OsType != nil { + d.Set("os_type", *storage.OsType.Name) + } + + return nil +} + +func resourceIBMStorageBlockUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + storage, err := services.GetNetworkStorageService(sess). + Id(id). + Mask(storageDetailMask). 
+ GetObject() + + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + + // Update allowed_ip_addresses + if d.HasChange("allowed_ip_addresses") { + err := updateAllowedIpAddresses(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_subnets + if d.HasChange("allowed_subnets") { + err := updateAllowedSubnets(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_virtual_guest_ids + if d.HasChange("allowed_virtual_guest_ids") { + err := updateAllowedVirtualGuestIds(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_hardware_ids + if d.HasChange("allowed_hardware_ids") { + err := updateAllowedHardwareIds(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + return resourceIBMStorageBlockRead(d, meta) +} + +func resourceIBMStorageBlockDelete(d *schema.ResourceData, meta interface{}) error { + return resourceIBMStorageFileDelete(d, meta) +} + +func resourceIBMStorageBlockExists(d *schema.ResourceData, meta interface{}) (bool, error) { + return resourceIBMStorageFileExists(d, meta) +} diff --git a/ibm/resource_ibm_storage_block_test.go b/ibm/resource_ibm_storage_block_test.go new file mode 100644 index 0000000000..079bb3f125 --- /dev/null +++ b/ibm/resource_ibm_storage_block_test.go @@ -0,0 +1,162 @@ +package ibm + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/services" +) + +func TestAccIBMStorageBlock_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMStorageBlockConfig_basic, + Check: resource.ComposeTestCheckFunc( + // Endurance Storage + testAccCheckIBMStorageBlockExists("ibm_storage_block.bs_endurance"), + resource.TestCheckResourceAttr( + "ibm_storage_block.bs_endurance", "type", "Endurance"), + resource.TestCheckResourceAttr( + "ibm_storage_block.bs_endurance", "capacity", "20"), + resource.TestCheckResourceAttr( + "ibm_storage_block.bs_endurance", "iops", "0.25"), + resource.TestCheckResourceAttr( + "ibm_storage_block.bs_endurance", "snapshot_capacity", "10"), + resource.TestCheckResourceAttr( + "ibm_storage_block.bs_endurance", "os_format_type", "Linux"), + testAccCheckIBMResources("ibm_storage_block.bs_endurance", "datacenter", + "ibm_compute_vm_instance.storagevm2", "datacenter"), + // Performance Storage + testAccCheckIBMStorageBlockExists("ibm_storage_block.bs_performance"), + resource.TestCheckResourceAttr( + "ibm_storage_block.bs_performance", "type", "Performance"), + resource.TestCheckResourceAttr( + "ibm_storage_block.bs_performance", "capacity", "20"), + resource.TestCheckResourceAttr( + "ibm_storage_block.bs_performance", "iops", "100"), + resource.TestCheckResourceAttr( + "ibm_storage_block.bs_endurance", "os_format_type", "Linux"), + testAccCheckIBMResources("ibm_storage_block.bs_performance", "datacenter", + "ibm_compute_vm_instance.storagevm2", "datacenter"), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMStorageBlockConfig_update, + Check: resource.ComposeTestCheckFunc( + // Endurance Storage + 
resource.TestCheckResourceAttr("ibm_storage_block.bs_endurance", "allowed_virtual_guest_ids.#", "1"), + resource.TestCheckResourceAttr("ibm_storage_block.bs_endurance", "allowed_ip_addresses.#", "1"), + // Performance Storage + resource.TestCheckResourceAttr("ibm_storage_block.bs_performance", "allowed_virtual_guest_ids.#", "1"), + resource.TestCheckResourceAttr("ibm_storage_block.bs_performance", "allowed_ip_addresses.#", "1"), + ), + }, + }, + }) +} + +func testAccCheckIBMStorageBlockExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Record ID is set") + } + + storageId, _ := strconv.Atoi(rs.Primary.ID) + + service := services.GetNetworkStorageService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + foundStorage, err := service.Id(storageId).GetObject() + + if err != nil { + return err + } + + if strconv.Itoa(int(*foundStorage.Id)) != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + return nil + } +} + +const testAccCheckIBMStorageBlockConfig_basic = ` +resource "ibm_compute_vm_instance" "storagevm2" { + hostname = "storagevm2" + domain = "example.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_storage_block" "bs_endurance" { + type = "Endurance" + datacenter = "${ibm_compute_vm_instance.storagevm2.datacenter}" + capacity = 20 + iops = 0.25 + snapshot_capacity = 10 + os_format_type = "Linux" +} + +resource "ibm_storage_block" "bs_performance" { + type = "Performance" + datacenter = "${ibm_compute_vm_instance.storagevm2.datacenter}" + capacity = 20 + iops = 100 + os_format_type = "Linux" +} +` +const testAccCheckIBMStorageBlockConfig_update = ` +resource "ibm_compute_vm_instance" "storagevm2" { + hostname = "storagevm2" + domain = "example.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_storage_block" "bs_endurance" { + type = "Endurance" + datacenter = "${ibm_compute_vm_instance.storagevm2.datacenter}" + capacity = 20 + iops = 0.25 + os_format_type = "Linux" + allowed_virtual_guest_ids = [ "${ibm_compute_vm_instance.storagevm2.id}" ] + allowed_ip_addresses = [ "${ibm_compute_vm_instance.storagevm2.ipv4_address_private}" ] + snapshot_capacity = 10 +} + +resource "ibm_storage_block" "bs_performance" { + type = "Performance" + datacenter = "${ibm_compute_vm_instance.storagevm2.datacenter}" + capacity = 20 + iops = 100 + os_format_type = "Linux" + allowed_virtual_guest_ids = [ "${ibm_compute_vm_instance.storagevm2.id}" ] + allowed_ip_addresses = [ "${ibm_compute_vm_instance.storagevm2.ipv4_address_private}" ] +} +` diff --git a/ibm/resource_ibm_storage_file.go b/ibm/resource_ibm_storage_file.go new file mode 100644 index 0000000000..7c9c26e447 --- /dev/null +++ b/ibm/resource_ibm_storage_file.go @@ -0,0 +1,1072 @@ +/* +* Licensed Materials - Property of IBM +* (C) Copyright IBM Corp. 2017. All Rights Reserved. +* US Government Users Restricted Rights - Use, duplication or +* disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
+ */ + +package ibm + +import ( + "fmt" + "log" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/helpers/location" + "github.com/softlayer/softlayer-go/helpers/product" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +const ( + storagePerformancePackageType = "ADDITIONAL_SERVICES_PERFORMANCE_STORAGE" + storageEndurancePackageType = "ADDITIONAL_SERVICES_ENTERPRISE_STORAGE" + storageMask = "id,billingItem.orderItem.order.id" + storageDetailMask = "id,capacityGb,iops,storageType,username,serviceResourceBackendIpAddress,properties[type]" + + ",serviceResourceName,allowedIpAddresses,allowedSubnets,allowedVirtualGuests[id,allowedHost[name,credential[username,password]]],snapshotCapacityGb,osType" + itemMask = "id,capacity,description,units,keyName,prices[id,categories[id,name,categoryCode],capacityRestrictionMinimum,capacityRestrictionMaximum,locationGroupId]" + enduranceType = "Endurance" + performanceType = "Performance" + fileStorage = "FILE_STORAGE" + blockStorage = "BLOCK_STORAGE" + retryTime = 5 +) + +var ( + // Map IOPS value to endurance storage tier keyName in SoftLayer_Product_Item + enduranceIopsMap = map[float64]string{ + 0.25: "LOW_INTENSITY_TIER", + 2: "READHEAVY_TIER", + 4: "WRITEHEAVY_TIER", + 10: "10_IOPS_PER_GB", + } + + // Map IOPS value to endurance storage tier capacityRestrictionMaximum/capacityRestrictionMinimum in SoftLayer_Product_Item + enduranceCapacityRestrictionMap = map[float64]int{ + 0.25: 100, + 2: 200, + 4: 300, + 10: 1000, + } + + // storagePackageType is a storage package keyName for SoftLayer_Product_Package. It is used to filter storage package. + // iopsCategoryCode is a storage IOPS categoryCode for SoftLayer_Product_Item. It is used to filter storage IOPS price. + // storageProtocolCategoryCode is a storage protocol categoryCode for SoftLayer_Product_Item. It is used to filter storage protocol price. 
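+	// For example (illustrative lookup): storagePackageMap[blockStorage][performanceType]
+	// maps "iopsCategoryCode" to "performance_storage_iops", the category used to pick
+	// the IOPS price for a performance block-storage order.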
+ storagePackageMap = map[string](map[string](map[string]string)){ + fileStorage: { + performanceType: { + "storagePackageType": storagePerformancePackageType, + "iopsCategoryCode": "performance_storage_iops", + "storageProtocolCategoryCode": "performance_storage_nfs", + }, + enduranceType: { + "storagePackageType": storageEndurancePackageType, + "iopsCategoryCode": "storage_tier_level", + "storageProtocolCategoryCode": "storage_file", + }, + }, + blockStorage: { + performanceType: { + "storagePackageType": storagePerformancePackageType, + "iopsCategoryCode": "performance_storage_iops", + "storageProtocolCategoryCode": "performance_storage_iscsi", + }, + enduranceType: { + "storagePackageType": storageEndurancePackageType, + "iopsCategoryCode": "storage_tier_level", + "storageProtocolCategoryCode": "storage_block", + }, + }, + } +) + +func resourceIBMStorageFile() *schema.Resource { + return &schema.Resource{ + Create: resourceIBMStorageFileCreate, + Read: resourceIBMStorageFileRead, + Update: resourceIBMStorageFileUpdate, + Delete: resourceIBMStorageFileDelete, + Exists: resourceIBMStorageFileExists, + Importer: &schema.ResourceImporter{}, + + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Computed: true, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "datacenter": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "iops": { + Type: schema.TypeFloat, + Required: true, + ForceNew: true, + }, + + "volumename": { + Type: schema.TypeString, + Computed: true, + }, + + "hostname": { + Type: schema.TypeString, + Computed: true, + }, + + "snapshot_capacity": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "allowed_virtual_guest_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + + "allowed_hardware_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + Set: func(v interface{}) int { + return v.(int) + }, + }, + + "allowed_subnets": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "allowed_ip_addresses": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "snapshot_schedule": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scheduleType": { + Type: schema.TypeString, + Optional: true, + }, + + "retentionCount": { + Type: schema.TypeInt, + Optional: true, + }, + + "minute": { + Type: schema.TypeInt, + Optional: true, + }, + + "hour": { + Type: schema.TypeInt, + Optional: true, + }, + + "dayOfWeek": { + Type: schema.TypeString, + Optional: true, + }, + + "enable": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "mountpoint": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceIBMStorageFileCreate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + + storageType := d.Get("type").(string) + iops := d.Get("iops").(float64) + datacenter := d.Get("datacenter").(string) + capacity := d.Get("capacity").(int) + snapshotCapacity := d.Get("snapshot_capacity").(int) + + storageOrderContainer, err := buildStorageProductOrderContainer(sess, storageType, iops, capacity, 
snapshotCapacity, fileStorage, datacenter) + if err != nil { + return fmt.Errorf("Error while creating storage:%s", err) + } + + log.Println("[INFO] Creating storage") + + var receipt datatypes.Container_Product_Order_Receipt + + switch storageType { + case enduranceType: + receipt, err = services.GetProductOrderService(sess).PlaceOrder( + &datatypes.Container_Product_Order_Network_Storage_Enterprise{ + Container_Product_Order: storageOrderContainer, + }, sl.Bool(false)) + case performanceType: + receipt, err = services.GetProductOrderService(sess).PlaceOrder( + &datatypes.Container_Product_Order_Network_PerformanceStorage_Nfs{ + Container_Product_Order_Network_PerformanceStorage: datatypes.Container_Product_Order_Network_PerformanceStorage{ + Container_Product_Order: storageOrderContainer, + }, + }, sl.Bool(false)) + default: + return fmt.Errorf("Error during creation of storage: Invalid storageType %s", storageType) + } + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + + // Find the storage device + fileStorage, err := findStorageByOrderId(sess, *receipt.OrderId) + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + d.SetId(fmt.Sprintf("%d", *fileStorage.Id)) + + // Wait for storage availability + _, err = WaitForStorageAvailable(d, meta) + + if err != nil { + return fmt.Errorf( + "Error waiting for storage (%s) to become ready: %s", d.Id(), err) + } + + // SoftLayer changes the device ID after completion of provisioning. It is necessary to refresh device ID. + fileStorage, err = findStorageByOrderId(sess, *receipt.OrderId) + + if err != nil { + return fmt.Errorf("Error during creation of storage: %s", err) + } + d.SetId(fmt.Sprintf("%d", *fileStorage.Id)) + + log.Printf("[INFO] Storage ID: %s", d.Id()) + + return resourceIBMStorageFileUpdate(d, meta) +} + +func resourceIBMStorageFileRead(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + storageId, _ := strconv.Atoi(d.Id()) + + storage, err := services.GetNetworkStorageService(sess). + Id(storageId). + Mask(storageDetailMask). + GetObject() + + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + + storageType := strings.Fields(*storage.StorageType.Description)[0] + + // Calculate IOPS + iops, err := getIops(storage, storageType) + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + + d.Set("type", storageType) + d.Set("capacity", *storage.CapacityGb) + d.Set("volumename", *storage.Username) + d.Set("hostname", *storage.ServiceResourceBackendIpAddress) + d.Set("iops", iops) + if storage.SnapshotCapacityGb != nil { + snapshotCapacity, _ := strconv.Atoi(*storage.SnapshotCapacityGb) + d.Set("snapshot_capacity", snapshotCapacity) + } + + // Parse data center short name from ServiceResourceName. For example, + // if SoftLayer API returns "'serviceResourceName': 'PerfStor Aggr aggr_staasdal0601_p01'", + // the data center short name is "dal06". 
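+	// (Same short-name parsing as in resourceIBMStorageBlockRead above.)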
+ r, _ := regexp.Compile("[a-zA-Z]{3}[0-9]{2}") + d.Set("datacenter", r.FindString(*storage.ServiceResourceName)) + + // Read allowed_ip_addresses + allowedIpaddressesList := make([]string, 0, len(storage.AllowedIpAddresses)) + for _, allowedIpaddress := range storage.AllowedIpAddresses { + allowedIpaddressesList = append(allowedIpaddressesList, *allowedIpaddress.IpAddress) + } + d.Set("allowed_ip_addresses", allowedIpaddressesList) + + // Read allowed_subnets + allowedSubnetsList := make([]string, 0, len(storage.AllowedSubnets)) + for _, allowedSubnets := range storage.AllowedSubnets { + allowedSubnetsList = append(allowedSubnetsList, *allowedSubnets.NetworkIdentifier+"/"+strconv.Itoa(*allowedSubnets.Cidr)) + } + d.Set("allowed_subnets", allowedSubnetsList) + + // Read allowed_virtual_guest_ids + allowedVirtualGuestIdsList := make([]int, 0, len(storage.AllowedVirtualGuests)) + for _, allowedVirtualGuest := range storage.AllowedVirtualGuests { + allowedVirtualGuestIdsList = append(allowedVirtualGuestIdsList, *allowedVirtualGuest.Id) + } + d.Set("allowed_virtual_guest_ids", allowedVirtualGuestIdsList) + + // Read allowed_hardware_ids + allowedHardwareIdsList := make([]int, 0, len(storage.AllowedHardware)) + for _, allowedHW := range storage.AllowedHardware { + allowedHardwareIdsList = append(allowedHardwareIdsList, *allowedHW.Id) + } + d.Set("allowed_hardware_ids", allowedHardwareIdsList) + + if storage.OsType != nil { + d.Set("os_type", *storage.OsType.Name) + } + + mountpoint, err := services.GetNetworkStorageService(sess).Id(storageId).GetFileNetworkMountAddress() + if err != nil { + return fmt.Errorf("Error retrieving storage information: %s", err) + } + d.Set("mountpoint", mountpoint) + + return nil +} + +func resourceIBMStorageFileUpdate(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + id, err := strconv.Atoi(d.Id()) + if err != nil { + return fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + storage, err := services.GetNetworkStorageService(sess). + Id(id). + Mask(storageDetailMask). 
+ GetObject() + + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + + // Update allowed_ip_addresses + if d.HasChange("allowed_ip_addresses") { + err := updateAllowedIpAddresses(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_subnets + if d.HasChange("allowed_subnets") { + err := updateAllowedSubnets(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_virtual_guest_ids + if d.HasChange("allowed_virtual_guest_ids") { + err := updateAllowedVirtualGuestIds(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Update allowed_hardware_ids + if d.HasChange("allowed_hardware_ids") { + err := updateAllowedHardwareIds(d, sess, storage) + if err != nil { + return fmt.Errorf("Error updating storage information: %s", err) + } + } + + // Enable Storage Snapshot Schedule + if d.HasChange("snapshot_schedule") { + err := enableStorageSnapshot(d, sess, storage) + if err != nil { + return fmt.Errorf("Error creating storage snapshot schedule: %s", err) + } + } + + return resourceIBMStorageFileRead(d, meta) +} + +func resourceIBMStorageFileDelete(d *schema.ResourceData, meta interface{}) error { + sess := meta.(ClientSession).SoftLayerSession() + storageService := services.GetNetworkStorageService(sess) + + storageID, _ := strconv.Atoi(d.Id()) + + // Get billing item associated with the storage + billingItem, err := storageService.Id(storageID).GetBillingItem() + + if err != nil { + return fmt.Errorf("Error while looking up billing item associated with the storage: %s", err) + } + + if billingItem.Id == nil { + return fmt.Errorf("Error while looking up billing item associated with the storage: No billing item for ID:%d", storageID) + } + + success, err := services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService() + if err != nil { + return err + } + + if !success { + return fmt.Errorf("SoftLayer reported an unsuccessful cancellation") + } + return nil +} + +func resourceIBMStorageFileExists(d *schema.ResourceData, meta interface{}) (bool, error) { + sess := meta.(ClientSession).SoftLayerSession() + + storageID, err := strconv.Atoi(d.Id()) + if err != nil { + return false, fmt.Errorf("Not a valid ID, must be an integer: %s", err) + } + + _, err = services.GetNetworkStorageService(sess). + Id(storageID). 
+		GetObject()
+
+	if err != nil {
+		if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 {
+			return false, nil
+		}
+		return false, fmt.Errorf("Error retrieving storage information: %s", err)
+	}
+	return true, nil
+}
+
+func buildStorageProductOrderContainer(
+	sess *session.Session,
+	storageType string,
+	iops float64,
+	capacity int,
+	snapshotCapacity int,
+	storageProtocol string,
+	datacenter string) (datatypes.Container_Product_Order, error) {
+
+	// Build product item filters for performance storage
+	iopsKeyName, err := getIopsKeyName(iops, storageType)
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+	capacityKeyName := fmt.Sprintf("%d_GB_", capacity)
+	snapshotCapacityKeyName := fmt.Sprintf("%d_GB_", snapshotCapacity)
+
+	storagePackageType := storagePackageMap[storageProtocol][storageType]["storagePackageType"]
+	iopsCategoryCode := storagePackageMap[storageProtocol][storageType]["iopsCategoryCode"]
+	storageProtocolCategoryCode := storagePackageMap[storageProtocol][storageType]["storageProtocolCategoryCode"]
+
+	// Get a package type
+	pkg, err := product.GetPackageByType(sess, storagePackageType)
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	// Get all prices
+	productItems, err := product.GetPackageProducts(sess, *pkg.Id, itemMask)
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+
+	// Add IOPS price
+	targetItemPrices := []datatypes.Product_Item_Price{}
+	var iopsPrice datatypes.Product_Item_Price
+
+	if storageType == enduranceType {
+		iopsPrice, err = getPrice(productItems, iopsKeyName, iopsCategoryCode, "", 0)
+		if err != nil {
+			return datatypes.Container_Product_Order{}, err
+		}
+	} else {
+		iopsPrice, err = getPrice(productItems, iopsKeyName, iopsCategoryCode, "STORAGE_SPACE", capacity)
+		if err != nil {
+			return datatypes.Container_Product_Order{}, err
+		}
+	}
+
+	targetItemPrices = append(targetItemPrices, iopsPrice)
+
+	var capacityPrice datatypes.Product_Item_Price
+	// Add capacity price
+	if storageType == enduranceType {
+		capacityPrice, err = getPrice(productItems, capacityKeyName, "performance_storage_space", "STORAGE_TIER_LEVEL", enduranceCapacityRestrictionMap[iops])
+		if err != nil {
+			return datatypes.Container_Product_Order{}, err
+		}
+	} else {
+		capacityPrice, err = getPrice(productItems, capacityKeyName, "performance_storage_space", "", 0)
+		if err != nil {
+			return datatypes.Container_Product_Order{}, err
+		}
+	}
+	targetItemPrices = append(targetItemPrices, capacityPrice)
+
+	// Add storageProtocol price
+	storageProtocolPrice, err := getPrice(productItems, storageProtocol, storageProtocolCategoryCode, "", 0)
+	if err != nil {
+		return datatypes.Container_Product_Order{}, err
+	}
+	targetItemPrices = append(targetItemPrices, storageProtocolPrice)
+
+	// Add Endurance Storage price
+	if storageType == enduranceType {
+		endurancePrice, err := getPrice(productItems, "CODENAME_PRIME_STORAGE_SERVICE", "storage_service_enterprise", "", 0)
+		if err != nil {
+			return datatypes.Container_Product_Order{}, err
+		}
+		targetItemPrices = append(targetItemPrices, endurancePrice)
+	}
+
+	// Add snapshot capacity price
+	if storageType == enduranceType && snapshotCapacity > 0 {
+		snapshotCapacityPrice, err := getPrice(productItems, snapshotCapacityKeyName, "storage_snapshot_space", "STORAGE_TIER_LEVEL", enduranceCapacityRestrictionMap[iops])
+		if err != nil {
+			return datatypes.Container_Product_Order{}, err
+		}
+		targetItemPrices = append(targetItemPrices, snapshotCapacityPrice)
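+		// Note: with the STORAGE_TIER_LEVEL restriction, getPrice only accepts a price
+		// whose capacityRestrictionMinimum and capacityRestrictionMaximum both equal
+		// enduranceCapacityRestrictionMap[iops] (e.g. 100 for the 0.25 IOPS/GB tier).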
+ } + + // Lookup the data center ID + dc, err := location.GetDatacenterByName(sess, datacenter) + if err != nil { + return datatypes.Container_Product_Order{}, + fmt.Errorf("No data centers matching %s could be found", datacenter) + } + + productOrderContainer := datatypes.Container_Product_Order{ + PackageId: pkg.Id, + Location: sl.String(strconv.Itoa(*dc.Id)), + Prices: targetItemPrices, + Quantity: sl.Int(1), + } + + return productOrderContainer, nil +} + +func findStorageByOrderId(sess *session.Session, orderId int) (datatypes.Network_Storage, error) { + filterPath := "networkStorage.billingItem.orderItem.order.id" + + stateConf := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"complete"}, + Refresh: func() (interface{}, string, error) { + storage, err := services.GetAccountService(sess). + Filter(filter.Build( + filter.Path(filterPath). + Eq(strconv.Itoa(orderId)))). + Mask(storageMask). + GetNetworkStorage() + if err != nil { + return datatypes.Network_Storage{}, "", err + } + + if len(storage) == 1 { + return storage[0], "complete", nil + } else if len(storage) == 0 { + return nil, "pending", nil + } else { + return nil, "", fmt.Errorf("Expected one Storage: %s", err) + } + }, + Timeout: 45 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + NotFoundChecks: 300, + } + + pendingResult, err := stateConf.WaitForState() + + if err != nil { + return datatypes.Network_Storage{}, err + } + + var result, ok = pendingResult.(datatypes.Network_Storage) + + if ok { + return result, nil + } + + return datatypes.Network_Storage{}, + fmt.Errorf("Cannot find Storage with order id '%d'", orderId) +} + +// Waits for storage provisioning +func WaitForStorageAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) { + log.Printf("Waiting for storage (%s) to be available.", d.Id()) + id, err := strconv.Atoi(d.Id()) + if err != nil { + return nil, fmt.Errorf("The storage ID %s must be numeric", d.Id()) + } + sess := meta.(ClientSession).SoftLayerSession() + + stateConf := &resource.StateChangeConf{ + Pending: []string{"retry", "provisioning"}, + Target: []string{"available"}, + Refresh: func() (interface{}, string, error) { + // Check active transactions + service := services.GetNetworkStorageService(sess) + result, err := service.Id(id).Mask("activeTransactions").GetObject() + if err != nil { + if apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 { + return nil, "", fmt.Errorf("Error retrieving storage: %s", err) + } + return false, "retry", nil + } + + log.Println("Checking active transactions.") + if len(result.ActiveTransactions) > 0 { + return result, "provisioning", nil + } + + // Check volume status. 
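+			// The status is read via a raw sess.DoRequest call and matched as a substring:
+			// provisioning counts as finished only once the response contains
+			// "PROVISION_COMPLETED" or "Volume Provisioning has completed".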
+ log.Println("Checking volume status.") + resultStr := "" + err = sess.DoRequest( + "SoftLayer_Network_Storage", + "getObject", + nil, + &sl.Options{Id: &id, Mask: "volumeStatus"}, + &resultStr, + ) + if err != nil { + return false, "retry", nil + } + + if !strings.Contains(resultStr, "PROVISION_COMPLETED") && + !strings.Contains(resultStr, "Volume Provisioning has completed") { + return result, "provisioning", nil + } + + return result, "available", nil + }, + Timeout: 45 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 10 * time.Second, + } + + return stateConf.WaitForState() +} + +func getIopsKeyName(iops float64, storageType string) (string, error) { + switch storageType { + case enduranceType: + return enduranceIopsMap[iops], nil + case performanceType: + + return fmt.Sprintf("%.f_IOPS", iops), nil + } + return "", fmt.Errorf("Invalid storageType %s.", storageType) +} + +func getPrice(productItems []datatypes.Product_Item, keyName string, categoryCode string, capacityRestrictionType string, capacityRestriction int) (datatypes.Product_Item_Price, error) { + for _, item := range productItems { + if strings.HasPrefix(*item.KeyName, keyName) { + for _, price := range item.Prices { + if *price.Categories[0].CategoryCode == categoryCode && price.LocationGroupId == nil { + if capacityRestrictionType == "STORAGE_SPACE" { + if price.CapacityRestrictionMinimum == nil || + price.CapacityRestrictionMaximum == nil { + continue + } + capacityRestrictionMinimum, _ := strconv.Atoi(*price.CapacityRestrictionMinimum) + capacityRestrictionMaximum, _ := strconv.Atoi(*price.CapacityRestrictionMaximum) + if capacityRestrictionMinimum > 0 && + capacityRestriction >= capacityRestrictionMinimum && + capacityRestriction <= capacityRestrictionMaximum { + return price, nil + } + } + + if capacityRestrictionType == "STORAGE_TIER_LEVEL" { + if price.CapacityRestrictionMinimum == nil || + price.CapacityRestrictionMaximum == nil { + continue + } + capacityRestrictionMinimum, _ := strconv.Atoi(*price.CapacityRestrictionMinimum) + capacityRestrictionMaximum, _ := strconv.Atoi(*price.CapacityRestrictionMaximum) + if capacityRestrictionMinimum > 0 && + capacityRestriction == capacityRestrictionMinimum && + capacityRestriction == capacityRestrictionMaximum { + return price, nil + } + } + + if capacityRestrictionType == "" && capacityRestriction == 0 { + return price, nil + } + } + } + } + } + return datatypes.Product_Item_Price{}, + fmt.Errorf("No product items matching with keyName %s and categoryCode %s could be found", keyName, categoryCode) +} + +func getIops(storage datatypes.Network_Storage, storageType string) (float64, error) { + switch storageType { + case enduranceType: + for _, property := range storage.Properties { + if *property.Type.Keyname == "PROVISIONED_IOPS" { + provisionedIops, err := strconv.Atoi(*property.Value) + if err != nil { + return 0, err + } + enduranceIops := float64(provisionedIops / *storage.CapacityGb) + if enduranceIops < 1 { + enduranceIops = 0.25 + } + return enduranceIops, nil + } + } + case performanceType: + if storage.Iops == nil { + return 0, fmt.Errorf("Failed to retrieve iops information.") + } + iops, err := strconv.Atoi(*storage.Iops) + if err != nil { + return 0, err + } + return float64(iops), nil + } + return 0, fmt.Errorf("Invalid storage type %s", storageType) +} + +func updateAllowedIpAddresses(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error { + id := *storage.Id + newIps := 
d.Get("allowed_ip_addresses").(*schema.Set).List()
+
+	// Add new allowed_ip_addresses
+	for _, newIp := range newIps {
+		isNewIp := true
+		for _, oldAllowedIpAddresses := range storage.AllowedIpAddresses {
+			if newIp.(string) == *oldAllowedIpAddresses.IpAddress {
+				isNewIp = false
+				break
+			}
+		}
+		if isNewIp {
+			ipObject, err := services.GetAccountService(sess).
+				Filter(filter.Build(
+					filter.Path("ipAddresses.ipAddress").
+						Eq(newIp.(string)))).GetIpAddresses()
+			if err != nil {
+				return err
+			}
+			if len(ipObject) != 1 {
+				return fmt.Errorf("Number of IP addresses is %d", len(ipObject))
+			}
+			for {
+				_, err = services.GetNetworkStorageService(sess).
+					Id(id).
+					AllowAccessFromHostList([]datatypes.Container_Network_Storage_Host{
+						{
+							Id:         ipObject[0].Id,
+							ObjectType: sl.String("SoftLayer_Network_Subnet_IpAddress"),
+						},
+					})
+				if err != nil {
+					if strings.Contains(err.Error(), "SoftLayer_Exception_Network_Storage_Group_MassAccessControlModification") {
+						time.Sleep(retryTime * time.Second)
+						continue
+					}
+					return err
+				}
+				break
+			}
+		}
+	}
+
+	// Remove deleted allowed_ip_addresses
+	for _, oldAllowedIpAddresses := range storage.AllowedIpAddresses {
+		isDeletedId := true
+		for _, newIp := range newIps {
+			if newIp.(string) == *oldAllowedIpAddresses.IpAddress {
+				isDeletedId = false
+				break
+			}
+		}
+		if isDeletedId {
+			for {
+				_, err := services.GetNetworkStorageService(sess).
+					Id(id).
+					RemoveAccessFromHostList([]datatypes.Container_Network_Storage_Host{
+						{
+							Id:         oldAllowedIpAddresses.Id,
+							ObjectType: sl.String("SoftLayer_Network_Subnet_IpAddress"),
+						},
+					})
+				if err != nil {
+					if strings.Contains(err.Error(), "SoftLayer_Exception_Network_Storage_Group_MassAccessControlModification") {
+						time.Sleep(retryTime * time.Second)
+						continue
+					}
+					return err
+				}
+				break
+			}
+		}
+	}
+	return nil
+}
+
+func updateAllowedSubnets(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error {
+	id := *storage.Id
+	newSubnets := d.Get("allowed_subnets").(*schema.Set).List()
+
+	// Add new allowed_subnets
+	for _, newSubnet := range newSubnets {
+		isNewSubnet := true
+		newSubnetArr := strings.Split(newSubnet.(string), "/")
+		newNetworkIdentifier := newSubnetArr[0]
+		newCidr, err := strconv.Atoi(newSubnetArr[1])
+		if err != nil {
+			return err
+		}
+		for _, oldAllowedSubnets := range storage.AllowedSubnets {
+			if newNetworkIdentifier == *oldAllowedSubnets.NetworkIdentifier && newCidr == *oldAllowedSubnets.Cidr {
+				isNewSubnet = false
+				break
+			}
+		}
+		if isNewSubnet {
+			filterStr := fmt.Sprintf("{\"subnets\":{\"networkIdentifier\":{\"operation\":\"%s\"},\"cidr\":{\"operation\":\"%d\"}}}", newNetworkIdentifier, newCidr)
+			subnetObject, err := services.GetAccountService(sess).
+				Filter(filterStr).GetSubnets()
+			if err != nil {
+				return err
+			}
+			if len(subnetObject) != 1 {
+				return fmt.Errorf("Number of subnets is %d", len(subnetObject))
+			}
+			_, err = services.GetNetworkStorageService(sess).
+				Id(id).
+ AllowAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: subnetObject[0].Id, + ObjectType: sl.String("SoftLayer_Network_Subnet"), + }, + }) + if err != nil { + return err + } + } + } + + // Remove deleted allowed_subnets + for _, oldAllowedSubnets := range storage.AllowedSubnets { + isDeletedSubnet := true + for _, newSubnet := range newSubnets { + newSubnetArr := strings.Split(newSubnet.(string), "/") + newNetworkIdentifier := newSubnetArr[0] + newCidr, err := strconv.Atoi(newSubnetArr[1]) + if err != nil { + return err + } + + if newNetworkIdentifier == *oldAllowedSubnets.NetworkIdentifier && newCidr == *oldAllowedSubnets.Cidr { + isDeletedSubnet = false + break + } + } + if isDeletedSubnet { + _, err := services.GetNetworkStorageService(sess). + Id(id). + RemoveAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: sl.Int(*oldAllowedSubnets.Id), + ObjectType: sl.String("SoftLayer_Network_Subnet"), + }, + }) + if err != nil { + return err + } + } + } + return nil +} + +func updateAllowedVirtualGuestIds(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error { + id := *storage.Id + newIds := d.Get("allowed_virtual_guest_ids").(*schema.Set).List() + + // Add new allowed_virtual_guest_ids + for _, newId := range newIds { + isNewId := true + for _, oldAllowedVirtualGuest := range storage.AllowedVirtualGuests { + if newId.(int) == *oldAllowedVirtualGuest.Id { + isNewId = false + break + } + } + if isNewId { + for { + _, err := services.GetNetworkStorageService(sess). + Id(id). + AllowAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: sl.Int(newId.(int)), + ObjectType: sl.String("SoftLayer_Virtual_Guest"), + }, + }) + if err != nil { + if strings.Contains(err.Error(), "SoftLayer_Exception_Network_Storage_Group_MassAccessControlModification") { + time.Sleep(retryTime * time.Second) + continue + } + return err + } + break + } + } + } + + // Remove deleted allowed_virtual_guest_ids + for _, oldAllowedVirtualGuest := range storage.AllowedVirtualGuests { + isDeletedId := true + for _, newId := range newIds { + if newId.(int) == *oldAllowedVirtualGuest.Id { + isDeletedId = false + break + } + } + if isDeletedId { + for { + _, err := services.GetNetworkStorageService(sess). + Id(id). + RemoveAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: sl.Int(*oldAllowedVirtualGuest.Id), + ObjectType: sl.String("SoftLayer_Virtual_Guest"), + }, + }) + if err != nil { + if strings.Contains(err.Error(), "SoftLayer_Exception_Network_Storage_Group_MassAccessControlModification") { + time.Sleep(retryTime * time.Second) + continue + } + return err + } + break + } + } + } + return nil +} + +func updateAllowedHardwareIds(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error { + id := *storage.Id + newIds := d.Get("allowed_hardware_ids").(*schema.Set).List() + + // Add new allowed_hardware_ids + for _, newId := range newIds { + isNewId := true + for _, oldAllowedHardware := range storage.AllowedHardware { + if newId.(int) == *oldAllowedHardware.Id { + isNewId = false + break + } + } + if isNewId { + _, err := services.GetNetworkStorageService(sess). + Id(id). 
+ AllowAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: sl.Int(newId.(int)), + ObjectType: sl.String("SoftLayer_Hardware"), + }, + }) + if err != nil { + return err + } + } + } + + // Remove deleted allowed_hardware_ids + for _, oldAllowedHardware := range storage.AllowedHardware { + isDeletedId := true + for _, newId := range newIds { + if newId.(int) == *oldAllowedHardware.Id { + isDeletedId = false + break + } + } + if isDeletedId { + _, err := services.GetNetworkStorageService(sess). + Id(id). + RemoveAccessFromHostList([]datatypes.Container_Network_Storage_Host{ + { + Id: sl.Int(*oldAllowedHardware.Id), + ObjectType: sl.String("SoftLayer_Hardware"), + }, + }) + if err != nil { + return err + } + } + } + return nil +} + +func enableStorageSnapshot(d *schema.ResourceData, sess *session.Session, storage datatypes.Network_Storage) error { + id := *storage.Id + for _, e := range d.Get("snapshot_schedule").([]interface{}) { + value := e.(map[string]interface{}) + enable := value["enable"].(bool) + if enable { + _, err := services.GetNetworkStorageService(sess). + Id(id). + EnableSnapshots(sl.String(value["scheduleType"].(string)), sl.Int(value["retentionCount"].(int)), sl.Int(value["minute"].(int)), sl.Int(value["hour"].(int)), sl.String(value["dayOfWeek"].(string))) + if err != nil { + return err + } + } else { + _, err := services.GetNetworkStorageService(sess). + Id(id). + DisableSnapshots(sl.String(value["scheduleType"].(string))) + if err != nil { + return err + } + + } + } + return nil +} diff --git a/ibm/resource_ibm_storage_file_test.go b/ibm/resource_ibm_storage_file_test.go new file mode 100644 index 0000000000..8169f06570 --- /dev/null +++ b/ibm/resource_ibm_storage_file_test.go @@ -0,0 +1,294 @@ +package ibm + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/softlayer/softlayer-go/services" +) + +func TestAccIBMStorageFile_Basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckIBMStorageFileConfig_basic, + Check: resource.ComposeTestCheckFunc( + // Endurance Storage + testAccCheckIBMStorageFileExists("ibm_storage_file.fs_endurance"), + resource.TestCheckResourceAttr( + "ibm_storage_file.fs_endurance", "type", "Endurance"), + resource.TestCheckResourceAttr( + "ibm_storage_file.fs_endurance", "capacity", "20"), + resource.TestCheckResourceAttrSet("ibm_storage_file.fs_endurance", "mountpoint"), + resource.TestCheckResourceAttr( + "ibm_storage_file.fs_endurance", "iops", "0.25"), + resource.TestCheckResourceAttr( + "ibm_storage_file.fs_endurance", "snapshot_capacity", "10"), + testAccCheckIBMResources("ibm_storage_file.fs_endurance", "datacenter", + "ibm_compute_vm_instance.storagevm1", "datacenter"), + // Performance Storage + testAccCheckIBMStorageFileExists("ibm_storage_file.fs_performance"), + resource.TestCheckResourceAttr( + "ibm_storage_file.fs_performance", "type", "Performance"), + resource.TestCheckResourceAttr( + "ibm_storage_file.fs_performance", "capacity", "20"), + resource.TestCheckResourceAttr( + "ibm_storage_file.fs_performance", "iops", "200"), + testAccCheckIBMResources("ibm_storage_file.fs_performance", "datacenter", + "ibm_compute_vm_instance.storagevm1", "datacenter"), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMStorageFileConfig_update, + Check: 
resource.ComposeTestCheckFunc( + // Endurance Storage + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "allowed_virtual_guest_ids.#", "1"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "allowed_subnets.#", "1"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "allowed_ip_addresses.#", "1"), + // Performance Storage + resource.TestCheckResourceAttr("ibm_storage_file.fs_performance", "allowed_virtual_guest_ids.#", "1"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_performance", "allowed_subnets.#", "1"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_performance", "allowed_ip_addresses.#", "1"), + resource.TestCheckResourceAttrSet("ibm_storage_file.fs_endurance", "mountpoint"), + ), + }, + + resource.TestStep{ + Config: testAccCheckIBMStorageFileConfig_enablesnapshot, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("ibm_storage_file.fs_endurance", "mountpoint"), + // Endurance Storage + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.#", "3"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.0.scheduleType", "WEEKLY"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.0.retentionCount", "5"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.0.minute", "2"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.0.hour", "13"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.0.dayOfWeek", "SUNDAY"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.1.scheduleType", "HOURLY"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.1.retentionCount", "5"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.1.minute", "30"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.2.scheduleType", "DAILY"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.2.retentionCount", "6"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.2.minute", "2"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.2.hour", "15"), + ), + }, + resource.TestStep{ + Config: testAccCheckIBMStorageFileConfig_updatesnapshot, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("ibm_storage_file.fs_endurance", "mountpoint"), + // Endurance Storage + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.#", "3"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.0.retentionCount", "2"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.0.minute", "2"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.0.hour", "13"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.0.dayOfWeek", "MONDAY"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.1.retentionCount", "3"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.1.minute", "40"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.2.retentionCount", "5"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.2.minute", 
"2"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.2.hour", "15"), + resource.TestCheckResourceAttr("ibm_storage_file.fs_endurance", "snapshot_schedule.2.enable", "false"), + ), + }, + }, + }) +} + +func testAccCheckIBMStorageFileExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Record ID is set") + } + + storageId, _ := strconv.Atoi(rs.Primary.ID) + + service := services.GetNetworkStorageService(testAccProvider.Meta().(ClientSession).SoftLayerSession()) + foundStorage, err := service.Id(storageId).GetObject() + + if err != nil { + return err + } + + if strconv.Itoa(int(*foundStorage.Id)) != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + return nil + } +} + +const testAccCheckIBMStorageFileConfig_basic = ` +resource "ibm_compute_vm_instance" "storagevm1" { + hostname = "storagevm1" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_storage_file" "fs_endurance" { + type = "Endurance" + datacenter = "${ibm_compute_vm_instance.storagevm1.datacenter}" + capacity = 20 + iops = 0.25 + snapshot_capacity = 10 +} + +resource "ibm_storage_file" "fs_performance" { + type = "Performance" + datacenter = "${ibm_compute_vm_instance.storagevm1.datacenter}" + capacity = 20 + iops = 200 +} +` +const testAccCheckIBMStorageFileConfig_update = ` +resource "ibm_compute_vm_instance" "storagevm1" { + hostname = "storagevm1" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_storage_file" "fs_endurance" { + type = "Endurance" + datacenter = "${ibm_compute_vm_instance.storagevm1.datacenter}" + capacity = 20 + iops = 0.25 + allowed_virtual_guest_ids = [ "${ibm_compute_vm_instance.storagevm1.id}" ] + allowed_subnets = [ "${ibm_compute_vm_instance.storagevm1.private_subnet}" ] + allowed_ip_addresses = [ "${ibm_compute_vm_instance.storagevm1.ipv4_address_private}" ] + snapshot_capacity = 10 +} + +resource "ibm_storage_file" "fs_performance" { + type = "Performance" + datacenter = "${ibm_compute_vm_instance.storagevm1.datacenter}" + capacity = 20 + iops = 100 + allowed_virtual_guest_ids = [ "${ibm_compute_vm_instance.storagevm1.id}" ] + allowed_subnets = [ "${ibm_compute_vm_instance.storagevm1.private_subnet}" ] + allowed_ip_addresses = [ "${ibm_compute_vm_instance.storagevm1.ipv4_address_private}" ] +} +` + +const testAccCheckIBMStorageFileConfig_enablesnapshot = ` +resource "ibm_compute_vm_instance" "storagevm1" { + hostname = "storagevm1" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_storage_file" "fs_endurance" { + type = "Endurance" + datacenter = "${ibm_compute_vm_instance.storagevm1.datacenter}" + capacity = 20 + iops = 0.25 + snapshot_capacity = 10 + snapshot_schedule = [ + { + scheduleType="WEEKLY", + retentionCount= 5, + minute= 2, + hour= 13, + dayOfWeek= "SUNDAY", + 
enable= true + }, + { + scheduleType="HOURLY", + retentionCount= 5, + minute= 30, + enable= true + }, + + { + scheduleType="DAILY", + retentionCount= 6, + minute= 2, + hour= 15 + enable= true + }, + ] +} +` +const testAccCheckIBMStorageFileConfig_updatesnapshot = ` +resource "ibm_compute_vm_instance" "storagevm1" { + hostname = "storagevm1" + domain = "terraformuat.ibm.com" + os_reference_code = "DEBIAN_7_64" + datacenter = "dal06" + network_speed = 100 + hourly_billing = true + private_network_only = false + cores = 1 + memory = 1024 + disks = [25] + local_disk = false +} + +resource "ibm_storage_file" "fs_endurance" { + type = "Endurance" + datacenter = "${ibm_compute_vm_instance.storagevm1.datacenter}" + capacity = 20 + iops = 0.25 + snapshot_capacity = 10 + snapshot_schedule = [ + { + scheduleType="WEEKLY", + retentionCount= 2, + minute= 2, + hour= 13, + dayOfWeek= "MONDAY", + enable= true + }, + { + scheduleType="HOURLY", + retentionCount= 3, + minute= 40, + enable= true + }, + + { + scheduleType="DAILY", + retentionCount= 5, + minute= 2, + hour= 15 + enable= false + }, + ] +} +` diff --git a/ibm/structures.go b/ibm/structures.go new file mode 100644 index 0000000000..81be547022 --- /dev/null +++ b/ibm/structures.go @@ -0,0 +1,110 @@ +package ibm + +import ( + "github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2" + "github.com/hashicorp/terraform/helper/schema" + "github.com/softlayer/softlayer-go/datatypes" +) + +//HashInt ... +func HashInt(v interface{}) int { return v.(int) } + +func expandStringList(input []interface{}) []string { + vs := make([]string, len(input)) + for i, v := range input { + vs[i] = v.(string) + } + return vs +} + +func flattenStringList(list []string) []interface{} { + vs := make([]interface{}, len(list)) + for i, v := range list { + vs[i] = v + } + return vs +} + +func expandIntList(input []interface{}) []int { + vs := make([]int, len(input)) + for i, v := range input { + vs[i] = v.(int) + } + return vs +} + +func flattenIntList(list []int) []interface{} { + vs := make([]interface{}, len(list)) + for i, v := range list { + vs[i] = v + } + return vs +} + +func newStringSet(f schema.SchemaSetFunc, in []string) *schema.Set { + var out = make([]interface{}, len(in), len(in)) + for i, v := range in { + out[i] = v + } + return schema.NewSet(f, out) +} + +func flattenRoute(in []mccpv2.Route) *schema.Set { + vs := make([]string, len(in)) + for i, v := range in { + vs[i] = v.GUID + } + return newStringSet(schema.HashString, vs) +} + +func stringSliceToSet(in []string) *schema.Set { + vs := make([]string, len(in)) + for i, v := range in { + vs[i] = v + } + return newStringSet(schema.HashString, vs) +} + +func flattenServiceBindings(in []mccpv2.ServiceBinding) *schema.Set { + vs := make([]string, len(in)) + for i, v := range in { + vs[i] = v.ServiceInstanceGUID + } + return newStringSet(schema.HashString, vs) +} + +func flattenPort(in []int) *schema.Set { + var out = make([]interface{}, len(in)) + for i, v := range in { + out[i] = v + } + return schema.NewSet(HashInt, out) +} + +func flattenFileStorageID(in []datatypes.Network_Storage) *schema.Set { + var out = []interface{}{} + for _, v := range in { + if *v.NasType == "NAS" { + out = append(out, *v.Id) + } + } + return schema.NewSet(HashInt, out) +} + +func flattenBlockStorageID(in []datatypes.Network_Storage) *schema.Set { + var out = []interface{}{} + for _, v := range in { + if *v.NasType == "ISCSI" { + out = append(out, *v.Id) + } + } + return schema.NewSet(HashInt, out) +} + +func flattenSpaceRoleUsers(in 
[]mccpv2.SpaceRole) *schema.Set {
+	var out = []interface{}{}
+	for _, v := range in {
+		out = append(out, v.UserName)
+	}
+	return schema.NewSet(schema.HashString, out)
+}
diff --git a/ibm/test-fixtures/app1.zip b/ibm/test-fixtures/app1.zip
new file mode 100644
index 0000000000..4ceab25289
Binary files /dev/null and b/ibm/test-fixtures/app1.zip differ
diff --git a/ibm/test-fixtures/app2.zip b/ibm/test-fixtures/app2.zip
new file mode 100644
index 0000000000..b450b52bd3
Binary files /dev/null and b/ibm/test-fixtures/app2.zip differ
diff --git a/ibm/validators.go b/ibm/validators.go
new file mode 100644
index 0000000000..f9dc87090e
--- /dev/null
+++ b/ibm/validators.go
@@ -0,0 +1,120 @@
+package ibm
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/IBM-Bluemix/bluemix-go/helpers"
+	"github.com/hashicorp/terraform/helper/schema"
+	homedir "github.com/mitchellh/go-homedir"
+)
+
+func validateServiceTags(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	if len(value) > 2048 {
+		errors = append(errors, fmt.Errorf(
+			"%q must contain tags whose maximum length is 2048 characters", k))
+	}
+	return
+}
+
+func validateAllowedStringValue(validValues []string) schema.SchemaValidateFunc {
+	return func(v interface{}, k string) (ws []string, errors []error) {
+		input := v.(string)
+		existed := false
+		for _, s := range validValues {
+			if s == input {
+				existed = true
+				break
+			}
+		}
+		if !existed {
+			errors = append(errors, fmt.Errorf(
+				"%q must contain a value from %#v, got %q",
+				k, validValues, input))
+		}
+		return
+
+	}
+}
+
+func validateRoutePath(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+	//Somehow API allows this
+	if value == "" {
+		return
+	}
+
+	if (len(value) < 2) || (len(value) > 128) {
+		errors = append(errors, fmt.Errorf(
+			"%q (%q) must contain from 2 to 128 characters", k, value))
+	}
+	if !(strings.HasPrefix(value, "/")) {
+		errors = append(errors, fmt.Errorf(
+			"%q (%q) must start with a forward slash '/'", k, value))
+
+	}
+	if strings.Contains(value, "?") {
+		errors = append(errors, fmt.Errorf(
+			"%q (%q) must not contain a '?'", k, value))
+	}
+
+	return
+}
+
+func validateRoutePort(v interface{}, k string) (ws []string, errors []error) {
+	return validatePortRange(1024, 65535)(v, k)
+}
+
+func validateAppPort(v interface{}, k string) (ws []string, errors []error) {
+	return validatePortRange(1024, 65535)(v, k)
+}
+
+func validatePortRange(start, end int) func(v interface{}, k string) (ws []string, errors []error) {
+	f := func(v interface{}, k string) (ws []string, errors []error) {
+		value := v.(int)
+		if (value < start) || (value > end) {
+			errors = append(errors, fmt.Errorf(
+				"%q (%d) must be in the range of %d to %d", k, value, start, end))
+		}
+		return
+	}
+	return f
+}
+
+func validateDomainName(v interface{}, k string) (ws []string, errors []error) {
+	value := v.(string)
+
+	if !(strings.Contains(value, ".")) {
+		errors = append(errors, fmt.Errorf(
+			"%q (%q) must contain a '.', e.g. example.com or foo.example.com", k, value))
+	}
+
+	return
+}
+
+func validateAppInstance(v interface{}, k string) (ws []string, errors []error) {
+	instances := v.(int)
+	if instances < 0 {
+		errors = append(errors, fmt.Errorf(
+			"%q (%d) must be greater than or equal to 0", k, instances))
+	}
+	return
+
+}
+
+func validateAppZipPath(v interface{}, k string) (ws []string, errors []error) {
+	path := v.(string)
+	applicationZip, err := homedir.Expand(path)
+	if err != nil {
+		errors = append(errors, fmt.Errorf(
+			"%q (%q) home directory in the given path couldn't be expanded", k, path))
+	}
+	if !helpers.FileExists(applicationZip) {
+		errors = append(errors, fmt.Errorf(
+			"%q (%q) doesn't exist", k, path))
+	}
+
+	return
+
+}
diff --git a/main.go b/main.go
new file mode 100644
index 0000000000..c895d7aeb3
--- /dev/null
+++ b/main.go
@@ -0,0 +1,15 @@
+package main
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform/plugin"
+	"github.com/terraform-providers/terraform-provider-ibm/ibm"
+)
+
+func main() {
+	log.Println("IBM Cloud Provider version", Version, VersionPrerelease, GitCommit)
+	plugin.Serve(&plugin.ServeOpts{
+		ProviderFunc: ibm.Provider,
+	})
+}
diff --git a/migration/migration.txt b/migration/migration.txt
new file mode 100644
index 0000000000..cd132394ae
--- /dev/null
+++ b/migration/migration.txt
@@ -0,0 +1,46 @@
+# You can use this file with the sed command to replace all old names with new ones
+# find /path/to/tf-configs -type f -name "*.tf" -exec sed -i .bak -f migration.txt {} +
+# The above command would replace old names as seen below with new ones in all files ending with .tf and back up your files by suffixing them with .bak
+# Caution: Make sure you don't run this in a git directory as it might corrupt your .git. You could exclude the .git directory in your command or point
+# specifically to the directory which contains the terraform configuration files
+
+s/ibmcloud_cf_account/ibm_account/g
+s/ibmcloud_cf_app/ibm_app/g
+s/ibmcloud_cf_org/ibm_org/g
+s/ibmcloud_cf_private_domain/ibm_app_domain_private/g
+s/ibmcloud_cf_route/ibm_app_route/g
+s/ibmcloud_cf_service_instance/ibm_service_instance/g
+s/ibmcloud_cf_service_key/ibm_service_key/g
+s/ibmcloud_cf_service_plan/ibm_service_plan/g
+s/ibmcloud_cf_shared_domain/ibm_app_domain_shared/g
+s/ibmcloud_cf_space/ibm_space/g
+s/ibmcloud_cs_cluster_config/ibm_container_cluster_config/g
+s/ibmcloud_cs_cluster/ibm_container_cluster/g
+s/ibmcloud_cs_worker/ibm_container_cluster_worker/g
+s/ibmcloud_infra_dns_domain/ibm_dns_domain/g
+s/ibmcloud_infra_image_template/ibm_compute_image_template/g
+s/ibmcloud_infra_ssh_key/ibm_compute_ssh_key/g
+s/ibmcloud_infra_virtual_guest/ibm_compute_vm_instance/g
+s/ibmcloud_infra_vlan/ibm_network_vlan/g
+s/ibmcloud_cs_cluster_bind_service/ibm_container_bind_service/g
+s/ibmcloud_infra_bare_metal/ibm_compute_bare_metal/g
+s/ibmcloud_infra_basic_monitor/ibm_compute_monitor/g
+s/ibmcloud_infra_block_storage/ibm_storage_block/g
+s/ibmcloud_infra_dns_domain_record/ibm_dns_record/g
+s/ibmcloud_infra_file_storage/ibm_storage_file/g
+s/ibmcloud_infra_fw_hardware_dedicated_rules/ibm_firewall_policy/g
+s/ibmcloud_infra_fw_hardware_dedicated/ibm_firewall/g
+s/ibmcloud_infra_global_ip/ibm_network_public_ip/g
+s/ibmcloud_infra_lb_local_service_group/ibm_lb_service_group/g
+s/ibmcloud_infra_lb_local_service/ibm_lb_service/g
+s/ibmcloud_infra_lb_local/ibm_lb/g
+s/ibmcloud_infra_lb_vpx_ha/ibm_lb_vpx_ha/g
+s/ibmcloud_infra_lb_vpx_service/ibm_lb_vpx_service/g
+s/ibmcloud_infra_lb_vpx_vip/ibm_lb_vpx_vip/g
+s/ibmcloud_infra_lb_vpx/ibm_lb_vpx/g
+s/ibmcloud_infra_objectstorage_account/ibm_object_storage_account/g
+s/ibmcloud_infra_provisioning_hook/ibm_compute_provisioning_hook/g
+s/ibmcloud_infra_scale_group/ibm_compute_autoscale_group/g
+s/ibmcloud_infra_scale_policy/ibm_compute_autoscale_policy/g
+s/ibmcloud_infra_security_certificate/ibm_compute_ssl_certificate/g
+s/ibmcloud_infra_user/ibm_compute_user/g
diff --git a/scripts/build.sh b/scripts/build.sh
new file mode 100755
index 0000000000..d4d06e9d49
--- /dev/null
+++ b/scripts/build.sh
@@ -0,0 +1,102 @@
+#!/usr/bin/env bash +# +# This script builds the application from source for multiple platforms. + +# Get the parent directory of where this script is. +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" + +# Change into that directory +cd "$DIR" + +# Get the git commit +GIT_COMMIT=$(git rev-parse HEAD) +GIT_DIRTY=$(test -n "`git status --porcelain`" && echo "+CHANGES" || true) + +# Determine the arch/os combos we're building for +XC_ARCH=${XC_ARCH:-"amd64"} +XC_OS=${XC_OS:-linux darwin windows} +XC_EXCLUDE_OSARCH="!darwin/arm !darwin/386" + +# Delete the old dir +echo "==> Removing old directory..." +rm -f bin/* +rm -rf pkg/* +mkdir -p bin/ + +# If it's dev mode, only build for ourselves +if [ "${TF_DEV}x" != "x" ]; then + XC_OS=$(go env GOOS) + XC_ARCH=$(go env GOARCH) +fi + +if ! which gox > /dev/null; then + echo "==> Installing gox..." + go get -u github.com/mitchellh/gox +fi + +# Instruct gox to build statically linked binaries +export CGO_ENABLED=0 + +# Allow LD_FLAGS to be appended during development compilations +LD_FLAGS="-X main.GitCommit=${GIT_COMMIT}${GIT_DIRTY} $LD_FLAGS" + +# In release mode we don't want debug information in the binary +if [[ -n "${TF_RELEASE}" ]]; then + LD_FLAGS="-X main.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X main.VersionPrerelease= -s -w" +fi + +# Build! +echo "==> Building..." +gox \ + -os="${XC_OS}" \ + -arch="${XC_ARCH}" \ + -osarch="${XC_EXCLUDE_OSARCH}" \ + -ldflags "${LD_FLAGS}" \ + -output "pkg/{{.OS}}_{{.Arch}}/terraform-provider-ibm" \ + . + +# Move all the compiled things to the $GOPATH/bin +GOPATH=${GOPATH:-$(go env GOPATH)} +case $(uname) in + CYGWIN*) + GOPATH="$(cygpath $GOPATH)" + ;; +esac +OLDIFS=$IFS +IFS=: MAIN_GOPATH=($GOPATH) +IFS=$OLDIFS + +# Create GOPATH/bin if it doesn't exist +if [ ! -d $MAIN_GOPATH/bin ]; then + echo "==> Creating GOPATH/bin directory..." + mkdir -p $MAIN_GOPATH/bin +fi + +# Copy our OS/Arch to the bin/ directory +DEV_PLATFORM="./pkg/$(go env GOOS)_$(go env GOARCH)" +if [[ -d "${DEV_PLATFORM}" ]]; then + for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do + cp ${F} bin/ + cp ${F} ${MAIN_GOPATH}/bin/ + done +fi + +if [ "${TF_DEV}x" = "x" ]; then + # Zip and copy to the dist dir + echo "==> Packaging..." + for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do + OSARCH=$(basename ${PLATFORM}) + echo "--> ${OSARCH}" + + pushd $PLATFORM >/dev/null 2>&1 + zip ../${OSARCH}.zip ./* + popd >/dev/null 2>&1 + done +fi + +# Done! +echo +echo "==> Results:" +ls -hl bin/ \ No newline at end of file diff --git a/scripts/changelog-link.sh b/scripts/changelog-link.sh new file mode 100755 index 0000000000..dfc81a5f3d --- /dev/null +++ b/scripts/changelog-link.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# This script rewrites [GH-nnnn]-style references in the CHANGELOG.md file to +# be Markdown links to the given github issues. +# +# This is run during releases so that the issue references in all of the +# released items are presented as clickable links, but we can just use the +# easy [GH-nnnn] shorthand for quickly adding items to the "Unreleased" section +# while merging things between releases. + +set -e + +if [[ ! -f CHANGELOG.md ]]; then + echo "ERROR: CHANGELOG.md not found in pwd."
+ echo "Please run this from the root of the terraform provider repository" + exit 1 +fi + +if [[ `uname` == "Darwin" ]]; then + echo "Using BSD sed" + SED="sed -i.bak -E -e" +else + echo "Using GNU sed" + SED="sed -i.bak -r -e" +fi + +PROVIDER_URL="https:\/\/github.com\/terraform-providers\/terraform-provider-google" + +$SED "s/GH-([0-9]+)/\[#\1\]\($PROVIDER_URL\/\1\)/g" -e 's/\[\[#(.+)([0-9])\)]$/(\[#\1\2))/g' CHANGELOG.md + +rm CHANGELOG.md.bak \ No newline at end of file diff --git a/scripts/errcheck.sh b/scripts/errcheck.sh new file mode 100755 index 0000000000..15464f5aa3 --- /dev/null +++ b/scripts/errcheck.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# Check gofmt +echo "==> Checking for unchecked errors..." + +if ! which errcheck > /dev/null; then + echo "==> Installing errcheck..." + go get -u github.com/kisielk/errcheck +fi + +err_files=$(errcheck -ignoretests \ + -ignore 'github.com/hashicorp/terraform/helper/schema:Set' \ + -ignore 'bytes:.*' \ + -ignore 'io:Close|Write' \ + $(go list ./...| grep -v /vendor/)) + +if [[ -n ${err_files} ]]; then + echo 'Unchecked errors found in the following places:' + echo "${err_files}" + echo "Please handle returned errors. You can check directly with \`make errcheck\`" + exit 1 +fi + +exit 0 diff --git a/scripts/gofmtcheck.sh b/scripts/gofmtcheck.sh new file mode 100755 index 0000000000..254ed875ce --- /dev/null +++ b/scripts/gofmtcheck.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# Check gofmt +echo "==> Checking that code complies with gofmt requirements..." +gofmt_files=$(gofmt -l `find . -name '*.go' | grep -v vendor`) +if [[ -n ${gofmt_files} ]]; then + echo 'gofmt needs running on the following files:' + echo "${gofmt_files}" + echo "You can use the command: \`make fmt\` to reformat code." + exit 1 +fi + +exit 0 \ No newline at end of file diff --git a/scripts/gogetcookie.sh b/scripts/gogetcookie.sh new file mode 100755 index 0000000000..26c63a64be --- /dev/null +++ b/scripts/gogetcookie.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +touch ~/.gitcookies +chmod 0600 ~/.gitcookies + +git config --global http.cookiefile ~/.gitcookies + +tr , \\t <<\__END__ >>~/.gitcookies +.googlesource.com,TRUE,/,TRUE,2147483647,o,git-paul.hashicorp.com=1/z7s05EYPudQ9qoe6dMVfmAVwgZopEkZBb1a2mA5QtHE +__END__ diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/LICENSE b/vendor/github.com/IBM-Bluemix/bluemix-go/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/Makefile b/vendor/github.com/IBM-Bluemix/bluemix-go/Makefile new file mode 100644 index 0000000000..46afc2d3ce --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/Makefile @@ -0,0 +1,18 @@ +.PHONY : test +test: vet test_deps + go test ./... + +.PHONY : test_deps +test_deps: + go get -t ./... 
+ + +.PHONY : vet + +vet: + @echo 'go vet $$(go list ./... | grep -v vendor)' + @go vet $$(go list ./... | grep -v vendor) ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/README.md b/vendor/github.com/IBM-Bluemix/bluemix-go/README.md new file mode 100644 index 0000000000..439fc7756e --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/README.md @@ -0,0 +1,46 @@ +# Bluemix SDK for Go + +## Installing + +1. Install the SDK using the following command + + ```go get github.com/IBM-Bluemix/bluemix-go``` + +2. Update the SDK to the latest version using the following command + + ```go get -u github.com/IBM-Bluemix/bluemix-go``` + + +## Using the SDK + +You must have a working Bluemix account to use the APIs. [Sign up](https://console.ng.bluemix.net/registration/?target=%2Fdashboard%2Fapps) if you don't have one. + +The SDK has an ```examples``` folder which contains a few examples of how to use the SDK. +First, you need to create a session. + +``` +import ( + "log" + + "github.com/IBM-Bluemix/bluemix-go/session" +) + +func main() { + sess, err := session.New() + if err != nil { + log.Fatal(err) + } + // Use sess to construct the service clients under api/. + _ = sess +} +``` +Creating a session in this way creates a default configuration which reads the values from the environment variables. +You must export the following environment variables: +* IBMID - This is the IBM ID +* IBMID_PASSWORD - This is the password for the above ID + +OR + +* BM_API_KEY/BLUEMIX_API_KEY - This is the Bluemix API Key. Log in to [Bluemix](https://console.ng.bluemix.net) to create one if you don't already have one. Follow Manage -> Account -> Users. Click on _Bluemix API Keys_ + +The default region is _us-south_. You can override it in the Config struct. You can also provide the value via environment variables, either via _BM_REGION_ or _BLUEMIX_REGION_. Valid regions are: +* us-south +* eu-gb +* au-syd
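+ +## Example + +The sketch below is illustrative only (error handling kept minimal); it assumes the environment variables above are exported, and it uses the vendored ```accountv2``` package to list the accounts visible to the authenticated user: + +``` +package main + +import ( + "log" + + "github.com/IBM-Bluemix/bluemix-go/api/account/accountv2" + "github.com/IBM-Bluemix/bluemix-go/session" +) + +func main() { + // session.New reads the credentials from the environment. + sess, err := session.New() + if err != nil { + log.Fatal(err) + } + + accountAPI, err := accountv2.New(sess) + if err != nil { + log.Fatal(err) + } + + accounts, err := accountAPI.Accounts().List() + if err != nil { + log.Fatal(err) + } + for _, account := range accounts { + log.Println(account.GUID, account.Name) + } +} +``` + + + diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/account/accountv2/accounts.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/account/accountv2/accounts.go new file mode 100644 index 0000000000..5dfa8a6b49 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/account/accountv2/accounts.go @@ -0,0 +1,185 @@ +package accountv2 + +import ( + "fmt" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/client" +) + +//Metadata ... +type Metadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//Resource ... +type Resource struct { + Metadata Metadata +} + +//Account Model ... +type Account struct { + GUID string + Name string + Type string + State string + OwnerGUID string + OwnerUserID string + OwnerUniqueID string + CustomerID string + CountryCode string + CurrencyCode string + Organizations []AccountOrganization + Members []AccountMember `json:"members"` +} + +//AccountOrganization ... +type AccountOrganization struct { + GUID string `json:"guid"` + Region string `json:"region"` +} + +//AccountMember ... +type AccountMember struct { + GUID string `json:"guid"` + UserID string `json:"user_id"` + UniqueID string `json:"unique_id"` +} + +//AccountResource ... +type AccountResource struct { + Resource + Entity AccountEntity +} + +//AccountEntity ...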
+type AccountEntity struct { + Name string `json:"name"` + Type string `json:"type"` + State string `json:"state"` + OwnerGUID string `json:"owner"` + OwnerUserID string `json:"owner_userid"` + OwnerUniqueID string `json:"owner_unique_id"` + CustomerID string `json:"customer_id"` + CountryCode string `json:"country_code"` + CurrencyCode string `json:"currency_code"` + Organizations []AccountOrganization `json:"organizations_region"` + Members []AccountMember `json:"members"` +} + +//ToModel ... +func (resource AccountResource) ToModel() Account { + entity := resource.Entity + + return Account{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + Type: entity.Type, + State: entity.State, + OwnerGUID: entity.OwnerGUID, + OwnerUserID: entity.OwnerUserID, + OwnerUniqueID: entity.OwnerUniqueID, + CustomerID: entity.CustomerID, + CountryCode: entity.CountryCode, + CurrencyCode: entity.CurrencyCode, + Organizations: entity.Organizations, + Members: entity.Members, + } +} + +//AccountQueryResponse ... +type AccountQueryResponse struct { + Metadata Metadata + Accounts []AccountResource `json:"resources"` +} + +//Accounts ... +type Accounts interface { + List() ([]Account, error) + FindByOrg(orgGUID string, region string) (*Account, error) + FindByOwner(userID string) (*Account, error) +} + +type account struct { + client *client.Client +} + +func newAccountAPI(c *client.Client) Accounts { + return &account{ + client: c, + } +} + +//FindByOrg ... +func (a *account) FindByOrg(orgGUID, region string) (*Account, error) { + type organizationRegion struct { + GUID string `json:"guid"` + Region string `json:"region"` + } + + payLoad := struct { + OrganizationsRegion []organizationRegion `json:"organizations_region"` + }{ + OrganizationsRegion: []organizationRegion{ + { + GUID: orgGUID, + Region: region, + }, + }, + } + + queryResp := AccountQueryResponse{} + response, err := a.client.Post("/coe/v2/getaccounts", payLoad, &queryResp) + if err != nil { + + if response.StatusCode == 404 { + return nil, bmxerror.New(ErrCodeNoAccountExists, + fmt.Sprintf("No account exists in the given region: %q and the given org: %q", region, orgGUID)) + } + return nil, err + + } + + if len(queryResp.Accounts) > 0 { + account := queryResp.Accounts[0].ToModel() + return &account, nil + } + + return nil, bmxerror.New(ErrCodeNoAccountExists, + fmt.Sprintf("No account exists in the given region: %q and the given org: %q", region, orgGUID)) +} + +func (a *account) List() ([]Account, error) { + var accounts []Account + resp, err := a.client.GetPaginated("/coe/v2/accounts", AccountResource{}, func(resource interface{}) bool { + if accountResource, ok := resource.(AccountResource); ok { + accounts = append(accounts, accountResource.ToModel()) + return true + } + return false + }) + + if resp.StatusCode == 404 || len(accounts) == 0 { + return nil, bmxerror.New(ErrCodeNoAccountExists, + fmt.Sprintf("No Account exists")) + } + + return accounts, err +} + +//FindByOwner ... 
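+// It lists all accounts visible to the caller and returns the one whose OwnerUserID matches the given user ID.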
+func (a *account) FindByOwner(userID string) (*Account, error) { + accounts, err := a.List() + if err != nil { + return nil, err + } + + for _, a := range accounts { + if a.OwnerUserID == userID { + return &a, nil + } + } + return nil, bmxerror.New(ErrCodeNoAccountExists, + fmt.Sprintf("No account exists for the user %q", userID)) +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/account/accountv2/api_service.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/account/accountv2/api_service.go new file mode 100644 index 0000000000..a267dd0c49 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/account/accountv2/api_service.go @@ -0,0 +1,69 @@ +package accountv2 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Bluemix/bluemix-go" + "github.com/IBM-Bluemix/bluemix-go/authentication" + "github.com/IBM-Bluemix/bluemix-go/client" + "github.com/IBM-Bluemix/bluemix-go/http" + "github.com/IBM-Bluemix/bluemix-go/rest" + "github.com/IBM-Bluemix/bluemix-go/session" +) + +//AccountServiceAPI is the accountv2 client ... +type AccountServiceAPI interface { + Accounts() Accounts +} + +//ErrCodeNoAccountExists ... +const ErrCodeNoAccountExists = "NoAccountExists" + +//accountService holds the client +type accountService struct { + *client.Client +} + +//New ... +func New(sess *session.Session) (AccountServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.AccountService) + if err != nil { + return nil, err + } + tokenRefresher, err := authentication.NewUAARepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + }) + if err != nil { + return nil, err + } + if config.UAAAccessToken == "" { + err := authentication.PopulateTokens(tokenRefresher, config) + if err != nil { + return nil, err + } + } + + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + + if config.Endpoint == nil { + ep, err := config.EndpointLocator.AccountManagementEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &accountService{ + Client: client.New(config, bluemix.AccountService, tokenRefresher, Paginate), + }, nil +} + +//Accounts API +func (a *accountService) Accounts() Accounts { + return newAccountAPI(a.Client) +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/account/accountv2/paginate.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/account/accountv2/paginate.go new file mode 100644 index 0000000000..91f358fcdf --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/account/accountv2/paginate.go @@ -0,0 +1,65 @@ +package accountv2 + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + + "github.com/IBM-Bluemix/bluemix-go/client" +) + +//PaginatedResources ... +type PaginatedResources struct { + NextURL string `json:"next_url"` + ResourcesBytes json.RawMessage `json:"resources"` + resourceType reflect.Type +} + +//NewPaginatedResources ... +func NewPaginatedResources(resource interface{}) PaginatedResources { + return PaginatedResources{ + resourceType: reflect.TypeOf(resource), + } +} + +//Resources ...
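+// Resources unmarshals the raw "resources" payload into a slice of the captured resource type and returns its elements as a []interface{}.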
+func (pr PaginatedResources) Resources() ([]interface{}, error) { + slicePtr := reflect.New(reflect.SliceOf(pr.resourceType)) + err := json.Unmarshal([]byte(pr.ResourcesBytes), slicePtr.Interface()) + slice := reflect.Indirect(slicePtr) + + contents := make([]interface{}, 0, slice.Len()) + for i := 0; i < slice.Len(); i++ { + contents = append(contents, slice.Index(i).Interface()) + } + return contents, err +} + +//Paginate ... +func Paginate(c *client.Client, path string, resource interface{}, cb func(interface{}) bool) (resp *http.Response, err error) { + for path != "" { + paginatedResources := NewPaginatedResources(resource) + + resp, err = c.Get(path, &paginatedResources) + if err != nil { + return + } + + var resources []interface{} + resources, err = paginatedResources.Resources() + if err != nil { + err = fmt.Errorf("%s: Error parsing JSON", err.Error()) + return + } + + for _, resource := range resources { + if !cb(resource) { + return + } + } + + path = paginatedResources.NextURL + } + return +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/api_service.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/api_service.go new file mode 100644 index 0000000000..e9ee6935d0 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/api_service.go @@ -0,0 +1,85 @@ +package containerv1 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Bluemix/bluemix-go" + "github.com/IBM-Bluemix/bluemix-go/authentication" + "github.com/IBM-Bluemix/bluemix-go/client" + "github.com/IBM-Bluemix/bluemix-go/http" + "github.com/IBM-Bluemix/bluemix-go/rest" + "github.com/IBM-Bluemix/bluemix-go/session" +) + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//ContainerServiceAPI is the Armada K8s client ... +type ContainerServiceAPI interface { + Clusters() Clusters + Workers() Workers + WebHooks() Webhooks + Subnets() Subnets +} + +//csService holds the client +type csService struct { + *client.Client +} + +//New ...
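+// A minimal usage sketch (illustrative only; error checks are elided and the GUIDs are placeholders): +// +// sess, _ := session.New() +// csAPI, _ := containerv1.New(sess) +// target := containerv1.ClusterTargetHeader{OrgID: "org-guid", SpaceID: "space-guid", AccountID: "account-guid"} +// clusters, _ := csAPI.Clusters().List(target)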
+func New(sess *session.Session) (ContainerServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.ContainerService) + if err != nil { + return nil, err + } + tokenRefresher, err := authentication.NewIAMAuthRepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + }) + if err != nil { + return nil, err + } + if config.IAMAccessToken == "" { + err := authentication.PopulateTokens(tokenRefresher, config) + if err != nil { + return nil, err + } + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.ContainerEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &csService{ + Client: client.New(config, bluemix.ContainerService, tokenRefresher, nil), + }, nil +} + +//Clusters implements Clusters API +func (c *csService) Clusters() Clusters { + return newClusterAPI(c.Client) +} + +//Workers implements Cluster Workers API +func (c *csService) Workers() Workers { + return newWorkerAPI(c.Client) +} + +//Subnets implements Cluster Subnets API +func (c *csService) Subnets() Subnets { + return newSubnetAPI(c.Client) +} + +//Webhooks implements Cluster WebHooks API +func (c *csService) WebHooks() Webhooks { + return newWebhookAPI(c.Client) +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/clusters.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/clusters.go new file mode 100644 index 0000000000..bd8f5e02d3 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/clusters.go @@ -0,0 +1,332 @@ +package containerv1 + +import ( + "crypto/sha1" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/IBM-Bluemix/bluemix-go/client" + "github.com/IBM-Bluemix/bluemix-go/helpers" + "github.com/IBM-Bluemix/bluemix-go/trace" +) + +//ClusterInfo ... +type ClusterInfo struct { + GUID string + CreatedDate string + DataCenter string + ID string + IngressHostname string + IngressSecretName string + Location string + MasterKubeVersion string + ModifiedDate string + Name string + Region string + ServerURL string + State string + IsPaid bool + WorkerCount int +} + +//ClusterCreateResponse ... +type ClusterCreateResponse struct { + ID string +} + +//ClusterTargetHeader ... +type ClusterTargetHeader struct { + OrgID string + SpaceID string + AccountID string +} + +const ( + orgIDHeader = "X-Auth-Resource-Org" + spaceIDHeader = "X-Auth-Resource-Space" + accountIDHeader = "X-Auth-Resource-Account" + + slUserNameHeader = "X-Auth-Softlayer-Username" + slAPIKeyHeader = "X-Auth-Softlayer-APIKey" +) + +//ToMap ... +func (c ClusterTargetHeader) ToMap() map[string]string { + m := make(map[string]string, 3) + m[orgIDHeader] = c.OrgID + m[spaceIDHeader] = c.SpaceID + m[accountIDHeader] = c.AccountID + return m +} + +//ClusterSoftlayerHeader ... +type ClusterSoftlayerHeader struct { + SoftLayerUsername string + SoftLayerAPIKey string +} + +//ToMap ... +func (c ClusterSoftlayerHeader) ToMap() map[string]string { + m := make(map[string]string, 2) + m[slAPIKeyHeader] = c.SoftLayerAPIKey + m[slUserNameHeader] = c.SoftLayerUsername + return m +} + +//ClusterCreateRequest ...
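+// For example (all field values below are illustrative placeholders): +// +// containerv1.ClusterCreateRequest{ +// Name: "my-cluster", +// Datacenter: "dal10", +// MachineType: "u1c.2x4", +// PublicVlan: "vlan-a", +// PrivateVlan: "vlan-b", +// WorkerNum: 1, +// }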
+type ClusterCreateRequest struct { + Billing string + Datacenter string + Isolation string + MachineType string + Name string + PrivateVlan string + PublicVlan string + WorkerNum int + NoSubnet bool +} + +// ServiceBindRequest ... +type ServiceBindRequest struct { + ClusterNameOrID string + SpaceGUID string `json:"spaceGUID" binding:"required"` + ServiceInstanceNameOrID string `json:"serviceInstanceGUID" binding:"required"` + NamespaceID string `json:"namespaceID" binding:"required"` +} + +// ServiceBindResponse ... +type ServiceBindResponse struct { + ServiceInstanceGUID string `json:"serviceInstanceGUID" binding:"required"` + NamespaceID string `json:"namespaceID" binding:"required"` + SecretName string `json:"secretName"` + Binding string `json:"binding"` +} + +//BoundService ... +type BoundService struct { + ServiceName string `json:"servicename"` + ServiceID string `json:"serviceid"` + ServiceKeyName string `json:"servicekeyname"` + Namespace string `json:"namespace"` +} + +type BoundServices []BoundService + +//Clusters interface +type Clusters interface { + Create(params ClusterCreateRequest, target ClusterTargetHeader) (ClusterCreateResponse, error) + List(target ClusterTargetHeader) ([]ClusterInfo, error) + Delete(name string, target ClusterTargetHeader) error + Find(name string, target ClusterTargetHeader) (ClusterInfo, error) + GetClusterConfig(name, homeDir string, admin bool, target ClusterTargetHeader) (string, error) + UnsetCredentials(target ClusterTargetHeader) error + SetCredentials(slUsername, slAPIKey string, target ClusterTargetHeader) error + BindService(params ServiceBindRequest, target ClusterTargetHeader) (ServiceBindResponse, error) + UnBindService(clusterNameOrID, namespaceID, serviceInstanceGUID string, target ClusterTargetHeader) error + ListServicesBoundToCluster(clusterNameOrID, namespace string, target ClusterTargetHeader) (BoundServices, error) + FindServiceBoundToCluster(clusterNameOrID, serviceName, namespace string, target ClusterTargetHeader) (BoundService, error) +} + +type clusters struct { + client *client.Client +} + +func newClusterAPI(c *client.Client) Clusters { + return &clusters{ + client: c, + } +} + +//Create ... +func (r *clusters) Create(params ClusterCreateRequest, target ClusterTargetHeader) (ClusterCreateResponse, error) { + var cluster ClusterCreateResponse + _, err := r.client.Post("/v1/clusters", params, &cluster, target.ToMap()) + return cluster, err +} + +//Delete ... +func (r *clusters) Delete(name string, target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/clusters/%s", name) + _, err := r.client.Delete(rawURL, target.ToMap()) + return err +} + +//List ... +func (r *clusters) List(target ClusterTargetHeader) ([]ClusterInfo, error) { + clusters := []ClusterInfo{} + _, err := r.client.Get("/v1/clusters", &clusters, target.ToMap()) + if err != nil { + return nil, err + } + + return clusters, err +} + +//Find ... +func (r *clusters) Find(name string, target ClusterTargetHeader) (ClusterInfo, error) { + rawURL := fmt.Sprintf("/v1/clusters/%s", name) + cluster := ClusterInfo{} + _, err := r.client.Get(rawURL, &cluster, target.ToMap()) + if err != nil { + return cluster, err + } + + return cluster, err +} + +//GetClusterConfig ... 
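+// GetClusterConfig downloads the cluster's kubeconfig zip into a directory derived from dir (see ComputeClusterConfigDir), unzips it, renames the contained YAML file to config.yml and returns that file's absolute path. When admin is true, the admin variant of the config is requested.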
+func (r *clusters) GetClusterConfig(name, dir string, admin bool, target ClusterTargetHeader) (string, error) { + if !helpers.FileExists(dir) { + return "", fmt.Errorf("Path: %q, to download the config doesn't exist", dir) + } + rawURL := fmt.Sprintf("/v1/clusters/%s/config", name) + if admin { + rawURL += "/admin" + } + resultDir := ComputeClusterConfigDir(dir, name, admin) + const kubeConfigName = "config.yml" + err := os.MkdirAll(resultDir, 0755) + if err != nil { + return "", fmt.Errorf("Error creating directory to download the cluster config") + } + downloadPath := filepath.Join(resultDir, "config.zip") + trace.Logger.Println("Will download the kubeconfig at", downloadPath) + + var out *os.File + if out, err = os.Create(downloadPath); err != nil { + return "", err + } + defer out.Close() + defer helpers.RemoveFile(downloadPath) + _, err = r.client.Get(rawURL, out, target.ToMap()) + if err != nil { + return "", err + } + trace.Logger.Println("Downloaded the kubeconfig at", downloadPath) + if err = helpers.Unzip(downloadPath, resultDir); err != nil { + return "", err + } + defer helpers.RemoveFilesWithPattern(resultDir, "kube") + var kubedir, kubeyml string + files, _ := ioutil.ReadDir(resultDir) + for _, f := range files { + if f.IsDir() && strings.HasPrefix(f.Name(), "kube") { + kubedir = filepath.Join(resultDir, f.Name()) + files, _ := ioutil.ReadDir(kubedir) + for _, f := range files { + old := filepath.Join(kubedir, f.Name()) + new := filepath.Join(kubedir, "../", f.Name()) + if strings.HasSuffix(f.Name(), ".yml") { + new = filepath.Join(kubedir, "../", kubeConfigName) + kubeyml = new + } + err := os.Rename(old, new) + if err != nil { + return "", fmt.Errorf("Couldn't rename: %q", err) + } + } + break + } + } + if kubedir == "" { + return "", errors.New("Unable to locate kube config in zip archive") + } + return filepath.Abs(kubeyml) +} + +//UnsetCredentials ... +func (r *clusters) UnsetCredentials(target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/credentials") + _, err := r.client.Delete(rawURL, target.ToMap()) + return err +} + +//SetCredentials ... +func (r *clusters) SetCredentials(slUsername, slAPIKey string, target ClusterTargetHeader) error { + slHeader := &ClusterSoftlayerHeader{ + SoftLayerAPIKey: slAPIKey, + SoftLayerUsername: slUsername, + } + _, err := r.client.Post("/v1/credentials", nil, nil, target.ToMap(), slHeader.ToMap()) + return err +} + +//BindService ... +func (r *clusters) BindService(params ServiceBindRequest, target ClusterTargetHeader) (ServiceBindResponse, error) { + rawURL := fmt.Sprintf("/v1/clusters/%s/services", params.ClusterNameOrID) + payLoad := struct { + SpaceGUID string `json:"spaceGUID" binding:"required"` + ServiceInstanceNameOrID string `json:"serviceInstanceGUID" binding:"required"` + NamespaceID string `json:"namespaceID" binding:"required"` + }{ + SpaceGUID: params.SpaceGUID, + ServiceInstanceNameOrID: params.ServiceInstanceNameOrID, + NamespaceID: params.NamespaceID, + } + var cluster ServiceBindResponse + _, err := r.client.Post(rawURL, payLoad, &cluster, target.ToMap()) + return cluster, err +} + +//UnBindService ... +func (r *clusters) UnBindService(clusterNameOrID, namespaceID, serviceInstanceGUID string, target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/clusters/%s/services/%s/%s", clusterNameOrID, namespaceID, serviceInstanceGUID) + _, err := r.client.Delete(rawURL, target.ToMap()) + return err +} + +//ComputeClusterConfigDir ... 
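+// ComputeClusterConfigDir derives the directory the config is downloaded to: names shorter than 30 characters are prefixed with a SHA-1 hash of the name for uniqueness, admin configs get an "_admin" suffix, and the directory name always ends in "_k8sconfig".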
+func ComputeClusterConfigDir(dir, name string, admin bool) string { + resultDirPrefix := name + resultDirSuffix := "_k8sconfig" + if len(name) < 30 { + //Make it longer for uniqueness + h := sha1.New() + h.Write([]byte(name)) + resultDirPrefix = fmt.Sprintf("%x_%s", h.Sum(nil), name) + } + if admin { + resultDirPrefix = fmt.Sprintf("%s_admin", resultDirPrefix) + } + resultDir := filepath.Join(dir, fmt.Sprintf("%s%s", resultDirPrefix, resultDirSuffix)) + return resultDir +} + +//ListServicesBoundToCluster ... +func (r *clusters) ListServicesBoundToCluster(clusterNameOrID, namespace string, target ClusterTargetHeader) (BoundServices, error) { + var boundServices BoundServices + var path string + + if namespace == "" { + path = fmt.Sprintf("/v1/clusters/%s/services", clusterNameOrID) + + } else { + path = fmt.Sprintf("/v1/clusters/%s/services/%s", clusterNameOrID, namespace) + } + _, err := r.client.Get(path, &boundServices, target.ToMap()) + if err != nil { + return boundServices, err + } + + return boundServices, err +} + +//FindServiceBoundToCluster... +func (r *clusters) FindServiceBoundToCluster(clusterNameOrID, serviceNameOrId, namespace string, target ClusterTargetHeader) (BoundService, error) { + var boundService BoundService + boundServices, err := r.ListServicesBoundToCluster(clusterNameOrID, namespace, target) + if err != nil { + return boundService, err + } + for _, boundService := range boundServices { + if strings.Compare(boundService.ServiceName, serviceNameOrId) == 0 || strings.Compare(boundService.ServiceID, serviceNameOrId) == 0 { + return boundService, nil + } + } + + return boundService, err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/subnets.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/subnets.go new file mode 100644 index 0000000000..53c97313d8 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/subnets.go @@ -0,0 +1,60 @@ +package containerv1 + +import ( + "fmt" + + "github.com/IBM-Bluemix/bluemix-go/client" +) + +//Subnet ... +type Subnet struct { + ID string `json:"id"` + Type string `json:"type"` + VlanID string `json:"vlan_id"` + IPAddresses []string `json:"ip_addresses"` + Properties SubnetProperties `json:"properties"` +} + +//SubnetProperties ... +type SubnetProperties struct { + CIDR string `json:"cidr"` + NetworkIdentifier string `json:"network_identifier"` + Note string `json:"note"` + SubnetType string `json:"subnet_type"` + DisplayLabel string `json:"display_label"` + Gateway string `json:"gateway"` +} + +//Subnets interface +type Subnets interface { + AddSubnet(clusterName string, subnetID string, target ClusterTargetHeader) error + List(target ClusterTargetHeader) ([]Subnet, error) +} + +type subnet struct { + client *client.Client +} + +func newSubnetAPI(c *client.Client) Subnets { + return &subnet{ + client: c, + } +} + +//GetSubnets ... +func (r *subnet) List(target ClusterTargetHeader) ([]Subnet, error) { + subnets := []Subnet{} + _, err := r.client.Get("/v1/subnets", &subnets, target.ToMap()) + if err != nil { + return nil, err + } + + return subnets, err +} + +//AddSubnetToCluster ... 
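+// AddSubnet attaches an existing subnet to a cluster via a PUT to /v1/clusters/{name}/subnets/{subnetID}.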
+func (r *subnet) AddSubnet(name string, subnetID string, target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/clusters/%s/subnets/%s", name, subnetID) + _, err := r.client.Put(rawURL, nil, nil, target.ToMap()) + return err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/webhooks.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/webhooks.go new file mode 100644 index 0000000000..f6023b1c65 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/webhooks.go @@ -0,0 +1,49 @@ +package containerv1 + +import ( + "fmt" + + "github.com/IBM-Bluemix/bluemix-go/client" +) + +//WebHook is the web hook +type WebHook struct { + Level string + Type string + URL string +} + +//Webhooks interface +type Webhooks interface { + List(clusterName string, target ClusterTargetHeader) ([]WebHook, error) + Add(clusterName string, params WebHook, target ClusterTargetHeader) error +} + +type webhook struct { + client *client.Client +} + +func newWebhookAPI(c *client.Client) Webhooks { + return &webhook{ + client: c, + } +} + +//List ... +func (r *webhook) List(name string, target ClusterTargetHeader) ([]WebHook, error) { + rawURL := fmt.Sprintf("/v1/clusters/%s/webhooks", name) + webhooks := []WebHook{} + _, err := r.client.Get(rawURL, &webhooks, target.ToMap()) + if err != nil { + return nil, err + } + + return webhooks, err +} + +//Add ... +func (r *webhook) Add(name string, params WebHook, target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/clusters/%s/webhooks", name) + _, err := r.client.Post(rawURL, params, nil, target.ToMap()) + return err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/workers.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/workers.go new file mode 100644 index 0000000000..2cfb6ab24c --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/container/containerv1/workers.go @@ -0,0 +1,91 @@ +package containerv1 + +import ( + "fmt" + + "github.com/IBM-Bluemix/bluemix-go/client" +) + +//Worker ... +type Worker struct { + Billing string + ErrorMessage string + ID string + Isolation string + KubeVersion string + MachineType string + PrivateIP string + PrivateVlan string + PublicIP string + PublicVlan string + State string + Status string +} + +//WorkerParam ... +type WorkerParam struct { + Action string + Count int +} + +//Workers ... +type Workers interface { + List(clusterName string, target ClusterTargetHeader) ([]Worker, error) + Get(clusterName string, target ClusterTargetHeader) (Worker, error) + Add(clusterName string, params WorkerParam, target ClusterTargetHeader) error + Delete(clusterName string, workerID string, target ClusterTargetHeader) error + Update(clusterName string, workerID string, params WorkerParam, target ClusterTargetHeader) error +} + +type worker struct { + client *client.Client +} + +func newWorkerAPI(c *client.Client) Workers { + return &worker{ + client: c, + } +} + +//Get ... +func (r *worker) Get(id string, target ClusterTargetHeader) (Worker, error) { + rawURL := fmt.Sprintf("/v1/workers/%s", id) + worker := Worker{} + _, err := r.client.Get(rawURL, &worker, target.ToMap()) + if err != nil { + return worker, err + } + + return worker, err +} + +func (r *worker) Add(name string, params WorkerParam, target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/clusters/%s/workers", name) + _, err := r.client.Post(rawURL, params, nil, target.ToMap()) + return err +} + +//Delete ...
+func (r *worker) Delete(name string, workerID string, target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/clusters/%s/workers/%s", name, workerID) + _, err := r.client.Delete(rawURL, target.ToMap()) + return err +} + +//Update ... +func (r *worker) Update(name string, workerID string, params WorkerParam, target ClusterTargetHeader) error { + rawURL := fmt.Sprintf("/v1/clusters/%s/workers/%s", name, workerID) + _, err := r.client.Put(rawURL, params, nil, target.ToMap()) + return err +} + +//List ... +func (r *worker) List(name string, target ClusterTargetHeader) ([]Worker, error) { + rawURL := fmt.Sprintf("/v1/clusters/%s/workers", name) + workers := []Worker{} + _, err := r.client.Get(rawURL, &workers, target.ToMap()) + if err != nil { + return nil, err + } + return workers, err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/api_service.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/api_service.go new file mode 100644 index 0000000000..733774b21e --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/api_service.go @@ -0,0 +1,137 @@ +package mccpv2 + +import ( + gohttp "net/http" + + bluemix "github.com/IBM-Bluemix/bluemix-go" + "github.com/IBM-Bluemix/bluemix-go/authentication" + "github.com/IBM-Bluemix/bluemix-go/client" + "github.com/IBM-Bluemix/bluemix-go/http" + "github.com/IBM-Bluemix/bluemix-go/rest" + "github.com/IBM-Bluemix/bluemix-go/session" +) + +//ErrCodeAPICreation ... +const ErrCodeAPICreation = "APICreationError" + +//MccpServiceAPI is the mccpv2 client ... +type MccpServiceAPI interface { + Organizations() Organizations + Spaces() Spaces + ServiceInstances() ServiceInstances + ServiceKeys() ServiceKeys + ServicePlans() ServicePlans + ServiceOfferings() ServiceOfferings + SpaceQuotas() SpaceQuotas + Apps() Apps + Routes() Routes + SharedDomains() SharedDomains + PrivateDomains() PrivateDomains + ServiceBindings() ServiceBindings +} + +//MccpService holds the client +type mccpService struct { + *client.Client +} + +//New ... 
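+// A minimal usage sketch (illustrative only; error checks are elided and the space GUID and app name are placeholders): +// +// sess, _ := session.New() +// mccpAPI, _ := mccpv2.New(sess) +// app, _ := mccpAPI.Apps().FindByName("space-guid", "myapp")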
+func New(sess *session.Session) (MccpServiceAPI, error) { + config := sess.Config.Copy() + err := config.ValidateConfigForService(bluemix.MccpService) + if err != nil { + return nil, err + } + tokenRefresher, err := authentication.NewUAARepository(config, &rest.Client{ + DefaultHeader: gohttp.Header{ + "User-Agent": []string{http.UserAgent()}, + }, + }) + if err != nil { + return nil, err + } + if config.UAAAccessToken == "" || config.UAARefreshToken == "" { + err := authentication.PopulateTokens(tokenRefresher, config) + if err != nil { + return nil, err + } + } + if config.HTTPClient == nil { + config.HTTPClient = http.NewHTTPClient(config) + } + if config.Endpoint == nil { + ep, err := config.EndpointLocator.MCCPAPIEndpoint() + if err != nil { + return nil, err + } + config.Endpoint = &ep + } + + return &mccpService{ + Client: client.New(config, bluemix.MccpService, tokenRefresher, Paginate), + }, nil +} + +//Organizations implements Organizations APIs +func (c *mccpService) Organizations() Organizations { + return newOrganizationAPI(c.Client) +} + +//Spaces implements Spaces APIs +func (c *mccpService) Spaces() Spaces { + return newSpacesAPI(c.Client) +} + +//ServicePlans implements ServicePlans APIs +func (c *mccpService) ServicePlans() ServicePlans { + return newServicePlanAPI(c.Client) +} + +//ServiceOfferings implements ServiceOfferings APIs +func (c *mccpService) ServiceOfferings() ServiceOfferings { + return newServiceOfferingAPI(c.Client) +} + +//ServiceInstances implements ServiceInstances APIs +func (c *mccpService) ServiceInstances() ServiceInstances { + return newServiceInstanceAPI(c.Client) +} + +//ServiceKeys implements ServiceKey APIs +func (c *mccpService) ServiceKeys() ServiceKeys { + return newServiceKeyAPI(c.Client) +} + +//SpaceQuotas implements SpaceQuota APIs +func (c *mccpService) SpaceQuotas() SpaceQuotas { + return newSpaceQuotasAPI(c.Client) +} + +//ServiceBindings implements ServiceBindings APIs +func (c *mccpService) ServiceBindings() ServiceBindings { + return newServiceBindingAPI(c.Client) +} + +//Apps implements Apps APIs +func (c *mccpService) Apps() Apps { + return newAppAPI(c.Client) +} + +//Routes implements Route APIs +func (c *mccpService) Routes() Routes { + return newRouteAPI(c.Client) +} + +//SharedDomains implements SharedDomain APIs +func (c *mccpService) SharedDomains() SharedDomains { + return newSharedDomainAPI(c.Client) +} + +//PrivateDomains implements PrivateDomains APIs +func (c *mccpService) PrivateDomains() PrivateDomains { + return newPrivateDomainAPI(c.Client) +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/apps.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/apps.go new file mode 100644 index 0000000000..911b481e7e --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/apps.go @@ -0,0 +1,547 @@ +package mccpv2 + +import ( + "fmt" + "os" + "time" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/client" + "github.com/IBM-Bluemix/bluemix-go/helpers" + "github.com/IBM-Bluemix/bluemix-go/rest" + "github.com/IBM-Bluemix/bluemix-go/trace" +) + +//AppState ... +type AppState struct { + PackageState string + InstanceState string +} + +const ( + //ErrCodeAppDoesnotExist ... + ErrCodeAppDoesnotExist = "AppDoesnotExist" + + //AppRunningState ... + AppRunningState = "RUNNING" + + //AppStartedState ... + AppStartedState = "STARTED" + + //AppStagedState ... + AppStagedState = "STAGED" + + //AppPendingState ...
+ AppPendingState = "PENDING" + + //AppStoppedState ... + AppStoppedState = "STOPPED" + + //AppFailedState ... + AppFailedState = "FAILED" + + //AppUnKnownState ... + AppUnKnownState = "UNKNOWN" + + //DefaultRetryDelayForStatusCheck ... + DefaultRetryDelayForStatusCheck = 10 * time.Second +) + +//AppRequest ... +type AppRequest struct { + Name *string `json:"name,omitempty"` + Memory int `json:"memory,omitempty"` + Instances int `json:"instances,omitempty"` + DiskQuota int `json:"disk_quota,omitempty"` + SpaceGUID *string `json:"space_guid,omitempty"` + StackGUID *string `json:"stack_guid,omitempty"` + State *string `json:"state,omitempty"` + DetectedStartCommand *string `json:"detected_start_command,omitempty"` + Command *string `json:"command,omitempty"` + BuildPack *string `json:"buildpack,omitempty"` + HealthCheckType *string `json:"health_check_type,omitempty"` + HealthCheckTimeout int `json:"health_check_timeout,omitempty"` + Diego bool `json:"diego,omitempty"` + EnableSSH bool `json:"enable_ssh,omitempty"` + DockerImage *string `json:"docker_image,omitempty"` + StagingFailedReason *string `json:"staging_failed_reason,omitempty"` + StagingFailedDescription *string `json:"staging_failed_description,omitempty"` + Ports *[]int `json:"ports,omitempty"` + DockerCredentialsJSON *map[string]interface{} `json:"docker_credentials_json,omitempty"` + EnvironmentJSON *map[string]interface{} `json:"environment_json,omitempty"` +} + +//AppEntity ... +type AppEntity struct { + Name string `json:"name"` + SpaceGUID string `json:"space_guid"` + StackGUID string `json:"stack_guid"` + State string `json:"state"` + PackageState string `json:"package_state"` + Memory int `json:"memory"` + Instances int `json:"instances"` + DiskQuota int `json:"disk_quota"` + Version string `json:"version"` + BuildPack *string `json:"buildpack"` + Command *string `json:"command"` + Console bool `json:"console"` + Debug *string `json:"debug"` + StagingTaskID string `json:"staging_task_id"` + HealthCheckType string `json:"health_check_type"` + HealthCheckTimeout *int `json:"health_check_timeout"` + StagingFailedReason string `json:"staging_failed_reason"` + StagingFailedDescription string `json:"staging_failed_description"` + Diego bool `json:"diego"` + DockerImage *string `json:"docker_image"` + EnableSSH bool `json:"enable_ssh"` + Ports []int `json:"ports"` + DockerCredentialsJSON map[string]interface{} `json:"docker_credentials_json"` + EnvironmentJSON map[string]interface{} `json:"environment_json"` +} + +//AppResource ... +type AppResource struct { + Resource + Entity AppEntity +} + +//AppFields ... +type AppFields struct { + Metadata Metadata + Entity AppEntity +} + +//UploadBitsEntity ... +type UploadBitsEntity struct { + GUID string `json:"guid"` + Status string `json:"status"` +} + +//UploadBitFields ... +type UploadBitFields struct { + Metadata Metadata + Entity UploadBitsEntity +} + +//AppSummaryFields ... +type AppSummaryFields struct { + GUID string `json:"guid"` + Name string `json:"name"` + State string `json:"state"` + PackageState string `json:"package_state"` + RunningInstances int `json:"running_instances"` +} + +//AppStats ... +type AppStats struct { + State string `json:"state"` +} + +//ToFields .. 
+func (resource AppResource) ToFields() App { + entity := resource.Entity + + return App{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + SpaceGUID: entity.SpaceGUID, + StackGUID: entity.StackGUID, + State: entity.State, + PackageState: entity.PackageState, + Memory: entity.Memory, + Instances: entity.Instances, + DiskQuota: entity.DiskQuota, + Version: entity.Version, + BuildPack: entity.BuildPack, + Command: entity.Command, + Console: entity.Console, + Debug: entity.Debug, + StagingTaskID: entity.StagingTaskID, + HealthCheckType: entity.HealthCheckType, + HealthCheckTimeout: entity.HealthCheckTimeout, + Diego: entity.Diego, + DockerImage: entity.DockerImage, + EnableSSH: entity.EnableSSH, + Ports: entity.Ports, + DockerCredentialsJSON: entity.DockerCredentialsJSON, + EnvironmentJSON: entity.EnvironmentJSON, + } +} + +//App model +type App struct { + Name string + SpaceGUID string + GUID string + StackGUID string + State string + PackageState string + Memory int + Instances int + DiskQuota int + Version string + BuildPack *string + Command *string + Console bool + Debug *string + StagingTaskID string + HealthCheckType string + HealthCheckTimeout *int + Diego bool + DockerImage *string + EnableSSH bool + Ports []int + DockerCredentialsJSON map[string]interface{} + EnvironmentJSON map[string]interface{} +} + +//Apps ... +type Apps interface { + Create(appPayload AppRequest) (*AppFields, error) + List() ([]App, error) + Get(appGUID string) (*AppFields, error) + Update(appGUID string, appPayload AppRequest) (*AppFields, error) + Delete(appGUID string, async bool, recursive bool) error + FindByName(spaceGUID, name string) (*App, error) + Start(appGUID string, timeout time.Duration) (*AppState, error) + Upload(path string, name string) (*UploadBitFields, error) + Summary(appGUID string) (*AppSummaryFields, error) + Stat(appGUID string) (map[string]AppStats, error) + WaitForAppStatus(waitForThisState, appGUID string, timeout time.Duration) (string, error) + WaitForInstanceStatus(waitForThisState, appGUID string, timeout time.Duration) (string, error) + Instances(appGUID string) (map[string]AppStats, error) + Restage(appGUID string, timeout time.Duration) (*AppState, error) + WaitForStatus(appGUID string, maxWaitTime time.Duration) (*AppState, error) + + //Routes related + BindRoute(appGUID, routeGUID string) (*AppFields, error) + ListRoutes(appGUID string) ([]Route, error) + UnBindRoute(appGUID, routeGUID string) error + + //Service bindings + ListServiceBindings(appGUID string) ([]ServiceBinding, error) + DeleteServiceBindings(appGUID string, bindingGUIDs ...string) error +} + +type app struct { + client *client.Client +} + +func newAppAPI(c *client.Client) Apps { + return &app{ + client: c, + } +} + +func (r *app) FindByName(spaceGUID string, name string) (*App, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/apps", spaceGUID) + req := rest.GetRequest(rawURL).Query("q", "name:"+name) + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + apps, err := r.listAppWithPath(path) + if err != nil { + return nil, err + } + if len(apps) == 0 { + return nil, bmxerror.New(ErrCodeAppDoesnotExist, + fmt.Sprintf("Given app: %q doesn't exist in given space: %q", name, spaceGUID)) + + } + return &apps[0], nil +} + +func (r *app) Create(appPayload AppRequest) (*AppFields, error) { + rawURL := "/v2/apps?async=true" + appFields := AppFields{} + _, err := r.client.Post(rawURL, appPayload, &appFields) + if err != nil { + return nil, err + } + return 
&appFields, nil
+}
+
+func (r *app) BindRoute(appGUID, routeGUID string) (*AppFields, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/routes/%s", appGUID, routeGUID)
+	appFields := AppFields{}
+	_, err := r.client.Put(rawURL, nil, &appFields)
+	if err != nil {
+		return nil, err
+	}
+	return &appFields, nil
+}
+
+func (r *app) ListRoutes(appGUID string) ([]Route, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/routes", appGUID)
+	req := rest.GetRequest(rawURL)
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	route, err := listRouteWithPath(r.client, path)
+	if err != nil {
+		return nil, err
+	}
+	return route, nil
+}
+
+func (r *app) UnBindRoute(appGUID, routeGUID string) error {
+	rawURL := fmt.Sprintf("/v2/apps/%s/routes/%s", appGUID, routeGUID)
+	_, err := r.client.Delete(rawURL)
+	return err
+}
+
+func (r *app) DeleteServiceBindings(appGUID string, sbGUIDs ...string) error {
+	for _, g := range sbGUIDs {
+		rawURL := fmt.Sprintf("/v2/apps/%s/service_bindings/%s", appGUID, g)
+		// Stop only on a failed deletion; continuing the loop ensures every
+		// requested binding is removed, not just the first one.
+		if _, err := r.client.Delete(rawURL); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *app) listAppWithPath(path string) ([]App, error) {
+	var apps []App
+	_, err := r.client.GetPaginated(path, AppResource{}, func(resource interface{}) bool {
+		if appResource, ok := resource.(AppResource); ok {
+			apps = append(apps, appResource.ToFields())
+			return true
+		}
+		return false
+	})
+	return apps, err
+}
+
+func (r *app) Upload(appGUID string, zipPath string) (*UploadBitFields, error) {
+	req := rest.PutRequest(r.client.URL("/v2/apps/"+appGUID+"/bits")).Query("async", "false")
+	file, err := os.Open(zipPath)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	f := rest.File{
+		Name:    file.Name(),
+		Content: file,
+	}
+	req.File("application", f)
+	req.Field("resources", "[]")
+	uploadBitResponse := &UploadBitFields{}
+	_, err = r.client.SendRequest(req, uploadBitResponse)
+	return uploadBitResponse, err
+}
+
+func (r *app) Start(appGUID string, maxWaitTime time.Duration) (*AppState, error) {
+	payload := AppRequest{
+		State: helpers.String(AppStartedState),
+	}
+	rawURL := fmt.Sprintf("/v2/apps/%s", appGUID)
+	appFields := AppFields{}
+	_, err := r.client.Put(rawURL, payload, &appFields)
+	if err != nil {
+		return nil, err
+	}
+	appState := &AppState{
+		PackageState:  AppPendingState,
+		InstanceState: AppUnKnownState,
+	}
+	if maxWaitTime == 0 {
+		appState.PackageState = appFields.Entity.PackageState
+		appState.InstanceState = appFields.Entity.State
+		return appState, nil
+	}
+	return r.WaitForStatus(appGUID, maxWaitTime)
+}
+
+func (r *app) Get(appGUID string) (*AppFields, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s", appGUID)
+	appFields := AppFields{}
+	_, err := r.client.Get(rawURL, &appFields, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &appFields, nil
+}
+
+func (r *app) Summary(appGUID string) (*AppSummaryFields, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/summary", appGUID)
+	appFields := AppSummaryFields{}
+	_, err := r.client.Get(rawURL, &appFields, nil)
+	if err != nil {
+		return nil, err
+	}
+	return &appFields, nil
+}
+
+func (r *app) Stat(appGUID string) (map[string]AppStats, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/stats", appGUID)
+	appStats := map[string]AppStats{}
+	_, err := r.client.Get(rawURL, &appStats, nil)
+	if err != nil {
+		return nil, err
+	}
+	return appStats, nil
+}
+
+func (r *app) Instances(appGUID string) (map[string]AppStats, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/instances", appGUID)
+	appInstances := map[string]AppStats{}
+	_, err := r.client.Get(rawURL, &appInstances, nil)
+	if err != nil {
+		return nil, err
+	}
+	return appInstances, nil
+}
+
+func (r *app) List() ([]App, error) {
+	rawURL := "/v2/apps"
+	req := rest.GetRequest(rawURL)
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	apps, err := r.listAppWithPath(path)
+	if err != nil {
+		return nil, err
+	}
+	return apps, nil
+}
+
+func (r *app) Update(appGUID string, appPayload AppRequest) (*AppFields, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s", appGUID)
+	appFields := AppFields{}
+	_, err := r.client.Put(rawURL, appPayload, &appFields)
+	if err != nil {
+		return nil, err
+	}
+	return &appFields, nil
+}
+
+func (r *app) Delete(appGUID string, async bool, recursive bool) error {
+	req := rest.GetRequest(fmt.Sprintf("/v2/apps/%s", appGUID))
+	if async {
+		req.Query("async", "true")
+	}
+	if recursive {
+		req.Query("recursive", "true")
+	}
+	httpReq, err := req.Build()
+	if err != nil {
+		return err
+	}
+	path := httpReq.URL.String()
+	_, err = r.client.Delete(path)
+	return err
+}
+
+func (r *app) Restage(appGUID string, maxWaitTime time.Duration) (*AppState, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/restage", appGUID)
+	appFields := AppFields{}
+	_, err := r.client.Post(rawURL, nil, &appFields)
+	if err != nil {
+		return nil, err
+	}
+	appState := &AppState{
+		PackageState:  AppPendingState,
+		InstanceState: AppUnKnownState,
+	}
+	if maxWaitTime == 0 {
+		appState.PackageState = appFields.Entity.PackageState
+		appState.InstanceState = appFields.Entity.State
+		return appState, nil
+	}
+	return r.WaitForStatus(appGUID, maxWaitTime)
+}
+
+func (r *app) WaitForAppStatus(waitForThisState, appGUID string, maxWaitTime time.Duration) (string, error) {
+	timeout := time.After(maxWaitTime)
+	tick := time.Tick(DefaultRetryDelayForStatusCheck)
+	status := AppPendingState
+	for {
+		select {
+		case <-timeout:
+			trace.Logger.Printf("Timed out while checking the app status for %q. Waited for %q for the state to be %q", appGUID, maxWaitTime, waitForThisState)
+			return status, nil
+		case <-tick:
+			appFields, err := r.Get(appGUID)
+			if err != nil {
+				return "", err
+			}
+			status = appFields.Entity.PackageState
+			trace.Logger.Println("apps.Entity.PackageState ===>>> ", status)
+			if status == waitForThisState || status == AppFailedState {
+				return status, nil
+			}
+		}
+	}
+}
+
+func (r *app) WaitForInstanceStatus(waitForThisState, appGUID string, maxWaitTime time.Duration) (string, error) {
+	timeout := time.After(maxWaitTime)
+	tick := time.Tick(DefaultRetryDelayForStatusCheck)
+	status := AppStartedState
+	for {
+		select {
+		case <-timeout:
+			trace.Logger.Printf("Timed out while checking the app status for %q. Waited for %q for the state to be %q", appGUID, maxWaitTime, waitForThisState)
+			return status, nil
+		case <-tick:
+			appStat, err := r.Stat(appGUID)
+			if err != nil {
+				return status, err
+			}
+			stateCount := 0
+			for k, v := range appStat {
+				fmt.Printf("Instance[%s] State is %s\n", k, v.State)
+				if v.State == waitForThisState {
+					stateCount++
+				}
+			}
+			if stateCount == len(appStat) {
+				return waitForThisState, nil
+			}
+		}
+	}
+}
+
+func (r *app) WaitForStatus(appGUID string, maxWaitTime time.Duration) (*AppState, error) {
+	appState := &AppState{
+		PackageState:  AppPendingState,
+		InstanceState: AppUnKnownState,
+	}
+	status, err := r.WaitForAppStatus(AppStagedState, appGUID, maxWaitTime/2)
+	appState.PackageState = status
+	if err != nil || status == AppFailedState {
+		return appState, err
+	}
+	status, err = r.WaitForInstanceStatus(AppRunningState, appGUID, maxWaitTime/2)
+	appState.InstanceState = status
+	// Surface any error from the instance poll so callers can tell an API
+	// failure apart from a successfully running app.
+	return appState, err
+}
+
+//TODO pull the wait logic into an auxiliary function which can be used by all
+
+func (r *app) ListServiceBindings(appGUID string) ([]ServiceBinding, error) {
+	rawURL := fmt.Sprintf("/v2/apps/%s/service_bindings", appGUID)
+	req := rest.GetRequest(rawURL)
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	sb, err := listServiceBindingWithPath(r.client, path)
+	if err != nil {
+		return nil, err
+	}
+	return sb, nil
+}
diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/filter.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/filter.go
new file mode 100644
index 0000000000..d63838d93d
--- /dev/null
+++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/filter.go
@@ -0,0 +1,89 @@
+package mccpv2
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+var (
+	//ErrFilterNameMissing ...
+	ErrFilterNameMissing = errors.New("Filter must have a name")
+
+	//ErrFilterMissingOp ..
+	ErrFilterMissingOp = errors.New("Filter must have an operator")
+)
+
+//Filter ...
+type Filter struct {
+	name  string
+	op    string
+	value string
+}
+
+//Name ...
+func (f Filter) Name(name string) Filter {
+	f.name = name
+	return f
+}
+
+//Eq ...
+func (f Filter) Eq(target string) Filter {
+	f.op = ":"
+	f.value = target
+	return f
+}
+
+//In ...
+func (f Filter) In(targets ...string) Filter {
+	f.op = " IN "
+	f.value = strings.Join(targets, ",")
+	return f
+}
+
+//Ge ...
+func (f Filter) Ge(target string) Filter {
+	// ">=" mirrors Le/Gt/Lt below; Eq already covers exact matches with ":".
+	f.op = ">="
+	f.value = target
+	return f
+}
+
+//Le ...
+func (f Filter) Le(target string) Filter {
+	f.op = "<="
+	f.value = target
+	return f
+}
+
+//Gt ...
+func (f Filter) Gt(target string) Filter {
+	f.op = ">"
+	f.value = target
+	return f
+}
+
+//Lt ...
+func (f Filter) Lt(target string) Filter {
+	f.op = "<"
+	f.value = target
+	return f
+}
+
+func (f Filter) validate() error {
+	if f.name == "" {
+		return ErrFilterNameMissing
+	}
+	if f.op == "" {
+		return ErrFilterMissingOp
+	}
+	return nil
+}
+
+//Build ...
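+// Build validates the filter and renders it as a CF "q" query fragment.
+// Illustrative sketch (the literal values are placeholders):
+//
+//	q, err := Filter{}.Name("name").Eq("myapp").Build()
+//	// q == "name:myapp;"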
+func (f Filter) Build() (string, error) {
+	err := f.validate()
+	if err != nil {
+		// Return the validation error rather than nil so invalid filters
+		// are not silently accepted.
+		return "", err
+	}
+	return fmt.Sprintf("%s%s%s;", f.name, f.op, f.value), nil
+}
diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/organizations.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/organizations.go
new file mode 100644
index 0000000000..785288fbfc
--- /dev/null
+++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/organizations.go
@@ -0,0 +1,186 @@
+package mccpv2
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/IBM-Bluemix/bluemix-go/bmxerror"
+	"github.com/IBM-Bluemix/bluemix-go/client"
+	"github.com/IBM-Bluemix/bluemix-go/rest"
+)
+
+//ErrCodeOrgDoesnotExist ...
+var ErrCodeOrgDoesnotExist = "OrgDoesnotExist"
+
+//Metadata ...
+type Metadata struct {
+	GUID string `json:"guid"`
+	URL  string `json:"url"`
+}
+
+//Resource ...
+type Resource struct {
+	Metadata Metadata
+}
+
+//OrgResource ...
+type OrgResource struct {
+	Resource
+	Entity OrgEntity
+}
+
+//OrgEntity ...
+type OrgEntity struct {
+	Name           string `json:"name"`
+	Region         string `json:"region"`
+	BillingEnabled bool   `json:"billing_enabled"`
+}
+
+//ToFields ..
+func (resource OrgResource) ToFields() Organization {
+	entity := resource.Entity
+
+	return Organization{
+		GUID:           resource.Metadata.GUID,
+		Name:           entity.Name,
+		Region:         entity.Region,
+		BillingEnabled: entity.BillingEnabled,
+	}
+}
+
+//Organization model
+type Organization struct {
+	GUID           string
+	Name           string
+	Region         string
+	BillingEnabled bool
+}
+
+//Organizations ...
+type Organizations interface {
+	Create(name string) error
+	List(region string) ([]Organization, error)
+	FindByName(orgName, region string) (*Organization, error)
+	Delete(guid string, recursive bool) error
+	Update(guid string, newName string) error
+}
+
+type organization struct {
+	client *client.Client
+}
+
+func newOrganizationAPI(c *client.Client) Organizations {
+	return &organization{
+		client: c,
+	}
+}
+
+func (o *organization) Create(name string) error {
+	body := struct {
+		Name string `json:"name"`
+	}{
+		Name: name,
+	}
+	_, err := o.client.Post("/v2/organizations", body, nil)
+	return err
+}
+
+func (o *organization) Update(guid string, newName string) error {
+	rawURL := fmt.Sprintf("/v2/organizations/%s", guid)
+	body := struct {
+		Name string `json:"name"`
+	}{
+		Name: newName,
+	}
+	_, err := o.client.Put(rawURL, body, nil)
+	return err
+}
+
+func (o *organization) Delete(guid string, recursive bool) error {
+	req := rest.DeleteRequest(fmt.Sprintf("/v2/organizations/%s", guid)).
+		Query("recursive", strconv.FormatBool(recursive))
+
+	path, pathErr := o.url(req)
+	if pathErr != nil {
+		return pathErr
+	}
+
+	_, err := o.client.Delete(path, nil, nil)
+	return err
+}
+
+func (o *organization) List(region string) ([]Organization, error) {
+	req := rest.GetRequest("/v2/organizations")
+	if region != "" {
+		req.Query("region", region)
+	}
+	path, err := o.url(req)
+	if err != nil {
+		return []Organization{}, err
+	}
+
+	var orgs []Organization
+	err = o.listOrgResourcesWithPath(path, func(orgResource OrgResource) bool {
+		orgs = append(orgs, orgResource.ToFields())
+		return true
+	})
+	return orgs, err
+}
+
+//FindByName ...
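+// FindByName resolves a single organization by name, optionally scoped to a
+// region. Illustrative sketch (c stands in for a configured *client.Client,
+// and the name/region literals are placeholders):
+//
+//	orgs := newOrganizationAPI(c)
+//	org, err := orgs.FindByName("my-org", "us-south")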
+func (o *organization) FindByName(name string, region string) (*Organization, error) { + path, err := o.urlOfOrgWithName(name, region, false) + if err != nil { + return nil, err + } + + var org Organization + var found bool + err = o.listOrgResourcesWithPath(path, func(orgResource OrgResource) bool { + org = orgResource.ToFields() + found = true + return false + }) + + if err != nil { + return nil, err + } + + if found { + return &org, err + } + + //May not be found and no error + return nil, bmxerror.New(ErrCodeOrgDoesnotExist, + fmt.Sprintf("Given org %q doesn't exist", name)) + +} + +func (o *organization) listOrgResourcesWithPath(path string, cb func(OrgResource) bool) error { + _, err := o.client.GetPaginated(path, OrgResource{}, func(resource interface{}) bool { + if orgResource, ok := resource.(OrgResource); ok { + return cb(orgResource) + } + return false + }) + return err +} + +func (o *organization) urlOfOrgWithName(name string, region string, inline bool) (string, error) { + req := rest.GetRequest("/v2/organizations").Query("q", fmt.Sprintf("name:%s", name)) + if region != "" { + req.Query("region", region) + } + if inline { + req.Query("inline-relations-depth", "1") + } + return o.url(req) +} + +func (o *organization) url(req *rest.Request) (string, error) { + httpReq, err := req.Build() + if err != nil { + return "", err + } + return httpReq.URL.String(), nil +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/paginate.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/paginate.go new file mode 100644 index 0000000000..1d7d680a89 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/paginate.go @@ -0,0 +1,65 @@ +package mccpv2 + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + + "github.com/IBM-Bluemix/bluemix-go/client" +) + +//PaginatedResources ... +type PaginatedResources struct { + NextURL string `json:"next_url"` + ResourcesBytes json.RawMessage `json:"resources"` + resourceType reflect.Type +} + +//NewPaginatedResources ... +func NewPaginatedResources(resource interface{}) PaginatedResources { + return PaginatedResources{ + resourceType: reflect.TypeOf(resource), + } +} + +//Resources ... +func (pr PaginatedResources) Resources() ([]interface{}, error) { + slicePtr := reflect.New(reflect.SliceOf(pr.resourceType)) + err := json.Unmarshal([]byte(pr.ResourcesBytes), slicePtr.Interface()) + slice := reflect.Indirect(slicePtr) + + contents := make([]interface{}, 0, slice.Len()) + for i := 0; i < slice.Len(); i++ { + contents = append(contents, slice.Index(i).Interface()) + } + return contents, err +} + +//Paginate ... 
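+// Paginate fetches every page starting at path, decoding each entry into the
+// given resource prototype and invoking cb until cb returns false or next_url
+// is exhausted. Illustrative sketch (c is a placeholder client):
+//
+//	_, err := Paginate(c, "/v2/organizations", OrgResource{}, func(r interface{}) bool {
+//		_, ok := r.(OrgResource)
+//		return ok // keep walking pages
+//	})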
+func Paginate(c *client.Client, path string, resource interface{}, cb func(interface{}) bool) (resp *http.Response, err error) {
+	for path != "" {
+		paginatedResources := NewPaginatedResources(resource)
+
+		resp, err = c.Get(path, &paginatedResources)
+		if err != nil {
+			return
+		}
+
+		var resources []interface{}
+		resources, err = paginatedResources.Resources()
+		if err != nil {
+			err = fmt.Errorf("%s: Error parsing JSON", err.Error())
+			return
+		}
+
+		for _, resource := range resources {
+			if !cb(resource) {
+				return
+			}
+		}
+
+		path = paginatedResources.NextURL
+	}
+	return
+}
diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/private_domain.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/private_domain.go
new file mode 100644
index 0000000000..70d69626c2
--- /dev/null
+++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/private_domain.go
@@ -0,0 +1,168 @@
+package mccpv2
+
+import (
+	"fmt"
+
+	"github.com/IBM-Bluemix/bluemix-go/bmxerror"
+	"github.com/IBM-Bluemix/bluemix-go/client"
+	"github.com/IBM-Bluemix/bluemix-go/rest"
+)
+
+//ErrCodePrivateDomainDoesnotExist ...
+var ErrCodePrivateDomainDoesnotExist = "PrivateDomainDoesnotExist"
+
+//PrivateDomainRequest ...
+type PrivateDomainRequest struct {
+	Name    string `json:"name,omitempty"`
+	OrgGUID string `json:"owning_organization_guid,omitempty"`
+}
+
+//PrivateDomainMetadata ...
+type PrivateDomainMetadata struct {
+	GUID string `json:"guid"`
+	URL  string `json:"url"`
+}
+
+//PrivateDomainEntity ...
+type PrivateDomainEntity struct {
+	Name                   string `json:"name"`
+	OwningOrganizationGUID string `json:"owning_organization_guid"`
+	OwningOrganizationURL  string `json:"owning_organization_url"`
+	SharedOrganizationURL  string `json:"shared_organizations_url"`
+}
+
+//PrivateDomainResource ...
+type PrivateDomainResource struct {
+	Resource
+	Entity PrivateDomainEntity
+}
+
+//PrivateDomainFields ...
+type PrivateDomainFields struct {
+	Metadata PrivateDomainMetadata
+	Entity   PrivateDomainEntity
+}
+
+//ToFields ..
+func (resource PrivateDomainResource) ToFields() PrivateDomain {
+	entity := resource.Entity
+
+	return PrivateDomain{
+		GUID: resource.Metadata.GUID,
+		Name: entity.Name,
+		OwningOrganizationGUID: entity.OwningOrganizationGUID,
+		OwningOrganizationURL:  entity.OwningOrganizationURL,
+		// Map the shared-organizations URL from its own entity field rather
+		// than repeating the owning-organization URL.
+		SharedOrganizationURL: entity.SharedOrganizationURL,
+	}
+}
+
+//PrivateDomain model
+type PrivateDomain struct {
+	GUID                   string
+	Name                   string
+	OwningOrganizationGUID string
+	OwningOrganizationURL  string
+	SharedOrganizationURL  string
+}
+
+//PrivateDomains ...
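+// Illustrative usage sketch (c and the GUID/domain literals are placeholders):
+//
+//	domains := newPrivateDomainAPI(c)
+//	d, err := domains.FindByNameInOrg("org-guid", "example.com")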
+type PrivateDomains interface { + FindByNameInOrg(orgGUID, domainName string) (*PrivateDomain, error) + FindByName(domainName string) (*PrivateDomain, error) + Create(req PrivateDomainRequest) (*PrivateDomainFields, error) + Get(privateDomainGUID string) (*PrivateDomainFields, error) + Delete(privateDomainGUID string, async bool) error +} + +type privateDomain struct { + client *client.Client +} + +func newPrivateDomainAPI(c *client.Client) PrivateDomains { + return &privateDomain{ + client: c, + } +} + +func (d *privateDomain) FindByNameInOrg(orgGUID, domainName string) (*PrivateDomain, error) { + rawURL := fmt.Sprintf("/v2/organizations/%s/private_domains", orgGUID) + req := rest.GetRequest(rawURL).Query("q", "name:"+domainName) + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + domain, err := listPrivateDomainWithPath(d.client, path) + if err != nil { + return nil, err + } + if len(domain) == 0 { + return nil, bmxerror.New(ErrCodePrivateDomainDoesnotExist, fmt.Sprintf("Private Domain: %q doesn't exist", domainName)) + } + return &domain[0], nil +} + +func (d *privateDomain) FindByName(domainName string) (*PrivateDomain, error) { + rawURL := fmt.Sprintf("/v2/private_domains") + req := rest.GetRequest(rawURL).Query("q", "name:"+domainName) + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + domain, err := listPrivateDomainWithPath(d.client, path) + if err != nil { + return nil, err + } + if len(domain) == 0 { + return nil, bmxerror.New(ErrCodePrivateDomainDoesnotExist, fmt.Sprintf("Private Domain: %q doesn't exist", domainName)) + } + return &domain[0], nil +} + +func listPrivateDomainWithPath(c *client.Client, path string) ([]PrivateDomain, error) { + var privateDomain []PrivateDomain + _, err := c.GetPaginated(path, PrivateDomainResource{}, func(resource interface{}) bool { + if privateDomainResource, ok := resource.(PrivateDomainResource); ok { + privateDomain = append(privateDomain, privateDomainResource.ToFields()) + return true + } + return false + }) + return privateDomain, err +} + +func (d *privateDomain) Create(req PrivateDomainRequest) (*PrivateDomainFields, error) { + rawURL := "/v2/private_domains" + privateDomainFields := PrivateDomainFields{} + _, err := d.client.Post(rawURL, req, &privateDomainFields) + if err != nil { + return nil, err + } + return &privateDomainFields, nil +} + +func (d *privateDomain) Get(privateDomainGUID string) (*PrivateDomainFields, error) { + rawURL := fmt.Sprintf("/v2/private_domains/%s", privateDomainGUID) + privateDomainFields := PrivateDomainFields{} + _, err := d.client.Get(rawURL, &privateDomainFields, nil) + if err != nil { + return nil, err + } + return &privateDomainFields, nil +} + +func (d *privateDomain) Delete(privateDomainGUID string, async bool) error { + rawURL := fmt.Sprintf("/v2/private_domains/%s", privateDomainGUID) + req := rest.GetRequest(rawURL).Query("recursive", "true") + if async { + req.Query("async", "true") + } + httpReq, err := req.Build() + if err != nil { + return err + } + path := httpReq.URL.String() + _, err = d.client.Delete(path) + return err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/routes.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/routes.go new file mode 100644 index 0000000000..1be7881353 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/routes.go @@ -0,0 +1,184 @@ +package mccpv2 + +import ( + "fmt" + + 
"github.com/IBM-Bluemix/bluemix-go/client" + "github.com/IBM-Bluemix/bluemix-go/rest" +) + +//ErrCodeRouteDoesnotExist ... +var ErrCodeRouteDoesnotExist = "RouteDoesnotExist" + +//RouteRequest ... +type RouteRequest struct { + Host string `json:"host,omitempty"` + SpaceGUID string `json:"space_guid"` + DomainGUID string `json:"domain_guid,omitempty"` + Path string `json:"path,omitempty"` + Port *int `json:"port,omitempty"` +} + +//RouteUpdateRequest ... +type RouteUpdateRequest struct { + Host *string `json:"host,omitempty"` + Path *string `json:"path,omitempty"` + Port *int `json:"port,omitempty"` +} + +//RouteMetadata ... +type RouteMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//RouteEntity ... +type RouteEntity struct { + Host string `json:"host"` + Path string `json:"path"` + DomainGUID string `json:"domain_guid"` + SpaceGUID string `json:"space_guid"` + ServiceInstanceGUID string `json:"service_instance_guid"` + Port *int `json:"port"` + DomainURL string `json:"domain_url"` + SpaceURL string `json:"space_url"` + AppsURL string `json:"apps_url"` + RouteMappingURL string `json:"route_mapping_url"` +} + +//RouteResource ... +type RouteResource struct { + Resource + Entity RouteEntity +} + +//RouteFields ... +type RouteFields struct { + Metadata RouteMetadata + Entity RouteEntity +} + +//ToFields .. +func (resource RouteResource) ToFields() Route { + entity := resource.Entity + + return Route{ + GUID: resource.Metadata.GUID, + Host: entity.Host, + Path: entity.Path, + DomainGUID: entity.DomainGUID, + SpaceGUID: entity.SpaceGUID, + ServiceInstanceGUID: entity.ServiceInstanceGUID, + Port: entity.Port, + DomainURL: entity.DomainURL, + SpaceURL: entity.SpaceURL, + AppsURL: entity.AppsURL, + RouteMappingURL: entity.RouteMappingURL, + } +} + +//Route model +type Route struct { + GUID string + Host string + Path string + DomainGUID string + SpaceGUID string + ServiceInstanceGUID string + Port *int + DomainURL string + SpaceURL string + AppsURL string + RouteMappingURL string +} + +//Routes ... 
+type Routes interface { + Find(hostname, domainGUID string) ([]Route, error) + Create(req RouteRequest) (*RouteFields, error) + Get(routeGUID string) (*RouteFields, error) + Update(routeGUID string, req RouteUpdateRequest) (*RouteFields, error) + Delete(routeGUID string, async bool) error +} + +type route struct { + client *client.Client +} + +func newRouteAPI(c *client.Client) Routes { + return &route{ + client: c, + } +} + +func (r *route) Get(routeGUID string) (*RouteFields, error) { + rawURL := fmt.Sprintf("/v2/routes/%s", routeGUID) + routeFields := RouteFields{} + _, err := r.client.Get(rawURL, &routeFields, nil) + if err != nil { + return nil, err + } + return &routeFields, nil +} + +func (r *route) Find(hostname, domainGUID string) ([]Route, error) { + rawURL := "/v2/routes?inline-relations-depth=1" + req := rest.GetRequest(rawURL).Query("q", "host:"+hostname+";domain_guid:"+domainGUID) + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + route, err := listRouteWithPath(r.client, path) + if err != nil { + return nil, err + } + return route, nil +} + +func (r *route) Create(req RouteRequest) (*RouteFields, error) { + rawURL := "/v2/routes?async=true&inline-relations-depth=1" + routeFields := RouteFields{} + _, err := r.client.Post(rawURL, req, &routeFields) + if err != nil { + return nil, err + } + return &routeFields, nil +} + +func (r *route) Update(routeGUID string, req RouteUpdateRequest) (*RouteFields, error) { + rawURL := fmt.Sprintf("/v2/routes/%s", routeGUID) + routeFields := RouteFields{} + _, err := r.client.Put(rawURL, req, &routeFields) + if err != nil { + return nil, err + } + return &routeFields, nil +} + +func (r *route) Delete(routeGUID string, async bool) error { + rawURL := fmt.Sprintf("/v2/routes/%s", routeGUID) + req := rest.GetRequest(rawURL).Query("recursive", "true") + if async { + req.Query("async", "true") + } + httpReq, err := req.Build() + if err != nil { + return err + } + path := httpReq.URL.String() + _, err = r.client.Delete(path) + return err +} + +func listRouteWithPath(c *client.Client, path string) ([]Route, error) { + var route []Route + _, err := c.GetPaginated(path, RouteResource{}, func(resource interface{}) bool { + if routeResource, ok := resource.(RouteResource); ok { + route = append(route, routeResource.ToFields()) + return true + } + return false + }) + return route, err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_bindings.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_bindings.go new file mode 100644 index 0000000000..661388907b --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_bindings.go @@ -0,0 +1,144 @@ +package mccpv2 + +import ( + "fmt" + "strings" + + "github.com/IBM-Bluemix/bluemix-go/client" + "github.com/IBM-Bluemix/bluemix-go/rest" +) + +//ServiceBindingRequest ... +type ServiceBindingRequest struct { + ServiceInstanceGUID string `json:"service_instance_guid"` + AppGUID string `json:"app_guid"` + Parameters string `json:"parameters,omitempty"` +} + +//ServiceBindingMetadata ... +type ServiceBindingMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//ServiceBindingEntity ... +type ServiceBindingEntity struct { + ServiceInstanceGUID string `json:"service_instance_guid"` + AppGUID string `json:"app_guid"` + Credentials map[string]interface{} `json:"credentials"` +} + +//ServiceBindingResource ... 
+type ServiceBindingResource struct { + Resource + Entity ServiceBindingEntity +} + +//ServiceBindingFields ... +type ServiceBindingFields struct { + Metadata ServiceBindingMetadata + Entity ServiceBindingEntity +} + +//ServiceBinding model +type ServiceBinding struct { + GUID string + ServiceInstanceGUID string + AppGUID string + Credentials map[string]interface{} +} + +//ToFields .. +func (resource ServiceBindingResource) ToFields() ServiceBinding { + entity := resource.Entity + + return ServiceBinding{ + GUID: resource.Metadata.GUID, + ServiceInstanceGUID: entity.ServiceInstanceGUID, + AppGUID: entity.AppGUID, + Credentials: entity.Credentials, + } +} + +//ServiceBindings ... +type ServiceBindings interface { + Create(req ServiceBindingRequest) (*ServiceBindingFields, error) + Get(guid string) (*ServiceBindingFields, error) + Delete(guid string, async bool) error + List(filters ...string) ([]ServiceBinding, error) +} + +type serviceBinding struct { + client *client.Client +} + +func newServiceBindingAPI(c *client.Client) ServiceBindings { + return &serviceBinding{ + client: c, + } +} + +func (r *serviceBinding) Get(sbGUID string) (*ServiceBindingFields, error) { + rawURL := fmt.Sprintf("/v2/service_bindings/%s", sbGUID) + sbFields := ServiceBindingFields{} + _, err := r.client.Get(rawURL, &sbFields, nil) + if err != nil { + return nil, err + } + return &sbFields, nil +} + +func (r *serviceBinding) Create(req ServiceBindingRequest) (*ServiceBindingFields, error) { + rawURL := "/v2/service_bindings" + sbFields := ServiceBindingFields{} + _, err := r.client.Post(rawURL, req, &sbFields) + if err != nil { + return nil, err + } + return &sbFields, nil +} + +func (r *serviceBinding) Delete(guid string, async bool) error { + rawURL := fmt.Sprintf("/v2/service_bindings/%s", guid) + req := rest.GetRequest(rawURL).Query("recursive", "true") + if async { + req.Query("async", "true") + } + httpReq, err := req.Build() + if err != nil { + return err + } + path := httpReq.URL.String() + _, err = r.client.Delete(path) + return err +} + +func (r *serviceBinding) List(filters ...string) ([]ServiceBinding, error) { + rawURL := "/v2/service_bindings" + req := rest.GetRequest(rawURL) + if len(filters) > 0 { + req.Query("q", strings.Join(filters, "")) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + bindings, err := listServiceBindingWithPath(r.client, path) + if err != nil { + return nil, err + } + return bindings, nil +} + +func listServiceBindingWithPath(c *client.Client, path string) ([]ServiceBinding, error) { + var sb []ServiceBinding + _, err := c.GetPaginated(path, ServiceBindingResource{}, func(resource interface{}) bool { + if sbResource, ok := resource.(ServiceBindingResource); ok { + sb = append(sb, sbResource.ToFields()) + return true + } + return false + }) + return sb, err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_instances.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_instances.go new file mode 100644 index 0000000000..74541c85d6 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_instances.go @@ -0,0 +1,218 @@ +package mccpv2 + +import ( + "fmt" + + "github.com/IBM-Bluemix/bluemix-go/client" + "github.com/IBM-Bluemix/bluemix-go/rest" +) + +//ServiceInstanceCreateRequest ... 
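+// A minimal create payload might look as follows (illustrative; the GUID
+// values are placeholders):
+//
+//	req := ServiceInstanceCreateRequest{
+//		Name:      "my-db",
+//		SpaceGUID: "space-guid",
+//		PlanGUID:  "plan-guid",
+//	}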
+type ServiceInstanceCreateRequest struct { + Name string `json:"name"` + SpaceGUID string `json:"space_guid"` + PlanGUID string `json:"service_plan_guid"` + Params map[string]interface{} `json:"parameters,omitempty"` + Tags []string `json:"tags,omitempty"` +} + +//ServiceInstanceUpdateRequest ... +type ServiceInstanceUpdateRequest struct { + Name *string `json:"name,omitempty"` + PlanGUID *string `json:"service_plan_guid,omitempty"` + Params map[string]interface{} `json:"parameters,omitempty"` + Tags *[]string `json:"tags,omitempty"` +} + +//ServiceInstance ... +type ServiceInstance struct { + GUID string + Name string `json:"name"` + Credentials map[string]interface{} `json:"credentials"` + ServicePlanGUID string `json:"service_plan_guid"` + SpaceGUID string `json:"space_guid"` + GatewayData string `json:"gateway_data"` + Type string `json:"type"` + DashboardURL string `json:"dashboard_url"` + LastOperation LastOperationFields `json:"last_operation"` + RouteServiceURL string `json:"routes_url"` + Tags []string `json:"tags"` + SpaceURL string `json:"space_url"` + ServicePlanURL string `json:"service_plan_url"` + ServiceBindingURL string `json:"service_bindings_url"` + ServiceKeysURL string `json:"service_keys_url"` +} + +//ServiceInstanceFields ... +type ServiceInstanceFields struct { + Metadata ServiceInstanceMetadata + Entity ServiceInstance +} + +//ServiceInstanceMetadata ... +type ServiceInstanceMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//LastOperationFields ... +type LastOperationFields struct { + Type string `json:"type"` + State string `json:"state"` + Description string `json:"description"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} + +//ServiceInstanceResource ... +type ServiceInstanceResource struct { + Resource + Entity ServiceInstanceEntity +} + +//ServiceInstanceEntity ... +type ServiceInstanceEntity struct { + Name string `json:"name"` + Credentials map[string]interface{} `json:"credentials"` + ServicePlanGUID string `json:"service_plan_guid"` + SpaceGUID string `json:"space_guid"` + GatewayData string `json:"gateway_data"` + Type string `json:"type"` + DashboardURL string `json:"dashboard_url"` + LastOperation LastOperationFields `json:"last_operation"` + RouteServiceURL string `json:"routes_url"` + Tags []string `json:"tags"` + SpaceURL string `json:"space_url"` + ServicePlanURL string `json:"service_plan_url"` + ServiceBindingURL string `json:"service_bindings_url"` + ServiceKeysURL string `json:"service_keys_url"` +} + +//ToModel ... +func (resource ServiceInstanceResource) ToModel() ServiceInstance { + + entity := resource.Entity + + return ServiceInstance{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + Credentials: entity.Credentials, + ServicePlanGUID: entity.ServicePlanGUID, + SpaceGUID: entity.SpaceGUID, + GatewayData: entity.GatewayData, + Type: entity.Type, + LastOperation: entity.LastOperation, + RouteServiceURL: entity.RouteServiceURL, + DashboardURL: entity.DashboardURL, + Tags: entity.Tags, + SpaceURL: entity.SpaceURL, + ServicePlanURL: entity.ServicePlanURL, + ServiceBindingURL: entity.ServiceBindingURL, + ServiceKeysURL: entity.ServiceKeysURL, + } +} + +//ServiceInstances ... 
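+// Illustrative usage sketch (c is a placeholder for a configured client):
+//
+//	instances := newServiceInstanceAPI(c)
+//	svc, err := instances.FindByName("my-db")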
+type ServiceInstances interface { + Create(req ServiceInstanceCreateRequest) (*ServiceInstanceFields, error) + Update(instanceGUID string, req ServiceInstanceUpdateRequest) (*ServiceInstanceFields, error) + Delete(instanceGUID string) error + FindByName(instanceName string) (*ServiceInstance, error) + Get(instanceGUID string) (*ServiceInstanceFields, error) + ListServiceBindings(instanceGUID string) ([]ServiceBinding, error) +} + +type serviceInstance struct { + client *client.Client +} + +func newServiceInstanceAPI(c *client.Client) ServiceInstances { + return &serviceInstance{ + client: c, + } +} + +func (s *serviceInstance) Create(req ServiceInstanceCreateRequest) (*ServiceInstanceFields, error) { + rawURL := "/v2/service_instances?accepts_incomplete=true&async=true" + serviceFields := ServiceInstanceFields{} + _, err := s.client.Post(rawURL, req, &serviceFields) + if err != nil { + return nil, err + } + return &serviceFields, nil +} + +func (s *serviceInstance) Get(instanceGUID string) (*ServiceInstanceFields, error) { + rawURL := fmt.Sprintf("/v2/service_instances/%s", instanceGUID) + serviceFields := ServiceInstanceFields{} + _, err := s.client.Get(rawURL, &serviceFields) + if err != nil { + return nil, err + } + + return &serviceFields, err +} + +func (s *serviceInstance) FindByName(instanceName string) (*ServiceInstance, error) { + req := rest.GetRequest("/v2/service_instances") + req.Query("return_user_provided_service_instances", "true") + if instanceName != "" { + req.Query("q", "name:"+instanceName) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + services, err := listServicesWithPath(s.client, path) + if err != nil { + return nil, err + } + if len(services) == 0 { + return nil, fmt.Errorf("Service instance: %q doesn't exist", instanceName) + } + return &services[0], nil +} + +func (s *serviceInstance) Delete(instanceGUID string) error { + rawURL := fmt.Sprintf("/v2/service_instances/%s", instanceGUID) + _, err := s.client.Delete(rawURL) + return err +} + +func (s *serviceInstance) Update(instanceGUID string, req ServiceInstanceUpdateRequest) (*ServiceInstanceFields, error) { + rawURL := fmt.Sprintf("/v2/service_instances/%s?accepts_incomplete=true&async=true", instanceGUID) + serviceFields := ServiceInstanceFields{} + _, err := s.client.Put(rawURL, req, &serviceFields) + if err != nil { + return nil, err + } + return &serviceFields, nil +} + +func (s *serviceInstance) ListServiceBindings(instanceGUID string) ([]ServiceBinding, error) { + rawURL := fmt.Sprintf("/v2/service_instances/%s/service_bindings", instanceGUID) + req := rest.GetRequest(rawURL) + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + sb, err := listServiceBindingWithPath(s.client, path) + if err != nil { + return nil, err + } + return sb, nil +} + +func listServicesWithPath(client *client.Client, path string) ([]ServiceInstance, error) { + var services []ServiceInstance + _, err := client.GetPaginated(path, ServiceInstanceResource{}, func(resource interface{}) bool { + if serviceInstanceResource, ok := resource.(ServiceInstanceResource); ok { + services = append(services, serviceInstanceResource.ToModel()) + return true + } + return false + }) + return services, err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_keys.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_keys.go new file mode 100644 index 0000000000..f17cad5326 --- /dev/null +++ 
b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_keys.go
@@ -0,0 +1,173 @@
+package mccpv2
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/IBM-Bluemix/bluemix-go/bmxerror"
+	"github.com/IBM-Bluemix/bluemix-go/client"
+	"github.com/IBM-Bluemix/bluemix-go/rest"
+)
+
+//ErrCodeServiceKeyDoesNotExist ...
+const ErrCodeServiceKeyDoesNotExist = "ServiceKeyDoesNotExist"
+
+//ServiceKeyRequest ...
+type ServiceKeyRequest struct {
+	Name                string                 `json:"name"`
+	ServiceInstanceGUID string                 `json:"service_instance_guid"`
+	Params              map[string]interface{} `json:"parameters,omitempty"`
+}
+
+//ServiceKey model...
+type ServiceKey struct {
+	GUID                string
+	Name                string                 `json:"name"`
+	ServiceInstanceGUID string                 `json:"service_instance_guid"`
+	ServiceInstanceURL  string                 `json:"service_instance_url"`
+	Credentials         map[string]interface{} `json:"credentials"`
+}
+
+//ServiceKeyFields ...
+type ServiceKeyFields struct {
+	// ServiceKeyMetadata carries the key's created/updated timestamps in
+	// addition to the GUID and URL, so it is used here instead of the more
+	// generic instance metadata.
+	Metadata ServiceKeyMetadata
+	Entity   ServiceKey
+}
+
+//ServiceKeyMetadata ...
+type ServiceKeyMetadata struct {
+	GUID      string `json:"guid"`
+	URL       string `json:"url"`
+	CreatedAt string `json:"created_at"`
+	UpdatedAt string `json:"updated_at"`
+}
+
+//ServiceKeyResource ...
+type ServiceKeyResource struct {
+	Resource
+	Entity ServiceKeyEntity
+}
+
+//ServiceKeyEntity ...
+type ServiceKeyEntity struct {
+	Name                string                 `json:"name"`
+	ServiceInstanceGUID string                 `json:"service_instance_guid"`
+	ServiceInstanceURL  string                 `json:"service_instance_url"`
+	Credentials         map[string]interface{} `json:"credentials"`
+}
+
+//ToModel ...
+func (resource ServiceKeyResource) ToModel() ServiceKey {
+	entity := resource.Entity
+
+	return ServiceKey{
+		GUID:                resource.Metadata.GUID,
+		Name:                entity.Name,
+		ServiceInstanceGUID: entity.ServiceInstanceGUID,
+		ServiceInstanceURL:  entity.ServiceInstanceURL,
+		Credentials:         entity.Credentials,
+	}
+}
+
+//ServiceKeys ...
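+// Illustrative usage sketch (c and the GUID/name literals are placeholders):
+//
+//	keys := newServiceKeyAPI(c)
+//	key, err := keys.Create("service-instance-guid", "my-key", nil)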
+type ServiceKeys interface { + Create(serviceInstanceGUID string, keyName string, params map[string]interface{}) (*ServiceKeyFields, error) + FindByName(serviceInstanceGUID string, keyName string) (*ServiceKey, error) + Get(serviceKeyGUID string) (*ServiceKeyFields, error) + Delete(serviceKeyGUID string) error + List(filters ...string) ([]ServiceKey, error) +} + +type serviceKey struct { + client *client.Client +} + +func newServiceKeyAPI(c *client.Client) ServiceKeys { + return &serviceKey{ + client: c, + } +} + +func (r *serviceKey) Create(serviceInstanceGUID string, keyName string, params map[string]interface{}) (*ServiceKeyFields, error) { + serviceKeyFields := ServiceKeyFields{} + reqParam := ServiceKeyRequest{ + ServiceInstanceGUID: serviceInstanceGUID, + Name: keyName, + Params: params, + } + _, err := r.client.Post("/v2/service_keys", reqParam, &serviceKeyFields) + if err != nil { + return nil, err + } + return &serviceKeyFields, nil +} + +func (r *serviceKey) Delete(serviceKeyGUID string) error { + rawURL := fmt.Sprintf("/v2/service_keys/%s", serviceKeyGUID) + _, err := r.client.Delete(rawURL) + return err +} + +func (r *serviceKey) Get(guid string) (*ServiceKeyFields, error) { + rawURL := fmt.Sprintf("/v2/service_keys/%s", guid) + serviceKeyFields := ServiceKeyFields{} + _, err := r.client.Get(rawURL, &serviceKeyFields) + if err != nil { + return nil, err + } + + return &serviceKeyFields, err +} + +func (r *serviceKey) FindByName(serviceInstanceGUID string, keyName string) (*ServiceKey, error) { + rawURL := fmt.Sprintf("/v2/service_instances/%s/service_keys", serviceInstanceGUID) + req := rest.GetRequest(rawURL) + if keyName != "" { + req.Query("q", "name:"+keyName) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + serviceKeys, err := r.listServiceKeysWithPath(path) + if err != nil { + return nil, err + } + if len(serviceKeys) == 0 { + return nil, bmxerror.New(ErrCodeServiceKeyDoesNotExist, + fmt.Sprintf("Given service key %q doesn't exist for the given service instance %q", keyName, serviceInstanceGUID)) + } + return &serviceKeys[0], nil +} + +func (r *serviceKey) List(filters ...string) ([]ServiceKey, error) { + rawURL := "/v2/service_keys" + req := rest.GetRequest(rawURL) + if len(filters) > 0 { + req.Query("q", strings.Join(filters, "")) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + keys, err := r.listServiceKeysWithPath(path) + if err != nil { + return nil, err + } + return keys, nil +} + +func (r *serviceKey) listServiceKeysWithPath(path string) ([]ServiceKey, error) { + var serviceKeys []ServiceKey + _, err := r.client.GetPaginated(path, ServiceKeyResource{}, func(resource interface{}) bool { + if serviceKeyResource, ok := resource.(ServiceKeyResource); ok { + serviceKeys = append(serviceKeys, serviceKeyResource.ToModel()) + return true + } + return false + }) + return serviceKeys, err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_offerings.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_offerings.go new file mode 100644 index 0000000000..51eccb057d --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_offerings.go @@ -0,0 +1,168 @@ +package mccpv2 + +import ( + "fmt" + + "github.com/IBM-Bluemix/bluemix-go/client" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/rest" +) + +//ErrCodeServiceDoesnotExist ... 
+const ErrCodeServiceDoesnotExist = "ServiceDoesnotExist" + +//ServiceOffering model +type ServiceOffering struct { + GUID string + Label string `json:"label"` + Provider string `json:"provider"` + Description string `json:"description"` + LongDescription string `json:"long_description"` + Version string `json:"version"` + URL string `json:"url"` + InfoURL string `json:"info_url"` + DocumentURL string `json:"documentation_url"` + Timeout string `json:"timeout"` + UniqueID string `json:"unique_id"` + ServiceBrokerGUID string `json:"service_broker_guid"` + ServicePlansURL string `json:"service_plans_url"` + Tags []string `json:"tags"` + Requires []string `json:"requires"` + IsActive bool `json:"active"` + IsBindable bool `json:"bindable"` + IsPlanUpdateable bool `json:"plan_updateable"` +} + +//ServiceOfferingResource ... +type ServiceOfferingResource struct { + Resource + Entity ServiceOfferingEntity +} + +//ServiceOfferingEntity ... +type ServiceOfferingEntity struct { + Label string `json:"label"` + Provider string `json:"provider"` + Description string `json:"description"` + LongDescription string `json:"long_description"` + Version string `json:"version"` + URL string `json:"url"` + InfoURL string `json:"info_url"` + DocumentURL string `json:"documentation_url"` + Timeout string `json:"timeout"` + UniqueID string `json:"unique_id"` + ServiceBrokerGUID string `json:"service_broker_guid"` + ServicePlansURL string `json:"service_plans_url"` + Tags []string `json:"tags"` + Requires []string `json:"requires"` + IsActive bool `json:"active"` + IsBindable bool `json:"bindable"` + IsPlanUpdateable bool `json:"plan_updateable"` +} + +//ToFields ... +func (resource ServiceOfferingResource) ToFields() ServiceOffering { + entity := resource.Entity + + return ServiceOffering{ + GUID: resource.Metadata.GUID, + Label: entity.Label, + Provider: entity.Provider, + Description: entity.Description, + LongDescription: entity.LongDescription, + Version: entity.Version, + URL: entity.URL, + InfoURL: entity.InfoURL, + DocumentURL: entity.DocumentURL, + Timeout: entity.Timeout, + UniqueID: entity.UniqueID, + ServiceBrokerGUID: entity.ServiceBrokerGUID, + ServicePlansURL: entity.ServicePlansURL, + Tags: entity.Tags, + Requires: entity.Requires, + IsActive: entity.IsActive, + IsBindable: entity.IsBindable, + IsPlanUpdateable: entity.IsPlanUpdateable, + } +} + +//ServiceOfferingFields ... +type ServiceOfferingFields struct { + Metadata ServiceOfferingMetadata + Entity ServiceOffering +} + +//ServiceOfferingMetadata ... +type ServiceOfferingMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//ServiceOfferings ... 
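+// Illustrative usage sketch (c and the service label are placeholders):
+//
+//	offerings := newServiceOfferingAPI(c)
+//	svc, err := offerings.FindByLabel("cloudantNoSQLDB")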
+type ServiceOfferings interface {
+	FindByLabel(serviceName string) (*ServiceOffering, error)
+	Get(svcOfferingGUID string) (*ServiceOfferingFields, error)
+}
+
+type serviceOfferings struct {
+	client *client.Client
+}
+
+func newServiceOfferingAPI(c *client.Client) ServiceOfferings {
+	return &serviceOfferings{
+		client: c,
+	}
+}
+
+func (s *serviceOfferings) Get(svcGUID string) (*ServiceOfferingFields, error) {
+	rawURL := fmt.Sprintf("/v2/services/%s", svcGUID)
+	svcFields := ServiceOfferingFields{}
+	_, err := s.client.Get(rawURL, &svcFields)
+	if err != nil {
+		return nil, err
+	}
+	return &svcFields, err
+}
+
+func (s *serviceOfferings) FindByLabel(serviceName string) (*ServiceOffering, error) {
+	req := rest.GetRequest("/v2/services")
+	if serviceName != "" {
+		req.Query("q", "label:"+serviceName)
+	}
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	var services ServiceOffering
+	var found bool
+	err = s.listServicesOfferingWithPath(path, func(serviceOfferingResource ServiceOfferingResource) bool {
+		services = serviceOfferingResource.ToFields()
+		found = true
+		return false
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	if found {
+		return &services, err
+	}
+
+	//May not be found and no error
+	return nil, bmxerror.New(ErrCodeServiceDoesnotExist,
+		fmt.Sprintf("Given service %q doesn't exist", serviceName))
+}
+
+func (s *serviceOfferings) listServicesOfferingWithPath(path string, cb func(ServiceOfferingResource) bool) error {
+	_, err := s.client.GetPaginated(path, ServiceOfferingResource{}, func(resource interface{}) bool {
+		if serviceOfferingResource, ok := resource.(ServiceOfferingResource); ok {
+			return cb(serviceOfferingResource)
+		}
+		return false
+	})
+	return err
+}
diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_plans.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_plans.go
new file mode 100644
index 0000000000..49bb2d647c
--- /dev/null
+++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/service_plans.go
@@ -0,0 +1,139 @@
+package mccpv2
+
+import (
+	"fmt"
+
+	"github.com/IBM-Bluemix/bluemix-go/bmxerror"
+	"github.com/IBM-Bluemix/bluemix-go/client"
+	"github.com/IBM-Bluemix/bluemix-go/rest"
+)
+
+//ErrCodeServicePlanDoesNotExist ...
+const ErrCodeServicePlanDoesNotExist = "ServicePlanDoesNotExist"
+
+//ServicePlan ...
+type ServicePlan struct {
+	GUID                string
+	Name                string `json:"name"`
+	Description         string `json:"description"`
+	IsFree              bool   `json:"free"`
+	IsPublic            bool   `json:"public"`
+	IsActive            bool   `json:"active"`
+	ServiceGUID         string `json:"service_guid"`
+	UniqueID            string `json:"unique_id"`
+	ServiceInstancesURL string `json:"service_instances_url"`
+}
+
+//ServicePlanResource ...
+type ServicePlanResource struct {
+	Resource
+	Entity ServicePlanEntity
+}
+
+//ServicePlanEntity ...
+type ServicePlanEntity struct {
+	Name                string `json:"name"`
+	Description         string `json:"description"`
+	IsFree              bool   `json:"free"`
+	IsPublic            bool   `json:"public"`
+	IsActive            bool   `json:"active"`
+	ServiceGUID         string `json:"service_guid"`
+	UniqueID            string `json:"unique_id"`
+	ServiceInstancesURL string `json:"service_instances_url"`
+}
+
+//ToFields ...
+func (resource ServicePlanResource) ToFields() ServicePlan {
+	entity := resource.Entity
+
+	return ServicePlan{
+		GUID:                resource.Metadata.GUID,
+		Name:                entity.Name,
+		Description:         entity.Description,
+		IsFree:              entity.IsFree,
+		IsPublic:            entity.IsPublic,
+		IsActive:            entity.IsActive,
+		ServiceGUID:         entity.ServiceGUID,
+		UniqueID:            entity.UniqueID,
+		ServiceInstancesURL: entity.ServiceInstancesURL,
+	}
+}
+
+//ServicePlanFields ...
+type ServicePlanFields struct {
+	Metadata ServicePlanMetadata
+	Entity   ServicePlan
+}
+
+//ServicePlanMetadata ...
+type ServicePlanMetadata struct {
+	GUID string `json:"guid"`
+	URL  string `json:"url"`
+}
+
+//ServicePlans ...
+type ServicePlans interface {
+	FindPlanInServiceOffering(serviceOfferingGUID string, planType string) (*ServicePlan, error)
+	Get(planGUID string) (*ServicePlanFields, error)
+}
+
+type servicePlan struct {
+	client *client.Client
+}
+
+func newServicePlanAPI(c *client.Client) ServicePlans {
+	return &servicePlan{
+		client: c,
+	}
+}
+
+func (s *servicePlan) Get(planGUID string) (*ServicePlanFields, error) {
+	rawURL := fmt.Sprintf("/v2/service_plans/%s", planGUID)
+	planFields := ServicePlanFields{}
+	_, err := s.client.Get(rawURL, &planFields)
+	if err != nil {
+		return nil, err
+	}
+	return &planFields, err
+}
+
+func (s *servicePlan) FindPlanInServiceOffering(serviceOfferingGUID string, planType string) (*ServicePlan, error) {
+	req := rest.GetRequest("/v2/service_plans")
+	if serviceOfferingGUID != "" {
+		req.Query("q", "service_guid:"+serviceOfferingGUID)
+	}
+	httpReq, err := req.Build()
+	if err != nil {
+		return nil, err
+	}
+	path := httpReq.URL.String()
+	plans, err := s.listServicesPlanWithPath(path)
+	if err != nil {
+		return nil, err
+	}
+	if len(plans) == 0 {
+		return nil, bmxerror.New(ErrCodeServicePlanDoesNotExist,
+			fmt.Sprintf("Given plan %q doesn't exist for the service %q", planType, serviceOfferingGUID))
+	}
+	for _, p := range plans {
+		if p.Name == planType {
+			return &p, nil
+		}
+	}
+	return nil, bmxerror.New(ErrCodeServicePlanDoesNotExist,
+		fmt.Sprintf("Given plan %q doesn't exist for the service %q", planType, serviceOfferingGUID))
+}
+
+func (s *servicePlan) listServicesPlanWithPath(path string) ([]ServicePlan, error) {
+	var servicePlans []ServicePlan
+	_, err := s.client.GetPaginated(path, ServicePlanResource{}, func(resource interface{}) bool {
+		if servicePlanResource, ok := resource.(ServicePlanResource); ok {
+			servicePlans = append(servicePlans, servicePlanResource.ToFields())
+			return true
+		}
+		return false
+	})
+	return servicePlans, err
+}
diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/shared_domain.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/shared_domain.go
new file mode 100644
index 0000000000..f1c08bd9f4
--- /dev/null
+++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/shared_domain.go
@@ -0,0 +1,146 @@
+package mccpv2
+
+import (
+	"fmt"
+
+	"github.com/IBM-Bluemix/bluemix-go/bmxerror"
+	"github.com/IBM-Bluemix/bluemix-go/client"
+	"github.com/IBM-Bluemix/bluemix-go/rest"
+)
+
+//ErrCodeSharedDomainDoesnotExist ...
+var ErrCodeSharedDomainDoesnotExist = "SharedDomainDoesnotExist"
+
+//SharedDomainRequest ...
+type SharedDomainRequest struct {
+	Name            string `json:"name"`
+	RouterGroupGUID string `json:"router_group_guid,omitempty"`
+}
+
+//SharedDomainMetadata ...
+type SharedDomainMetadata struct {
+	GUID string `json:"guid"`
+	URL  string `json:"url"`
+}
+
+//SharedDomainEntity ...
+type SharedDomainEntity struct { + Name string `json:"name"` + RouterGroupGUID string `json:"router_group_guid"` + RouterGroupType string `json:"router_group_type"` +} + +//SharedDomainResource ... +type SharedDomainResource struct { + Resource + Entity SharedDomainEntity +} + +//SharedDomainFields ... +type SharedDomainFields struct { + Metadata SharedDomainMetadata + Entity SharedDomainEntity +} + +//ToFields .. +func (resource SharedDomainResource) ToFields() SharedDomain { + entity := resource.Entity + + return SharedDomain{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + RouterGroupGUID: entity.RouterGroupGUID, + RouterGroupType: entity.RouterGroupType, + } +} + +//SharedDomain model +type SharedDomain struct { + GUID string + Name string + RouterGroupGUID string + RouterGroupType string +} + +//SharedDomains ... +type SharedDomains interface { + FindByName(domainName string) (*SharedDomain, error) + Create(req SharedDomainRequest) (*SharedDomainFields, error) + Get(sharedDomainGUID string) (*SharedDomainFields, error) + Delete(sharedDomainGUID string, async bool) error +} + +type sharedDomain struct { + client *client.Client +} + +func newSharedDomainAPI(c *client.Client) SharedDomains { + return &sharedDomain{ + client: c, + } +} + +func (d *sharedDomain) FindByName(domainName string) (*SharedDomain, error) { + rawURL := "/v2/shared_domains" + req := rest.GetRequest(rawURL).Query("q", "name:"+domainName) + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + domain, err := listSharedDomainWithPath(d.client, path) + if err != nil { + return nil, err + } + if len(domain) == 0 { + return nil, bmxerror.New(ErrCodeSharedDomainDoesnotExist, fmt.Sprintf("Shared Domain: %q doesn't exist", domainName)) + } + return &domain[0], nil +} + +func listSharedDomainWithPath(c *client.Client, path string) ([]SharedDomain, error) { + var sharedDomain []SharedDomain + _, err := c.GetPaginated(path, SharedDomainResource{}, func(resource interface{}) bool { + if sharedDomainResource, ok := resource.(SharedDomainResource); ok { + sharedDomain = append(sharedDomain, sharedDomainResource.ToFields()) + return true + } + return false + }) + return sharedDomain, err +} + +func (d *sharedDomain) Create(req SharedDomainRequest) (*SharedDomainFields, error) { + rawURL := "/v2/shared_domains" + sharedDomainFields := SharedDomainFields{} + _, err := d.client.Post(rawURL, req, &sharedDomainFields) + if err != nil { + return nil, err + } + return &sharedDomainFields, nil +} + +func (d *sharedDomain) Get(sharedDomainGUID string) (*SharedDomainFields, error) { + rawURL := fmt.Sprintf("/v2/shared_domains/%s", sharedDomainGUID) + sharedDomainFields := SharedDomainFields{} + _, err := d.client.Get(rawURL, &sharedDomainFields, nil) + if err != nil { + return nil, err + } + return &sharedDomainFields, nil +} + +func (d *sharedDomain) Delete(sharedDomainGUID string, async bool) error { + rawURL := fmt.Sprintf("/v2/shared_domains/%s", sharedDomainGUID) + req := rest.GetRequest(rawURL).Query("recursive", "true") + if async { + req.Query("async", "true") + } + httpReq, err := req.Build() + if err != nil { + return err + } + path := httpReq.URL.String() + _, err = d.client.Delete(path) + return err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/space_quota.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/space_quota.go new file mode 100644 index 0000000000..188c121d46 --- /dev/null +++ 
b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/space_quota.go @@ -0,0 +1,205 @@ +package mccpv2 + +import ( + "encoding/json" + "fmt" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/client" + "github.com/IBM-Bluemix/bluemix-go/rest" +) + +//SpaceQuotaCreateRequest ... +type SpaceQuotaCreateRequest struct { + Name string `json:"name"` + OrgGUID string `json:"organization_guid"` + MemoryLimitInMB int64 `json:"memory_limit,omitempty"` + InstanceMemoryLimitInMB int64 `json:"instance_memory_limit,omitempty"` + RoutesLimit int `json:"total_routes,omitempty"` + ServicesLimit int `json:"total_services,omitempty"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` +} + +//SpaceQuotaUpdateRequest ... +type SpaceQuotaUpdateRequest struct { + Name string `json:"name"` + OrgGUID string `json:"organization_guid,omitempty"` + MemoryLimitInMB int64 `json:"memory_limit,omitempty"` + InstanceMemoryLimitInMB int64 `json:"instance_memory_limit,omitempty"` + RoutesLimit int `json:"total_routes,omitempty"` + ServicesLimit int `json:"total_services,omitempty"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` +} + +type SpaceQuota struct { + GUID string + Name string + NonBasicServicesAllowed bool + ServicesLimit int + RoutesLimit int + MemoryLimitInMB int64 + InstanceMemoryLimitInMB int64 + TrialDBAllowed bool + AppInstanceLimit int + PrivateDomainsLimit int + AppTaskLimit int +} + +//SpaceQuotaFields ... +type SpaceQuotaFields struct { + Metadata SpaceQuotaMetadata + Entity SpaceQuotaEntity +} + +//SpaceQuotaMetadata ... +type SpaceQuotaMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//ErrCodeSpaceQuotaDoesnotExist ... +const ErrCodeSpaceQuotaDoesnotExist = "SpaceQuotaDoesnotExist" + +type SpaceQuotaResource struct { + Resource + Entity SpaceQuotaEntity +} + +type SpaceQuotaEntity struct { + Name string `json:"name"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` + ServicesLimit int `json:"total_services"` + RoutesLimit int `json:"total_routes"` + MemoryLimitInMB int64 `json:"memory_limit"` + InstanceMemoryLimitInMB int64 `json:"instance_memory_limit"` + TrialDBAllowed bool `json:"trial_db_allowed"` + AppInstanceLimit json.Number `json:"app_instance_limit"` + PrivateDomainsLimit json.Number `json:"total_private_domains"` + AppTaskLimit json.Number `json:"app_task_limit"` +} + +func (resource SpaceQuotaResource) ToFields() SpaceQuota { + entity := resource.Entity + + return SpaceQuota{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + NonBasicServicesAllowed: entity.NonBasicServicesAllowed, + ServicesLimit: entity.ServicesLimit, + RoutesLimit: entity.RoutesLimit, + MemoryLimitInMB: entity.MemoryLimitInMB, + InstanceMemoryLimitInMB: entity.InstanceMemoryLimitInMB, + TrialDBAllowed: entity.TrialDBAllowed, + AppInstanceLimit: NumberToInt(entity.AppInstanceLimit, -1), + PrivateDomainsLimit: NumberToInt(entity.PrivateDomainsLimit, -1), + AppTaskLimit: NumberToInt(entity.AppTaskLimit, -1), + } +} + +//SpaceQuotas ... 
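+// Illustrative usage sketch (c and the name/GUID literals are placeholders):
+//
+//	quotas := newSpaceQuotasAPI(c)
+//	q, err := quotas.FindByName("my-quota", "org-guid")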
+type SpaceQuotas interface { + FindByName(name, orgGUID string) (*SpaceQuota, error) + Create(createRequest SpaceQuotaCreateRequest) (*SpaceQuotaFields, error) + Update(updateRequest SpaceQuotaUpdateRequest, spaceQuotaGUID string) (*SpaceQuotaFields, error) + Delete(spaceQuotaGUID string) error + Get(spaceQuotaGUID string) (*SpaceQuotaFields, error) +} + +type spaceQuota struct { + client *client.Client +} + +func newSpaceQuotasAPI(c *client.Client) SpaceQuotas { + return &spaceQuota{ + client: c, + } +} + +func (r *spaceQuota) FindByName(name, orgGUID string) (*SpaceQuota, error) { + rawURL := fmt.Sprintf("/v2/organizations/%s/space_quota_definitions", orgGUID) + req := rest.GetRequest(rawURL) + + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + + spaceQuotas, err := r.listSpaceQuotaWithPath(path) + if err != nil { + return nil, err + } + + if len(spaceQuotas) == 0 { + return nil, bmxerror.New(ErrCodeSpaceQuotaDoesnotExist, + fmt.Sprintf("Given space quota %q doesn't exist for the organization %q", name, orgGUID)) + } + + for _, q := range spaceQuotas { + if q.Name == name { + return &q, nil + } + + } + return nil, bmxerror.New(ErrCodeSpaceQuotaDoesnotExist, + fmt.Sprintf("Given space quota %q doesn't exist for the organization %q", name, orgGUID)) +} + +func (r *spaceQuota) listSpaceQuotaWithPath(path string) ([]SpaceQuota, error) { + var spaceQuota []SpaceQuota + _, err := r.client.GetPaginated(path, SpaceQuotaResource{}, func(resource interface{}) bool { + if spaceQuotaResource, ok := resource.(SpaceQuotaResource); ok { + spaceQuota = append(spaceQuota, spaceQuotaResource.ToFields()) + return true + } + return false + }) + return spaceQuota, err +} + +func (r *spaceQuota) Create(createRequest SpaceQuotaCreateRequest) (*SpaceQuotaFields, error) { + rawURL := "/v2/space_quota_definitions?accepts_incomplete=true&async=true" + spaceQuotaFields := SpaceQuotaFields{} + _, err := r.client.Post(rawURL, createRequest, &spaceQuotaFields) + if err != nil { + return nil, err + } + return &spaceQuotaFields, nil +} + +func (r *spaceQuota) Get(spaceQuotaGUID string) (*SpaceQuotaFields, error) { + rawURL := fmt.Sprintf("/v2/space_quota_definitions/%s", spaceQuotaGUID) + spaceQuotaFields := SpaceQuotaFields{} + _, err := r.client.Get(rawURL, &spaceQuotaFields) + if err != nil { + return nil, err + } + + return &spaceQuotaFields, err +} + +func (r *spaceQuota) Update(updateRequest SpaceQuotaUpdateRequest, spaceQuotaGUID string) (*SpaceQuotaFields, error) { + rawURL := fmt.Sprintf("/v2/space_quota_definitions/%s?accepts_incomplete=true&async=true", spaceQuotaGUID) + spaceQuotaFields := SpaceQuotaFields{} + _, err := r.client.Put(rawURL, updateRequest, &spaceQuotaFields) + if err != nil { + return nil, err + } + return &spaceQuotaFields, nil +} + +func (r *spaceQuota) Delete(spaceQuotaGUID string) error { + rawURL := fmt.Sprintf("/v2/space_quota_definitions/%s", spaceQuotaGUID) + _, err := r.client.Delete(rawURL) + return err +} + +func NumberToInt(number json.Number, defaultValue int) int { + if number != "" { + i, err := number.Int64() + if err == nil { + return int(i) + } + } + return defaultValue +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/spaces.go b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/spaces.go new file mode 100644 index 0000000000..6ab4d7722e --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/api/mccp/mccpv2/spaces.go @@ -0,0 +1,348 @@ +package mccpv2 + +import ( + "fmt" + "strconv" + 
"strings" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/client" + "github.com/IBM-Bluemix/bluemix-go/rest" +) + +//SpaceCreateRequest ... +type SpaceCreateRequest struct { + Name string `json:"name"` + OrgGUID string `json:"organization_guid"` + SpaceQuotaGUID string `json:"space_quota_definition_guid,omitempty"` +} + +//SpaceUpdateRequest ... +type SpaceUpdateRequest struct { + Name *string `json:"name,omitempty"` +} + +//Space ... +type Space struct { + GUID string + Name string + OrgGUID string + SpaceQuotaGUID string + AllowSSH bool +} + +//SpaceRole ... +type SpaceRole struct { + UserGUID string + Admin bool + UserName string +} + +//SpaceFields ... +type SpaceFields struct { + Metadata SpaceMetadata + Entity SpaceEntity +} + +//SpaceMetadata ... +type SpaceMetadata struct { + GUID string `json:"guid"` + URL string `json:"url"` +} + +//ErrCodeSpaceDoesnotExist ... +const ErrCodeSpaceDoesnotExist = "SpaceDoesnotExist" + +//SpaceResource ... +type SpaceResource struct { + Resource + Entity SpaceEntity +} + +//SpaceRoleResource ... +type SpaceRoleResource struct { + Resource + Entity SpaceRoleEntity +} + +//SpaceRoleEntity ... +type SpaceRoleEntity struct { + UserGUID string `json:"guid"` + Admin bool `json:"bool"` + UserName string `json:"username"` +} + +//SpaceEntity ... +type SpaceEntity struct { + Name string `json:"name"` + OrgGUID string `json:"organization_guid"` + SpaceQuotaGUID string `json:"space_quota_definition_guid"` + AllowSSH bool `json:"allow_ssh"` +} + +//ToFields ... +func (resource *SpaceResource) ToFields() Space { + entity := resource.Entity + + return Space{ + GUID: resource.Metadata.GUID, + Name: entity.Name, + OrgGUID: entity.OrgGUID, + SpaceQuotaGUID: entity.SpaceQuotaGUID, + AllowSSH: entity.AllowSSH, + } +} + +//ToFields ... +func (resource *SpaceRoleResource) ToFields() SpaceRole { + entity := resource.Entity + + return SpaceRole{ + UserGUID: resource.Metadata.GUID, + Admin: entity.Admin, + UserName: entity.UserName, + } +} + +//RouteFilter ... +type RouteFilter struct { + DomainGUID string + Host *string + Path *string + Port *int +} + +//Spaces ... 
+type Spaces interface { + ListSpacesInOrg(orgGUID, region string) ([]Space, error) + FindByNameInOrg(orgGUID, name, region string) (*Space, error) + Create(req SpaceCreateRequest) (*SpaceFields, error) + Update(spaceGUID string, req SpaceUpdateRequest) (*SpaceFields, error) + Delete(spaceGUID string) error + Get(spaceGUID string) (*SpaceFields, error) + ListRoutes(spaceGUID string, req RouteFilter) ([]Route, error) + AssociateAuditor(spaceGUID, userMail string) (*SpaceFields, error) + AssociateDeveloper(spaceGUID, userMail string) (*SpaceFields, error) + AssociateManager(spaceGUID, userMail string) (*SpaceFields, error) + + DisassociateAuditor(spaceGUID, userMail string) error + DisassociateDeveloper(spaceGUID, userMail string) error + DisassociateManager(spaceGUID, userMail string) error + + ListAuditors(spaceGUID string, filters ...string) ([]SpaceRole, error) + ListDevelopers(spaceGUID string, filters ...string) ([]SpaceRole, error) + ListManagers(spaceGUID string, filters ...string) ([]SpaceRole, error) +} + +type spaces struct { + client *client.Client +} + +func newSpacesAPI(c *client.Client) Spaces { + return &spaces{ + client: c, + } +} + +func (r *spaces) FindByNameInOrg(orgGUID string, name string, region string) (*Space, error) { + rawURL := fmt.Sprintf("/v2/organizations/%s/spaces", orgGUID) + req := rest.GetRequest(rawURL).Query("q", "name:"+name) + if region != "" { + req.Query("region", region) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + + spaces, err := r.listSpacesWithPath(path) + + if err != nil { + return nil, err + } + if len(spaces) == 0 { + return nil, bmxerror.New(ErrCodeSpaceDoesnotExist, + fmt.Sprintf("Given space: %q doesn't exist in given org: %q", name, orgGUID)) + + } + return &spaces[0], nil +} + +func (r *spaces) ListSpacesInOrg(orgGUID string, region string) ([]Space, error) { + rawURL := fmt.Sprintf("v2/organizations/%s/spaces", orgGUID) + req := rest.GetRequest(rawURL) + if region != "" { + req.Query("region", region) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + + return r.listSpacesWithPath(path) +} + +func (r *spaces) listSpacesWithPath(path string) ([]Space, error) { + var spaces []Space + _, err := r.client.GetPaginated(path, SpaceResource{}, func(resource interface{}) bool { + if spaceResource, ok := resource.(SpaceResource); ok { + spaces = append(spaces, spaceResource.ToFields()) + return true + } + return false + }) + return spaces, err +} + +func (r *spaces) listSpaceRolesWithPath(path string) ([]SpaceRole, error) { + var spaceRoles []SpaceRole + _, err := r.client.GetPaginated(path, SpaceRoleResource{}, func(resource interface{}) bool { + if spaceRoleResource, ok := resource.(SpaceRoleResource); ok { + spaceRoles = append(spaceRoles, spaceRoleResource.ToFields()) + return true + } + return false + }) + return spaceRoles, err +} + +func (r *spaces) Create(req SpaceCreateRequest) (*SpaceFields, error) { + rawURL := "/v2/spaces?accepts_incomplete=true&async=true" + spaceFields := SpaceFields{} + _, err := r.client.Post(rawURL, req, &spaceFields) + if err != nil { + return nil, err + } + return &spaceFields, nil +} + +func (r *spaces) Get(spaceGUID string) (*SpaceFields, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s", spaceGUID) + spaceFields := SpaceFields{} + _, err := r.client.Get(rawURL, &spaceFields) + if err != nil { + return nil, err + } + + return &spaceFields, err +} + +func (r *spaces) Update(spaceGUID string, req 
SpaceUpdateRequest) (*SpaceFields, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s?accepts_incomplete=true&async=true", spaceGUID) + spaceFields := SpaceFields{} + _, err := r.client.Put(rawURL, req, &spaceFields) + if err != nil { + return nil, err + } + return &spaceFields, nil +} + +func (r *spaces) Delete(spaceGUID string) error { + rawURL := fmt.Sprintf("/v2/spaces/%s", spaceGUID) + _, err := r.client.Delete(rawURL) + return err +} + +func (r *spaces) associateRole(url, userMail string) (*SpaceFields, error) { + spaceFields := SpaceFields{} + _, err := r.client.Put(url, map[string]string{"username": userMail}, &spaceFields) + if err != nil { + return nil, err + } + return &spaceFields, nil +} + +func (r *spaces) removeRole(url, userMail string) error { + spaceFields := SpaceFields{} + _, err := r.client.DeleteWithBody(url, map[string]string{"username": userMail}, &spaceFields) + return err +} + +func (r *spaces) AssociateManager(spaceGUID string, userMail string) (*SpaceFields, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/managers", spaceGUID) + return r.associateRole(rawURL, userMail) +} +func (r *spaces) AssociateDeveloper(spaceGUID string, userMail string) (*SpaceFields, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/developers", spaceGUID) + return r.associateRole(rawURL, userMail) +} +func (r *spaces) AssociateAuditor(spaceGUID string, userMail string) (*SpaceFields, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/auditors", spaceGUID) + return r.associateRole(rawURL, userMail) +} + +func (r *spaces) DisassociateManager(spaceGUID string, userMail string) error { + rawURL := fmt.Sprintf("/v2/spaces/%s/managers", spaceGUID) + return r.removeRole(rawURL, userMail) +} + +func (r *spaces) DisassociateDeveloper(spaceGUID string, userMail string) error { + rawURL := fmt.Sprintf("/v2/spaces/%s/developers", spaceGUID) + return r.removeRole(rawURL, userMail) +} +func (r *spaces) DisassociateAuditor(spaceGUID string, userMail string) error { + rawURL := fmt.Sprintf("/v2/spaces/%s/auditors", spaceGUID) + return r.removeRole(rawURL, userMail) +} + +func (r *spaces) listSpaceRoles(rawURL string, filters ...string) ([]SpaceRole, error) { + req := rest.GetRequest(rawURL) + if len(filters) > 0 { + req.Query("q", strings.Join(filters, "")) + } + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + return r.listSpaceRolesWithPath(path) +} + +func (r *spaces) ListAuditors(spaceGUID string, filters ...string) ([]SpaceRole, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/auditors", spaceGUID) + return r.listSpaceRoles(rawURL, filters...) +} + +func (r *spaces) ListManagers(spaceGUID string, filters ...string) ([]SpaceRole, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/managers", spaceGUID) + return r.listSpaceRoles(rawURL, filters...) +} +func (r *spaces) ListDevelopers(spaceGUID string, filters ...string) ([]SpaceRole, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/developers", spaceGUID) + return r.listSpaceRoles(rawURL, filters...) 
+} + +func (r *spaces) ListRoutes(spaceGUID string, routeFilter RouteFilter) ([]Route, error) { + rawURL := fmt.Sprintf("/v2/spaces/%s/routes", spaceGUID) + req := rest.GetRequest(rawURL) + var query string + if routeFilter.DomainGUID != "" { + query = "domain_guid:" + routeFilter.DomainGUID + ";" + } + if routeFilter.Host != nil { + query += "host:" + *routeFilter.Host + ";" + } + if routeFilter.Path != nil { + query += "path:" + *routeFilter.Path + ";" + } + if routeFilter.Port != nil { + query += "port:" + strconv.Itoa(*routeFilter.Port) + ";" + } + + if len(query) > 0 { + req.Query("q", query) + } + + httpReq, err := req.Build() + if err != nil { + return nil, err + } + path := httpReq.URL.String() + route, err := listRouteWithPath(r.client, path) + if err != nil { + return nil, err + } + return route, nil +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/authentication/auth.go b/vendor/github.com/IBM-Bluemix/bluemix-go/authentication/auth.go new file mode 100644 index 0000000000..5e5d2326db --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/authentication/auth.go @@ -0,0 +1,26 @@ +package authentication + +import ( + "errors" + + bluemix "github.com/IBM-Bluemix/bluemix-go" + "github.com/IBM-Bluemix/bluemix-go/client" +) + +const ( + //ErrCodeInvalidToken ... + ErrCodeInvalidToken = "InvalidToken" +) + +//PopulateTokens populate the relevant tokens in the bluemix Config using the token provider +func PopulateTokens(tokenProvider client.TokenProvider, c *bluemix.Config) error { + if c.IBMID != "" && c.IBMIDPassword != "" { + err := tokenProvider.AuthenticatePassword(c.IBMID, c.IBMIDPassword) + return err + } + if c.BluemixAPIKey != "" { + err := tokenProvider.AuthenticateAPIKey(c.BluemixAPIKey) + return err + } + return errors.New("Insufficient credentials, need IBMID/IBMIDPassword or Bluemix API Key") +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/authentication/iam.go b/vendor/github.com/IBM-Bluemix/bluemix-go/authentication/iam.go new file mode 100644 index 0000000000..3695752308 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/authentication/iam.go @@ -0,0 +1,137 @@ +package authentication + +import ( + "encoding/base64" + "fmt" + + bluemix "github.com/IBM-Bluemix/bluemix-go" + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/rest" +) + +//IAMError ... +type IAMError struct { + ErrorCode string `json:"errorCode"` + ErrorMessage string `json:"errorMessage"` + ErrorDetails string `json:"errorDetails"` +} + +//Description ... +func (e IAMError) Description() string { + if e.ErrorDetails != "" { + return e.ErrorDetails + } + return e.ErrorMessage +} + +//IAMTokenResponse ... +type IAMTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + UAAAccessToken string `json:"uaa_token"` + UAARefreshToken string `json:"uaa_refresh_token"` + TokenType string `json:"token_type"` +} + +//IAMAuthRepository ... +type IAMAuthRepository struct { + config *bluemix.Config + client *rest.Client + endpoint string +} + +//NewIAMAuthRepository ... 
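+//
+// Usage sketch (illustrative only): the repository is normally constructed
+// by the session/client plumbing, but standalone use would look roughly
+// like this; cfg is an assumed, already-populated *bluemix.Config.
+//
+//	repo, err := NewIAMAuthRepository(cfg, rest.NewClient())
+//	if err == nil {
+//		// On success, cfg.IAMAccessToken and cfg.IAMRefreshToken are set.
+//		err = repo.AuthenticateAPIKey(cfg.BluemixAPIKey)
+//	}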
+func NewIAMAuthRepository(config *bluemix.Config, client *rest.Client) (*IAMAuthRepository, error) { + var endpoint string + + if config.TokenProviderEndpoint != nil { + endpoint = *config.TokenProviderEndpoint + } else { + var err error + endpoint, err = config.EndpointLocator.IAMEndpoint() + if err != nil { + return nil, err + } + } + + return &IAMAuthRepository{ + config: config, + client: client, + endpoint: endpoint, + }, nil +} + +//AuthenticatePassword ... +func (auth *IAMAuthRepository) AuthenticatePassword(username string, password string) error { + return auth.getToken(map[string]string{ + "grant_type": "password", + "username": username, + "password": password, + }) +} + +//AuthenticateAPIKey ... +func (auth *IAMAuthRepository) AuthenticateAPIKey(apiKey string) error { + return auth.getToken(map[string]string{ + "grant_type": "urn:ibm:params:oauth:grant-type:apikey", + "apikey": apiKey, + }) +} + +//AuthenticateSSO ... +func (auth *IAMAuthRepository) AuthenticateSSO(passcode string) error { + return auth.getToken(map[string]string{ + "grant_type": "urn:ibm:params:oauth:grant-type:passcode", + "passcode": passcode, + }) +} + +//RefreshToken ... +func (auth *IAMAuthRepository) RefreshToken() (string, error) { + data := map[string]string{ + "grant_type": "refresh_token", + "refresh_token": auth.config.IAMRefreshToken, + } + + err := auth.getToken(data) + if err != nil { + return "", err + } + + return auth.config.IAMAccessToken, nil +} + +func (auth *IAMAuthRepository) getToken(data map[string]string) error { + request := rest.PostRequest(auth.endpoint+"/oidc/token"). + Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("bx:bx"))). + Field("response_type", "cloud_iam,uaa"). + Field("uaa_client_id", "cf"). + Field("uaa_client_secret", "") + + for k, v := range data { + request.Field(k, v) + } + + var tokens IAMTokenResponse + var apiErr IAMError + + resp, err := auth.client.Do(request, &tokens, &apiErr) + if err != nil { + return err + } + + if apiErr.ErrorCode != "" { + if apiErr.ErrorCode == "BXNIM0407E" { + return bmxerror.New(ErrCodeInvalidToken, apiErr.Description()) + } + return bmxerror.NewRequestFailure(apiErr.ErrorCode, apiErr.Description(), resp.StatusCode) + } + + auth.config.IAMAccessToken = fmt.Sprintf("%s %s", tokens.TokenType, tokens.AccessToken) + auth.config.IAMRefreshToken = tokens.RefreshToken + + auth.config.UAAAccessToken = fmt.Sprintf("%s %s", tokens.TokenType, tokens.UAAAccessToken) + auth.config.UAARefreshToken = tokens.UAARefreshToken + + return nil +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/authentication/uaa.go b/vendor/github.com/IBM-Bluemix/bluemix-go/authentication/uaa.go new file mode 100644 index 0000000000..13b043a81d --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/authentication/uaa.go @@ -0,0 +1,113 @@ +package authentication + +import ( + "encoding/base64" + "fmt" + + bluemix "github.com/IBM-Bluemix/bluemix-go" + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/rest" +) + +//UAAError ... +type UAAError struct { + ErrorCode string `json:"error"` + Description string `json:"error_description"` +} + +//UAATokenResponse ... +type UAATokenResponse struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` +} + +//UAARepository ... +type UAARepository struct { + config *bluemix.Config + client *rest.Client + endpoint string +} + +//NewUAARepository ... 
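+//
+// Usage sketch (illustrative only), mirroring the IAM repository; cfg is an
+// assumed, already-populated *bluemix.Config.
+//
+//	uaa, err := NewUAARepository(cfg, rest.NewClient())
+//	if err == nil {
+//		// An API key authenticates as the pseudo-user "apikey".
+//		err = uaa.AuthenticateAPIKey(cfg.BluemixAPIKey)
+//	}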
+func NewUAARepository(config *bluemix.Config, client *rest.Client) (*UAARepository, error) { + var endpoint string + + if config.TokenProviderEndpoint != nil { + endpoint = *config.TokenProviderEndpoint + } else { + var err error + endpoint, err = config.EndpointLocator.UAAEndpoint() + if err != nil { + return nil, err + } + } + return &UAARepository{ + config: config, + client: client, + endpoint: endpoint, + }, nil +} + +//AuthenticatePassword ... +func (auth *UAARepository) AuthenticatePassword(username string, password string) error { + return auth.getToken(map[string]string{ + "grant_type": "password", + "username": username, + "password": password, + }) +} + +//AuthenticateSSO ... +func (auth *UAARepository) AuthenticateSSO(passcode string) error { + return auth.getToken(map[string]string{ + "grant_type": "password", + "passcode": passcode, + }) +} + +//AuthenticateAPIKey ... +func (auth *UAARepository) AuthenticateAPIKey(apiKey string) error { + return auth.AuthenticatePassword("apikey", apiKey) +} + +//RefreshToken ... +func (auth *UAARepository) RefreshToken() (string, error) { + err := auth.getToken(map[string]string{ + "grant_type": "refresh_token", + "refresh_token": auth.config.UAARefreshToken, + }) + if err != nil { + return "", err + } + + return auth.config.UAAAccessToken, nil +} + +func (auth *UAARepository) getToken(data map[string]string) error { + request := rest.PostRequest(auth.endpoint+"/oauth/token"). + Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte("cf:"))). + Field("scope", "") + + for k, v := range data { + request.Field(k, v) + } + + var tokens UAATokenResponse + var apiErr UAAError + + resp, err := auth.client.Do(request, &tokens, &apiErr) + if err != nil { + return err + } + if apiErr.ErrorCode != "" { + if apiErr.ErrorCode == "invalid-token" { + return bmxerror.NewInvalidTokenError(apiErr.Description) + } + return bmxerror.NewRequestFailure(apiErr.ErrorCode, apiErr.Description, resp.StatusCode) + } + + auth.config.UAAAccessToken = fmt.Sprintf("%s %s", tokens.TokenType, tokens.AccessToken) + auth.config.UAARefreshToken = tokens.RefreshToken + return nil +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/cert.go b/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/cert.go new file mode 100644 index 0000000000..a8bf023321 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/cert.go @@ -0,0 +1,23 @@ +package bmxerror + +//InvalidSSLCert ... +type InvalidSSLCert struct { + URL string + Reason string +} + +//NewInvalidSSLCert ... +func NewInvalidSSLCert(url, reason string) *InvalidSSLCert { + return &InvalidSSLCert{ + URL: url, + Reason: reason, + } +} + +func (err *InvalidSSLCert) Error() string { + message := "Received invalid SSL certificate from " + err.URL + if err.Reason != "" { + message += " - " + err.Reason + } + return message +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/error.go b/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/error.go new file mode 100644 index 0000000000..7688893930 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/error.go @@ -0,0 +1,25 @@ +package bmxerror + +//Error interface +type Error interface { + Error() string + Code() string + Description() string +} + +//RequestFailure interface +type RequestFailure interface { + Error + // The status code of the HTTP response. 
+ StatusCode() int +} + +//New creates a new Error object +func New(code, description string) Error { + return newGenericError(code, description) +} + +//NewRequestFailure creates a new Error object wrapping the server error +func NewRequestFailure(code, description string, statusCode int) Error { + return newRequestError(code, description, statusCode) +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/network.go b/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/network.go new file mode 100644 index 0000000000..17da7dd088 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/network.go @@ -0,0 +1,38 @@ +package bmxerror + +import ( + "crypto/x509" + "fmt" + "net" + "net/url" + + "golang.org/x/net/websocket" +) + +//WrapNetworkErrors ... +func WrapNetworkErrors(host string, err error) error { + var innerErr error + switch typedErr := err.(type) { + case *url.Error: + innerErr = typedErr.Err + case *websocket.DialError: + innerErr = typedErr.Err + } + + if innerErr != nil { + switch typedInnerErr := innerErr.(type) { + case x509.UnknownAuthorityError: + return NewInvalidSSLCert(host, "unknown authority") + case x509.HostnameError: + return NewInvalidSSLCert(host, "not valid for the requested host") + case x509.CertificateInvalidError: + return NewInvalidSSLCert(host, "") + case *net.OpError: + if typedInnerErr.Op == "dial" { + return fmt.Errorf("%s\n%s", err.Error(), "TIP: If you are behind a firewall and require an HTTP proxy, verify the https_proxy environment variable is correctly set. Else, check your network connection.") + } + } + } + + return err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/token.go b/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/token.go new file mode 100644 index 0000000000..d7193174b2 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/token.go @@ -0,0 +1,17 @@ +package bmxerror + +//InvalidTokenError ... +type InvalidTokenError struct { + Message string +} + +//NewInvalidTokenError ... 
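+//
+// Usage sketch (illustrative only): the generic service client checks for
+// this concrete type after a failed token refresh, so errors should be
+// built through this constructor rather than ad hoc.
+//
+//	err := bmxerror.NewInvalidTokenError("token is expired")
+//	fmt.Println(err) // Invalid auth token: token is expired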
+func NewInvalidTokenError(message string) *InvalidTokenError { + return &InvalidTokenError{ + Message: message, + } +} + +func (e *InvalidTokenError) Error() string { + return ("Invalid auth token: ") + e.Message +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/types.go b/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/types.go new file mode 100644 index 0000000000..1c5323fbe7 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/bmxerror/types.go @@ -0,0 +1,57 @@ +package bmxerror + +import "fmt" + +type genericError struct { + code string + description string +} + +func newGenericError(code, description string) *genericError { + return &genericError{code, description} +} + +func (g genericError) Error() string { + return fmt.Sprintf("%s: %s", g.code, g.description) +} + +func (g genericError) String() string { + return g.Error() +} + +func (g genericError) Code() string { + return g.code +} + +func (g genericError) Description() string { + return g.description +} + +type requestError struct { + genericError + statusCode int +} + +func newRequestError(code, description string, statusCode int) *requestError { + return &requestError{ + genericError: genericError{ + code: code, + description: description, + }, + statusCode: statusCode, + } +} + +func (r requestError) Error() string { + return fmt.Sprintf("Request failed with status code: %d, %s: %s", r.statusCode, r.code, r.description) +} + +func (r requestError) Code() string { + return r.code +} +func (r requestError) Description() string { + return r.description +} +func (r requestError) StatusCode() int { + return r.statusCode +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/client/client.go b/vendor/github.com/IBM-Bluemix/bluemix-go/client/client.go new file mode 100644 index 0000000000..c80b4e6c3a --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/client/client.go @@ -0,0 +1,204 @@ +//Package client provides a generic client to be used by all services +package client + +import ( + "fmt" + "log" + "path" + "strings" + + gohttp "net/http" + + bluemix "github.com/IBM-Bluemix/bluemix-go" + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/http" + + "github.com/IBM-Bluemix/bluemix-go/rest" +) + +//TokenProvider ... +type TokenProvider interface { + RefreshToken() (string, error) + AuthenticatePassword(string, string) error + AuthenticateAPIKey(string) error +} + +//HandlePagination ... +type HandlePagination func(c *Client, path string, resource interface{}, cb func(interface{}) bool) (resp *gohttp.Response, err error) + +//Client is the base client for all service api client +type Client struct { + Config *bluemix.Config + DefaultHeader gohttp.Header + ServiceName bluemix.ServiceName + TokenRefresher TokenProvider + HandlePagination HandlePagination +} + +//Config stores any generic service client configurations +type Config struct { + Config *bluemix.Config + Endpoint string +} + +//New ... +func New(c *bluemix.Config, serviceName bluemix.ServiceName, refresher TokenProvider, pagination HandlePagination) *Client { + config := c.Copy() + return &Client{ + Config: config, + ServiceName: serviceName, + TokenRefresher: refresher, + HandlePagination: pagination, + DefaultHeader: getDefaultAuthHeaders(serviceName, c), + } +} + +//SendRequest ... 
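+//
+// Usage sketch (illustrative only): the HTTP verb helpers below all funnel
+// through SendRequest, which retries once after a 401 by refreshing the
+// auth token. A direct call, with a placeholder path, looks like:
+//
+//	var out map[string]interface{}
+//	resp, err := c.SendRequest(rest.GetRequest(c.URL("/v2/info")), &out)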
+func (c *Client) SendRequest(r *rest.Request, respV interface{}) (*gohttp.Response, error) {
+	httpClient := c.Config.HTTPClient
+	if httpClient == nil {
+		httpClient = gohttp.DefaultClient
+	}
+
+	restClient := &rest.Client{
+		DefaultHeader: c.DefaultHeader,
+		HTTPClient:    httpClient,
+	}
+
+	resp, err := restClient.Do(r, respV, nil)
+
+	// The response returned by the Go HTTP client's Do() can be nil if the
+	// request times out. For convenience, the response returned by this
+	// method is guaranteed to be non-nil.
+	if resp == nil {
+		return new(gohttp.Response), err
+	}
+
+	if err != nil {
+		err = bmxerror.WrapNetworkErrors(resp.Request.URL.Host, err)
+	}
+
+	// If the token is invalid, refresh it and try again. The refresh error is
+	// kept in its own variable so that it does not shadow err; otherwise a
+	// successful retry would still return the original 401 failure.
+	if resp.StatusCode == 401 && c.TokenRefresher != nil {
+		_, refreshErr := c.TokenRefresher.RefreshToken()
+		switch refreshErr.(type) {
+		case nil:
+			restClient.DefaultHeader = getDefaultAuthHeaders(c.ServiceName, c.Config)
+			resp, err = restClient.Do(r, respV, nil)
+		case *bmxerror.InvalidTokenError:
+			return resp, bmxerror.NewRequestFailure("InvalidToken", fmt.Sprintf("%v", refreshErr), 401)
+		default:
+			return resp, fmt.Errorf("Authentication failed, unable to refresh auth token: %v. Try again later", refreshErr)
+		}
+	}
+
+	return resp, err
+}
+
+//Get ...
+func (c *Client) Get(path string, respV interface{}, extraHeader ...interface{}) (*gohttp.Response, error) {
+	r := rest.GetRequest(c.URL(path))
+	for _, t := range extraHeader {
+		addToRequestHeader(t, r)
+	}
+	return c.SendRequest(r, respV)
+}
+
+//Put ...
+func (c *Client) Put(path string, data interface{}, respV interface{}, extraHeader ...interface{}) (*gohttp.Response, error) {
+	r := rest.PutRequest(c.URL(path)).Body(data)
+	for _, t := range extraHeader {
+		addToRequestHeader(t, r)
+	}
+	return c.SendRequest(r, respV)
+}
+
+//Patch ...
+func (c *Client) Patch(path string, data interface{}, respV interface{}, extraHeader ...interface{}) (*gohttp.Response, error) {
+	r := rest.PatchRequest(c.URL(path)).Body(data)
+	for _, t := range extraHeader {
+		addToRequestHeader(t, r)
+	}
+	return c.SendRequest(r, respV)
+}
+
+//Post ...
+func (c *Client) Post(path string, data interface{}, respV interface{}, extraHeader ...interface{}) (*gohttp.Response, error) {
+	r := rest.PostRequest(c.URL(path)).Body(data)
+	for _, t := range extraHeader {
+		addToRequestHeader(t, r)
+	}
+	return c.SendRequest(r, respV)
+}
+
+//Delete ...
+func (c *Client) Delete(path string, extraHeader ...interface{}) (*gohttp.Response, error) {
+	r := rest.DeleteRequest(c.URL(path))
+	for _, t := range extraHeader {
+		addToRequestHeader(t, r)
+	}
+	return c.SendRequest(r, nil)
+}
+
+//DeleteWithBody ...
+func (c *Client) DeleteWithBody(path string, data interface{}, extraHeader ...interface{}) (*gohttp.Response, error) {
+	r := rest.DeleteRequest(c.URL(path)).Body(data)
+	for _, t := range extraHeader {
+		addToRequestHeader(t, r)
+	}
+	return c.SendRequest(r, nil)
+}
+
+func addToRequestHeader(h interface{}, r *rest.Request) {
+	switch v := h.(type) {
+	case map[string]string:
+		for key, value := range v {
+			r.Set(key, value)
+		}
+	}
+}
+
+//GetPaginated ...
+func (c *Client) GetPaginated(path string, resource interface{}, cb func(interface{}) bool) (resp *gohttp.Response, err error) {
+	return c.HandlePagination(c, path, resource, cb)
+}
+
+//URL ...
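+//
+// Behavior sketch (illustrative only): cleanPath normalizes whatever the
+// caller passes, so with *c.Config.Endpoint set to
+// "https://mccp.ng.bluemix.net" each of these yields the same URL:
+//
+//	c.URL("v2/spaces")    // https://mccp.ng.bluemix.net/v2/spaces
+//	c.URL("/v2/spaces")   // https://mccp.ng.bluemix.net/v2/spaces
+//	c.URL("/v2//spaces/") // https://mccp.ng.bluemix.net/v2/spaces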
+func (c *Client) URL(path string) string { + return *c.Config.Endpoint + cleanPath(path) +} + +func cleanPath(p string) string { + if p == "" { + return "/" + } + if !strings.HasPrefix(p, "/") { + p = "/" + p + } + return path.Clean(p) +} + +const ( + userAgentHeader = "User-Agent" + authorizationHeader = "Authorization" + uaaAccessTokenHeader = "X-Auth-Uaa-Token" + + iamRefreshTokenHeader = "X-Auth-Refresh-Token" +) + +func getDefaultAuthHeaders(serviceName bluemix.ServiceName, c *bluemix.Config) gohttp.Header { + h := gohttp.Header{} + switch serviceName { + case bluemix.MccpService, bluemix.AccountService: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.UAAAccessToken) + + case bluemix.ContainerService: + h.Set(userAgentHeader, http.UserAgent()) + h.Set(authorizationHeader, c.IAMAccessToken) + h.Set(iamRefreshTokenHeader, c.IAMRefreshToken) + h.Set(uaaAccessTokenHeader, c.UAAAccessToken) + default: + log.Println("Unknown service") + } + return h +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/config.go b/vendor/github.com/IBM-Bluemix/bluemix-go/config.go new file mode 100644 index 0000000000..629e2d9e97 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/config.go @@ -0,0 +1,85 @@ +package bluemix + +import ( + "net/http" + "time" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/endpoints" +) + +//ServiceName .. +type ServiceName string + +const ( + //AccountService ... + AccountService ServiceName = ServiceName("account") + //MccpService ... + MccpService ServiceName = ServiceName("mccp") + //ContainerService ... + ContainerService ServiceName = ServiceName("container") + //UAAService ... + UAAService ServiceName = ServiceName("uaa") + //IAMService ... + IAMService ServiceName = ServiceName("iam") +) + +//Config ... +type Config struct { + IBMID string + + IBMIDPassword string + + BluemixAPIKey string + + IAMAccessToken string + IAMRefreshToken string + UAAAccessToken string + UAARefreshToken string + + //Region is optional. If region is not provided then endpoint must be provided + Region string + //Endpoint is optional. If endpoint is not provided then endpoint must be obtained from region via EndpointLocator + Endpoint *string + //TokenProviderEndpoint is optional. If endpoint is not provided then endpoint must be obtained from region via EndpointLocator + TokenProviderEndpoint *string + EndpointLocator endpoints.EndpointLocator + MaxRetries *int + RetryDelay *time.Duration + + HTTPTimeout time.Duration + + Debug bool + + HTTPClient *http.Client + + SSLDisable bool +} + +//Copy allows the configuration to be overriden or added +//Typically the endpoints etc +func (c *Config) Copy(mccpgs ...*Config) *Config { + out := new(Config) + *out = *c + if len(mccpgs) == 0 { + return out + } + for _, mergeInput := range mccpgs { + if mergeInput.Endpoint != nil { + out.Endpoint = mergeInput.Endpoint + } + } + return out +} + +//ValidateConfigForService ... 
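+//
+// Usage sketch (illustrative only); the API key and region are placeholders:
+//
+//	cfg := &Config{BluemixAPIKey: "api-key", Region: "us-south"}
+//	if err := cfg.ValidateConfigForService(MccpService); err != nil {
+//		// insufficient credentials, or neither region nor endpoint set
+//	}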
+func (c *Config) ValidateConfigForService(svc ServiceName) error { + if (c.IBMID == "" || c.IBMIDPassword == "") && c.BluemixAPIKey == "" { + return bmxerror.New(ErrInsufficientCredentials, "Please check the documentation on how to configure the Bluemix credentials") + } + + if c.Region == "" && (c.Endpoint == nil || *c.Endpoint == "") { + return bmxerror.New(ErrInvalidConfigurationCode, "Please provide region or endpoint") + } + return nil +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/endpoints/endpoints.go b/vendor/github.com/IBM-Bluemix/bluemix-go/endpoints/endpoints.go new file mode 100644 index 0000000000..2f65a09476 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/endpoints/endpoints.go @@ -0,0 +1,128 @@ +package endpoints + +import ( + "fmt" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" + "github.com/IBM-Bluemix/bluemix-go/helpers" +) + +//EndpointLocator ... +type EndpointLocator interface { + AccountManagementEndpoint() (string, error) + CFAPIEndpoint() (string, error) + MCCPAPIEndpoint() (string, error) + ContainerEndpoint() (string, error) + IAMEndpoint() (string, error) + UAAEndpoint() (string, error) +} + +const ( + //ErrCodeServiceEndpoint ... + ErrCodeServiceEndpoint = "ServiceEndpointDoesnotExist" +) + +var regionToEndpoint = map[string]map[string]string{ + "cf": { + "us-south": "https://api.ng.bluemix.net", + "eu-gb": "https://api.eu-gb.bluemix.net", + "au-syd": "https://api.au-syd.bluemix.net", + "eu-de": "https://api.eu-de.bluemix.net", + }, + "mccp": { + "us-south": "https://mccp.ng.bluemix.net", + "eu-gb": "https://mccp.eu-gb.bluemix.net", + "au-syd": "https://mccp.au-syd.bluemix.net", + "eu-de": "https://mccp.eu-de.bluemix.net", + }, + "iam": { + "us-south": "https://iam.ng.bluemix.net", + "eu-gb": "https://iam.eu-gb.bluemix.net", + "au-syd": "https://iam.au-syd.bluemix.net", + "eu-de": "https://iam.eu-de.bluemix.net", + }, + + "uaa": { + "us-south": "https://login.ng.bluemix.net/UAALoginServerWAR", + "eu-gb": "https://login.eu-gb.bluemix.net/UAALoginServerWAR", + "au-syd": "https://login.au-syd.bluemix.net/UAALoginServerWAR", + "eu-de": "https://login.eu-de.bluemix.net/UAALoginServerWAR", + }, + "account": { + "us-south": "https://accountmanagement.ng.bluemix.net", + "eu-gb": "https://accountmanagement.eu-gb.bluemix.net", + "au-syd": "https://accountmanagement.au-syd.bluemix.net", + "eu-de": "https://accountmanagement.eu-de.bluemix.net", + }, + "cs": { + "us-south": "https://us-south.containers.bluemix.net", + "eu-de": "https://eu-central.containers.bluemix.net", + }, +} + +func init() { + //TODO populate the endpoints which can be retrieved from given endpoints dynamically + //Example - UAA can be found from the CF endpoint +} + +type endpointLocator struct { + region string +} + +//NewEndpointLocator ... 
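+//
+// Resolution sketch (illustrative only): lookup is purely by region, with an
+// environment variable consulted first as an override (see the EnvFallBack
+// calls below).
+//
+//	loc := NewEndpointLocator("us-south")
+//	ep, err := loc.MCCPAPIEndpoint()
+//	// ep == "https://mccp.ng.bluemix.net" unless IBMCLOUD_MCCP_API_ENDPOINT is set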
+func NewEndpointLocator(region string) EndpointLocator { + return &endpointLocator{region: region} +} + +func (e *endpointLocator) CFAPIEndpoint() (string, error) { + if ep, ok := regionToEndpoint["cf"][e.region]; ok { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + return helpers.EnvFallBack([]string{"IBMCLOUD_CF_API_ENDPOINT"}, ep), nil + + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Cloud Foundry endpoint doesn't exist for region: %q", e.region)) +} + +func (e *endpointLocator) MCCPAPIEndpoint() (string, error) { + if ep, ok := regionToEndpoint["mccp"][e.region]; ok { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + return helpers.EnvFallBack([]string{"IBMCLOUD_MCCP_API_ENDPOINT"}, ep), nil + + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("MCCP API endpoint doesn't exist for region: %q", e.region)) +} + +func (e *endpointLocator) UAAEndpoint() (string, error) { + if ep, ok := regionToEndpoint["uaa"][e.region]; ok { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + return helpers.EnvFallBack([]string{"IBMCLOUD_UAA_ENDPOINT"}, ep), nil + + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("UAA endpoint doesn't exist for region: %q", e.region)) +} + +func (e *endpointLocator) AccountManagementEndpoint() (string, error) { + if ep, ok := regionToEndpoint["account"][e.region]; ok { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + return helpers.EnvFallBack([]string{"IBMCLOUD_ACCOUNT_MANAGEMENT_API_ENDPOINT"}, ep), nil + + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Account Management endpoint doesn't exist for region: %q", e.region)) +} + +func (e *endpointLocator) IAMEndpoint() (string, error) { + if ep, ok := regionToEndpoint["iam"][e.region]; ok { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + return helpers.EnvFallBack([]string{"IBMCLOUD_IAM_API_ENDPOINT"}, ep), nil + + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("IAM endpoint doesn't exist for region: %q", e.region)) +} + +func (e *endpointLocator) ContainerEndpoint() (string, error) { + if ep, ok := regionToEndpoint["cs"][e.region]; ok { + //As the current list of regionToEndpoint above is not exhaustive we allow to read endpoints from the env + return helpers.EnvFallBack([]string{"IBMCLOUD_CS_API_ENDPOINT"}, ep), nil + } + return "", bmxerror.New(ErrCodeServiceEndpoint, fmt.Sprintf("Container Service endpoint doesn't exist for region: %q", e.region)) +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/errors.go b/vendor/github.com/IBM-Bluemix/bluemix-go/errors.go new file mode 100644 index 0000000000..e260fc4453 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/errors.go @@ -0,0 +1,12 @@ +package bluemix + +const ( + //ErrMissingRegionCode .. + ErrMissingRegionCode = "MissingRegion" + + //ErrInvalidConfigurationCode .. + ErrInvalidConfigurationCode = "InvalidConfiguration" + + //ErrInsufficientCredentials .. 
+ ErrInsufficientCredentials = "InsufficientCredentials" +) diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/conversion.go b/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/conversion.go new file mode 100644 index 0000000000..2249b21245 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/conversion.go @@ -0,0 +1,33 @@ +package helpers + +import "time" + +// Bool returns a pointer to the bool value +func Bool(v bool) *bool { + return &v +} + +// Int returns a pointer to the int value +func Int(v int) *int { + return &v +} + +// String returns a pointer to the string value +func String(v string) *string { + return &v +} + +// Map returns a pointer to the map value +func Map(v map[string]interface{}) *map[string]interface{} { + return &v +} + +// IntSlice returns a pointer to the IntSlice value +func IntSlice(v []int) *[]int { + return &v +} + +// Duration returns a pointer to the time.Duration +func Duration(v time.Duration) *time.Duration { + return &v +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/env.go b/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/env.go new file mode 100644 index 0000000000..3b65d26bb4 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/env.go @@ -0,0 +1,13 @@ +package helpers + +import "os" + +//EnvFallBack ... +func EnvFallBack(envs []string, defaultValue string) string { + for _, k := range envs { + if v := os.Getenv(k); v != "" { + return v + } + } + return defaultValue +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/file.go b/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/file.go new file mode 100644 index 0000000000..4ad260fb92 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/file.go @@ -0,0 +1,117 @@ +package helpers + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + if err == nil { + return true + } + if os.IsNotExist(err) { + return false + } + return false +} + +func RemoveFile(path string) error { + if FileExists(path) { + return os.Remove(path) + } + return nil +} + +func CopyFile(src string, dest string) (err error) { + srcFile, err := os.Open(src) + if err != nil { + return + } + defer srcFile.Close() + + srcStat, err := srcFile.Stat() + if err != nil { + return + } + + if !srcStat.Mode().IsRegular() { + return fmt.Errorf("%s is not a regular file.", src) + } + + destFile, err := os.Create(dest) + if err != nil { + return + } + defer destFile.Close() + + _, err = io.Copy(destFile, srcFile) + return +} + +func CopyDir(src string, dest string) (err error) { + srcStat, err := os.Stat(src) + if err != nil { + return + } + + if !srcStat.Mode().IsDir() { + return fmt.Errorf("%s is not a directory.", src) + } + + _, err = os.Stat(dest) + if !os.IsNotExist(err) { + return fmt.Errorf("Destination %s already exists.", dest) + } + + entries, err := ioutil.ReadDir(src) + if err != nil { + return + } + + err = os.MkdirAll(dest, srcStat.Mode()) + if err != nil { + return + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + destPath := filepath.Join(dest, entry.Name()) + + if entry.Mode().IsDir() { + err = CopyDir(srcPath, destPath) + } else { + err = CopyFile(srcPath, destPath) + } + if err != nil { + return + } + } + + return +} + +//RemoveFilesWithPattern ... 
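+//
+// Usage sketch (illustrative only): pattern is a Go regular expression
+// matched against each entry name in targetDir; matches are removed with
+// os.RemoveAll, so directories are deleted too. The arguments here are
+// placeholders.
+//
+//	err := RemoveFilesWithPattern("/tmp/workdir", `\.log$`)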
+func RemoveFilesWithPattern(targetDir, pattern string) error { + r, err := regexp.Compile(pattern) + if err != nil { + return err + } + files, err := ioutil.ReadDir(targetDir) + if err != nil { + return err + } + for _, f := range files { + if r.MatchString(f.Name()) { + err := os.RemoveAll(filepath.Join(targetDir, f.Name())) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/zip.go b/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/zip.go new file mode 100644 index 0000000000..8d7cd3b83e --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/helpers/zip.go @@ -0,0 +1,56 @@ +package helpers + +import ( + "archive/zip" + "io" + "os" + "path/filepath" +) + +//Unzip src to dest +func Unzip(src, dest string) error { + r, err := zip.OpenReader(src) + if err != nil { + return err + } + defer r.Close() + + err = os.MkdirAll(dest, 0755) + if err != nil { + return err + } + + for _, f := range r.File { + err := extractFileInZipArchive(dest, f) + if err != nil { + return err + } + } + + return nil +} + +func extractFileInZipArchive(dest string, f *zip.File) error { + rc, err := f.Open() + if err != nil { + return err + } + defer rc.Close() + + path := filepath.Join(dest, f.Name) + + if f.FileInfo().IsDir() { + return os.MkdirAll(path, f.Mode()) + } + err = os.MkdirAll(filepath.Dir(path), f.Mode()) + if err != nil { + return err + } + zf, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) + if err != nil { + return err + } + defer zf.Close() + _, err = io.Copy(zf, rc) + return err +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/http/http.go b/vendor/github.com/IBM-Bluemix/bluemix-go/http/http.go new file mode 100644 index 0000000000..fa36b332bb --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/http/http.go @@ -0,0 +1,40 @@ +package http + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "runtime" + "time" + + "github.com/IBM-Bluemix/bluemix-go" +) + +//NewHTTPClient ... +func NewHTTPClient(config *bluemix.Config) *http.Client { + return &http.Client{ + Transport: makeTransport(config), + Timeout: config.HTTPTimeout, + } +} + +func makeTransport(config *bluemix.Config) http.RoundTripper { + return NewTraceLoggingTransport(&http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 50 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 20 * time.Second, + DisableCompression: true, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: config.SSLDisable, + }, + }) +} + +//UserAgent ... +func UserAgent() string { + return fmt.Sprintf("Blumix-go SDK %s / %s ", bluemix.Version, runtime.GOOS) +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/http/transport.go b/vendor/github.com/IBM-Bluemix/bluemix-go/http/transport.go new file mode 100644 index 0000000000..41906c0899 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/http/transport.go @@ -0,0 +1,81 @@ +package http + +import ( + "net/http" + "net/http/httputil" + "strings" + "time" + + "github.com/IBM-Bluemix/bluemix-go/trace" +) + +// TraceLoggingTransport is a thin wrapper around Transport. It dumps HTTP +// request and response using trace logger, based on the "BLUEMIX_TRACE" +// environment variable. Sensitive user data will be replaced by text +// "[PRIVATE DATA HIDDEN]". +type TraceLoggingTransport struct { + rt http.RoundTripper +} + +// NewTraceLoggingTransport returns a TraceLoggingTransport wrapping around +// the passed RoundTripper. 
If the passed RoundTripper is nil, HTTP +// DefaultTransport is used. +func NewTraceLoggingTransport(rt http.RoundTripper) *TraceLoggingTransport { + if rt == nil { + return &TraceLoggingTransport{ + rt: http.DefaultTransport, + } + } + return &TraceLoggingTransport{ + rt: rt, + } +} + +//RoundTrip ... +func (r *TraceLoggingTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + start := time.Now() + r.dumpRequest(req, start) + resp, err = r.rt.RoundTrip(req) + if err != nil { + return + } + r.dumpResponse(resp, start) + return +} + +func (r *TraceLoggingTransport) dumpRequest(req *http.Request, start time.Time) { + shouldDisplayBody := !strings.Contains(req.Header.Get("Content-Type"), "multipart/form-data") + + dumpedRequest, err := httputil.DumpRequest(req, shouldDisplayBody) + if err != nil { + trace.Logger.Printf("An error occurred while dumping request:\n%v\n", err) + return + } + + trace.Logger.Printf("\n%s [%s]\n%s\n", + "REQUEST:", + start.Format(time.RFC3339), + trace.Sanitize(string(dumpedRequest))) + + if !shouldDisplayBody { + trace.Logger.Println("[MULTIPART/FORM-DATA CONTENT HIDDEN]") + } +} + +func (r *TraceLoggingTransport) dumpResponse(res *http.Response, start time.Time) { + end := time.Now() + + shouldDisplayBody := !strings.Contains(res.Header.Get("Content-Type"), "application/zip") + dumpedResponse, err := httputil.DumpResponse(res, shouldDisplayBody) + if err != nil { + trace.Logger.Printf("An error occurred while dumping response:\n%v\n", err) + return + } + + trace.Logger.Printf("\n%s [%s] %s %.0fms\n%s\n", + "RESPONSE:", + end.Format(time.RFC3339), + "Elapsed:", + end.Sub(start).Seconds()*1000, + trace.Sanitize(string(dumpedResponse))) +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/rest/client.go b/vendor/github.com/IBM-Bluemix/bluemix-go/rest/client.go new file mode 100644 index 0000000000..bd4336819f --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/rest/client.go @@ -0,0 +1,163 @@ +// Package rest provides a simple REST client for creating and sending +// API requests. + +// Examples: +// Creating request +// // GET request +// GetRequest("http://www.example.com"). +// Set("Accept", "application/json"). +// Query("foo1", "bar1"). +// Query("foo2", "bar2") +// +// // JSON body +// foo = Foo{Bar: "val"} +// PostRequest("http://www.example.com"). +// Body(foo) + +// // String body +// PostRequest("http://www.example.com"). +// Body("{\"bar\": \"val\"}") + +// // Stream body +// PostRequest("http://www.example.com"). +// Body(strings.NewReader("abcde")) + +// // Multipart POST request +// var f *os.File +// PostRequest("http://www.example.com"). +// Field("foo", "bar"). +// File("file1", File{Name: f.Name(), Content: f}). +// File("file2", File{Name: "1.txt", Content: []byte("abcde"), Type: "text/plain") + +// // Build to an HTTP request +// GetRequest("http://www.example.com").Build() + +// Sending request: +// client := NewClient() +// var foo = struct { +// Bar string +// }{} +// var apiErr = struct { +// Message string +// }{} +// resp, err := client.Do(request, &foo, &apiErr) +package rest + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/IBM-Bluemix/bluemix-go/bmxerror" +) + +const ( + //ErrCodeEmptyResponse ... + ErrCodeEmptyResponse = "EmptyResponseBody" +) + +//ErrEmptyResponseBody ... +var ErrEmptyResponseBody = bmxerror.New(ErrCodeEmptyResponse, "empty response body") + +// Client is a REST client. It's recommend that a client be created with the +// NewClient() method. 
+type Client struct {
+	// The HTTP client to be used. Default is HTTP's defaultClient.
+	HTTPClient *http.Client
+	// Default header for all outgoing HTTP requests.
+	DefaultHeader http.Header
+}
+
+// NewClient creates a new REST client.
+func NewClient() *Client {
+	return &Client{
+		HTTPClient: http.DefaultClient,
+	}
+}
+
+// Do sends a request and returns the HTTP response. The resp.Body is
+// consumed and closed by this method.
+//
+// For a 2XX response, the body is JSON decoded into the value pointed to by
+// respV.
+//
+// For a non-2XX response, an attempt is made to unmarshal the response body
+// into the value pointed to by errV; if that succeeds, Do returns a nil
+// error and the caller is expected to inspect errV. If the unmarshal fails,
+// a request failure error carrying the status code and the raw response
+// text is returned.
+func (c *Client) Do(r *Request, respV interface{}, errV interface{}) (*http.Response, error) {
+	req, err := c.makeRequest(r)
+	if err != nil {
+		return nil, err
+	}
+
+	client := c.HTTPClient
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return resp, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		raw, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return resp, fmt.Errorf("Error reading response: %v", err)
+		}
+
+		if len(raw) > 0 && errV != nil {
+			if json.Unmarshal(raw, errV) == nil {
+				return resp, nil
+			}
+		}
+
+		return resp, bmxerror.NewRequestFailure("ServerErrorResponse", string(raw), resp.StatusCode)
+	}
+
+	if respV != nil {
+		switch respV.(type) {
+		case io.Writer:
+			_, err = io.Copy(respV.(io.Writer), resp.Body)
+		default:
+			err = json.NewDecoder(resp.Body).Decode(respV)
+			if err == io.EOF {
+				err = ErrEmptyResponseBody
+			}
+		}
+	}
+
+	return resp, err
+}
+
+func (c *Client) makeRequest(r *Request) (*http.Request, error) {
+	req, err := r.Build()
+	if err != nil {
+		return nil, err
+	}
+
+	c.applyDefaultHeader(req)
+
+	if req.Header.Get("Accept") == "" {
+		req.Header.Set("Accept", "application/json")
+	}
+	if req.Header.Get("Content-Type") == "" {
+		req.Header.Set("Content-Type", "application/json")
+	}
+
+	return req, nil
+}
+
+func (c *Client) applyDefaultHeader(req *http.Request) {
+	for k, vs := range c.DefaultHeader {
+		if req.Header.Get(k) != "" {
+			continue
+		}
+		for _, v := range vs {
+			req.Header.Add(k, v)
+		}
+	}
+}
diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/rest/request.go b/vendor/github.com/IBM-Bluemix/bluemix-go/rest/request.go
new file mode 100644
index 0000000000..31981c31b7
--- /dev/null
+++ b/vendor/github.com/IBM-Bluemix/bluemix-go/rest/request.go
@@ -0,0 +1,279 @@
+package rest
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"mime/multipart"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"strings"
+)
+
+const (
+	contentType               = "Content-Type"
+	jsonContentType           = "application/json"
+	formUrlEncodedContentType = "application/x-www-form-urlencoded"
+)
+
+// File represents a file upload in the POST request
+type File struct {
+	// File name
+	Name string
+	// File content
+	Content io.Reader
+	// Mime type, defaults to "application/octet-stream"
+	Type string
+}
+
+// Request is a REST request. It also acts as an HTTP request builder.
+type Request struct {
+	method string
+	rawUrl string
+	header http.Header
+
+	queryParams url.Values
+	formParams  url.Values
+
+	// files to upload
+	files map[string][]File
+
+	// custom request body
+	body interface{}
+}
+
+// NewRequest creates a new REST request with the given rawUrl.
+func NewRequest(rawUrl string) *Request { + return &Request{ + rawUrl: rawUrl, + header: http.Header{}, + queryParams: url.Values{}, + formParams: url.Values{}, + files: make(map[string][]File), + } +} + +// Method sets HTTP method of the request. +func (r *Request) Method(method string) *Request { + r.method = method + return r +} + +// GetRequest creates a REST request with GET method and the given rawUrl. +func GetRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("GET") +} + +// HeadRequest creates a REST request with HEAD method and the given rawUrl. +func HeadRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("HEAD") +} + +// PostRequest creates a REST request with POST method and the given rawUrl. +func PostRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("POST") +} + +// PutRequest creates a REST request with PUT method and the given rawUrl. +func PutRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("PUT") +} + +// DeleteRequest creates a REST request with DELETE method and the given +// rawUrl. +func DeleteRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("DELETE") +} + +// PatchRequest creates a REST request with PATCH method and the given +// rawUrl. +func PatchRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("PATCH") +} + +// Creates a request with HTTP OPTIONS. +func OptionsRequest(rawUrl string) *Request { + return NewRequest(rawUrl).Method("OPTIONS") +} + +// Add adds the key, value pair to the request header. It appends to any +// existing values associated with key. +func (r *Request) Add(key string, value string) *Request { + r.header.Add(http.CanonicalHeaderKey(key), value) + return r +} + +// Set sets the header entries associated with key to the single element value. +// It replaces any existing values associated with key. +func (r *Request) Set(key string, value string) *Request { + r.header.Set(http.CanonicalHeaderKey(key), value) + return r +} + +// Query appends the key, value pair to the request query which will be +// encoded as url query parameters on HTTP request's url. +func (r *Request) Query(key string, value string) *Request { + r.queryParams.Add(key, value) + return r +} + +// Field appends the key, value pair to the form fields in the POST request. +func (r *Request) Field(key string, value string) *Request { + r.formParams.Add(key, value) + return r +} + +// File appends a file upload item in the POST request. The file content will +// be consumed when building HTTP request (see Build()) and closed if it's +// also a ReadCloser type. +func (r *Request) File(name string, file File) *Request { + r.files[name] = append(r.files[name], file) + return r +} + +// Body sets the request body. Accepted types are string, []byte, io.Reader, +// or structs to be JSON encodeded. +func (r *Request) Body(body interface{}) *Request { + r.body = body + return r +} + +// Build builds a HTTP request according to the settings in the REST request. 
+func (r *Request) Build() (*http.Request, error) { + url, err := r.buildURL() + if err != nil { + return nil, err + } + + body, err := r.buildBody() + if err != nil { + return nil, err + } + + req, err := http.NewRequest(r.method, url, body) + if err != nil { + return req, err + } + + for k, vs := range r.header { + for _, v := range vs { + req.Header.Add(k, v) + } + } + + return req, nil +} + +func (r *Request) buildURL() (string, error) { + if r.rawUrl == "" || len(r.queryParams) == 0 { + return r.rawUrl, nil + } + u, err := url.Parse(r.rawUrl) + if err != nil { + return "", err + } + q := u.Query() + for k, vs := range r.queryParams { + for _, v := range vs { + q.Add(k, v) + } + } + u.RawQuery = q.Encode() + return u.String(), nil +} + +func (r *Request) buildBody() (io.Reader, error) { + if len(r.files) > 0 { + return r.buildFormMultipart() + } + + if len(r.formParams) > 0 { + return r.buildFormFields() + } + + return r.buildCustomBody() +} + +func (r *Request) buildFormMultipart() (io.Reader, error) { + b := new(bytes.Buffer) + w := multipart.NewWriter(b) + defer w.Close() + + for k, files := range r.files { + for _, f := range files { + defer func() { + if f, ok := f.Content.(io.ReadCloser); ok { + f.Close() + } + }() + + p, err := createPartWriter(w, k, f) + if err != nil { + return nil, err + } + _, err = io.Copy(p, f.Content) + if err != nil { + return nil, err + } + } + } + + for k, vs := range r.formParams { + for _, v := range vs { + err := w.WriteField(k, v) + if err != nil { + return nil, err + } + } + } + + r.header.Set(contentType, w.FormDataContentType()) + return b, nil +} + +func createPartWriter(w *multipart.Writer, fieldName string, f File) (io.Writer, error) { + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", + fmt.Sprintf(`form-data; name="%s"; filename="%s"`, + escapeQuotes(fieldName), escapeQuotes(f.Name))) + if f.Type != "" { + h.Set("Content-Type", f.Type) + } else { + h.Set("Content-Type", "application/octet-stream") + } + return w.CreatePart(h) +} + +var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + +func escapeQuotes(s string) string { + return quoteEscaper.Replace(s) +} + +func (r *Request) buildFormFields() (io.Reader, error) { + r.header.Set(contentType, formUrlEncodedContentType) + return strings.NewReader(r.formParams.Encode()), nil +} + +func (r *Request) buildCustomBody() (io.Reader, error) { + if r.body == nil { + return nil, nil + } + + switch b := r.body; b.(type) { + case string: + return strings.NewReader(b.(string)), nil + case []byte: + return bytes.NewReader(b.([]byte)), nil + case io.Reader: + return b.(io.Reader), nil + default: + raw, err := json.Marshal(b) + if err != nil { + return nil, fmt.Errorf("Invalid JSON request: %v", err) + } + return bytes.NewReader(raw), nil + } +} diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/session/session.go b/vendor/github.com/IBM-Bluemix/bluemix-go/session/session.go new file mode 100644 index 0000000000..c5a6a06bf6 --- /dev/null +++ b/vendor/github.com/IBM-Bluemix/bluemix-go/session/session.go @@ -0,0 +1,80 @@ +package session + +import ( + "fmt" + "time" + + bluemix "github.com/IBM-Bluemix/bluemix-go" + "github.com/IBM-Bluemix/bluemix-go/endpoints" + "github.com/IBM-Bluemix/bluemix-go/helpers" + "github.com/IBM-Bluemix/bluemix-go/trace" +) + +//Session ... +type Session struct { + Config *bluemix.Config +} + +//New ... 
+func New(configs ...*bluemix.Config) (*Session, error) {
+    var c *bluemix.Config
+
+    if len(configs) == 0 {
+        c = &bluemix.Config{}
+    } else {
+        c = configs[0]
+    }
+    sess := &Session{
+        Config: c,
+    }
+
+    if len(c.IBMID) == 0 {
+        c.IBMID = helpers.EnvFallBack([]string{"IBMID"}, "")
+    }
+
+    if len(c.IBMIDPassword) == 0 {
+        c.IBMIDPassword = helpers.EnvFallBack([]string{"IBMID_PASSWORD"}, "")
+    }
+
+    if len(c.BluemixAPIKey) == 0 {
+        c.BluemixAPIKey = helpers.EnvFallBack([]string{"BM_API_KEY", "BLUEMIX_API_KEY"}, "")
+    }
+
+    if len(c.Region) == 0 {
+        c.Region = helpers.EnvFallBack([]string{"BM_REGION", "BLUEMIX_REGION"}, "us-south")
+    }
+    if c.MaxRetries == nil {
+        c.MaxRetries = helpers.Int(3)
+    }
+    if c.HTTPTimeout == 0 {
+        c.HTTPTimeout = 180 * time.Second
+        timeout := helpers.EnvFallBack([]string{"BM_TIMEOUT", "BLUEMIX_TIMEOUT"}, "180")
+        timeoutDuration, err := time.ParseDuration(fmt.Sprintf("%ss", timeout))
+        if err != nil {
+            fmt.Printf("BM_TIMEOUT or BLUEMIX_TIMEOUT has invalid time format. Default timeout will be set to %q", c.HTTPTimeout)
+        }
+        if err == nil {
+            c.HTTPTimeout = timeoutDuration
+        }
+    }
+
+    if c.RetryDelay == nil {
+        c.RetryDelay = helpers.Duration(30 * time.Second)
+    }
+    if c.EndpointLocator == nil {
+        c.EndpointLocator = endpoints.NewEndpointLocator(c.Region)
+    }
+
+    if c.Debug {
+        trace.Logger = trace.NewLogger("true")
+    }
+
+    return sess, nil
+}
+
+//Copy allows a session to create a copy of itself and optionally override any defaults via the config
+func (s *Session) Copy(mccpgs ...*bluemix.Config) *Session {
+    return &Session{
+        Config: s.Config.Copy(mccpgs...),
+    }
+}
diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/trace/trace.go b/vendor/github.com/IBM-Bluemix/bluemix-go/trace/trace.go
new file mode 100644
index 0000000000..64cba8e70b
--- /dev/null
+++ b/vendor/github.com/IBM-Bluemix/bluemix-go/trace/trace.go
@@ -0,0 +1,129 @@
+package trace
+
+import (
+    "fmt"
+    "io"
+    "log"
+    "os"
+    "regexp"
+    "strings"
+)
+
+//Printer ...
+type Printer interface {
+    Print(v ...interface{})
+    Printf(format string, v ...interface{})
+    Println(v ...interface{})
+}
+
+//Closer ...
+type Closer interface {
+    Close() error
+}
+
+//PrinterCloser ...
+type PrinterCloser interface {
+    Printer
+    Closer
+}
+
+//NullLogger ...
+type NullLogger struct{}
+
+func (l *NullLogger) Print(v ...interface{})                 {}
+func (l *NullLogger) Printf(format string, v ...interface{}) {}
+func (l *NullLogger) Println(v ...interface{})               {}
+
+type loggerImpl struct {
+    *log.Logger
+    c io.WriteCloser
+}
+
+func (loggerImpl *loggerImpl) Close() error {
+    if loggerImpl.c != nil {
+        return loggerImpl.c.Close()
+    }
+    return nil
+}
+
+func newLoggerImpl(out io.Writer, prefix string, flag int) *loggerImpl {
+    l := log.New(out, prefix, flag)
+    c := out.(io.WriteCloser)
+    return &loggerImpl{
+        Logger: l,
+        c:      c,
+    }
+}
+
+//Logger is the global logger
+var Logger Printer = NewLogger("")
+
+// NewLogger returns a printer for the given trace setting.
+func NewLogger(bluemix_trace string) Printer {
+    switch strings.ToLower(bluemix_trace) {
+    case "", "false":
+        return new(NullLogger)
+    case "true":
+        return NewStdLogger()
+    default:
+        return NewFileLogger(bluemix_trace)
+    }
+}
+
+// NewStdLogger returns a printer that writes to os.Stderr.
+func NewStdLogger() PrinterCloser {
+    return newLoggerImpl(os.Stderr, "", 0)
+}
+
+// NewFileLogger returns a printer that writes to the given file path.
+func NewFileLogger(path string) PrinterCloser {
+    file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666)
+    if err != nil {
+        logger := NewStdLogger()
+        logger.Printf("An error occurred when creating log file '%s':\n%v\n\n", path, err)
+        return logger
+    }
+    return newLoggerImpl(file, "", 0)
+}
+
+// Sanitize returns a clean string with sensitive user data in the input
+// replaced by PRIVATE_DATA_PLACEHOLDER.
+func Sanitize(input string) string {
+    re := regexp.MustCompile(`(?m)^Authorization: .*`)
+    sanitized := re.ReplaceAllString(input, "Authorization: "+privateDataPlaceholder())
+
+    re = regexp.MustCompile(`(?m)^X-Auth-Token: .*`)
+    sanitized = re.ReplaceAllString(sanitized, "X-Auth-Token: "+privateDataPlaceholder())
+
+    re = regexp.MustCompile(`(?m)^X-Auth-Refresh-Token: .*`)
+    sanitized = re.ReplaceAllString(sanitized, "X-Auth-Refresh-Token: "+privateDataPlaceholder())
+
+    re = regexp.MustCompile(`(?m)^X-Auth-Uaa-Token: .*`)
+    sanitized = re.ReplaceAllString(sanitized, "X-Auth-Uaa-Token: "+privateDataPlaceholder())
+
+    re = regexp.MustCompile(`password=[^&]*&`)
+    sanitized = re.ReplaceAllString(sanitized, "password="+privateDataPlaceholder()+"&")
+
+    re = regexp.MustCompile(`refresh_token=[^&]*&`)
+    sanitized = re.ReplaceAllString(sanitized, "refresh_token="+privateDataPlaceholder()+"&")
+
+    re = regexp.MustCompile(`apikey=[^&]*&`)
+    sanitized = re.ReplaceAllString(sanitized, "apikey="+privateDataPlaceholder()+"&")
+
+    sanitized = sanitizeJSON("token", sanitized)
+    sanitized = sanitizeJSON("password", sanitized)
+    sanitized = sanitizeJSON("apikey", sanitized)
+    sanitized = sanitizeJSON("passcode", sanitized)
+
+    return sanitized
+}
+
+func sanitizeJSON(propertySubstring string, json string) string {
+    regex := regexp.MustCompile(fmt.Sprintf(`(?i)"([^"]*%s[^"]*)":\s*"[^\,]*"`, propertySubstring))
+    return regex.ReplaceAllString(json, fmt.Sprintf(`"$1":"%s"`, privateDataPlaceholder()))
+}
+
+// privateDataPlaceholder returns the text to replace the sensitive data.
+func privateDataPlaceholder() string {
+    return "[PRIVATE DATA HIDDEN]"
+}
diff --git a/vendor/github.com/IBM-Bluemix/bluemix-go/version.go b/vendor/github.com/IBM-Bluemix/bluemix-go/version.go
new file mode 100644
index 0000000000..ef121571c0
--- /dev/null
+++ b/vendor/github.com/IBM-Bluemix/bluemix-go/version.go
@@ -0,0 +1,4 @@
+package bluemix
+
+//Version is the SDK version
+const Version = "0.1"
diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE
new file mode 100644
index 0000000000..2125378860
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-cidr/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Martin Atkins
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
new file mode 100644
index 0000000000..a31cdec773
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
@@ -0,0 +1,112 @@
+// Package cidr is a collection of assorted utilities for computing
+// network and host addresses within network ranges.
+//
+// It expects a CIDR-type address structure where addresses are divided into
+// some number of prefix bits representing the network and then the remaining
+// suffix bits represent the host.
+//
+// For example, it can help to calculate addresses for sub-networks of a
+// parent network, or to calculate host addresses within a particular prefix.
+//
+// At present this package is prioritizing simplicity of implementation and
+// de-prioritizing speed and memory usage. Thus caution is advised before
+// using this package in performance-critical applications or hot codepaths.
+// Patches to improve the speed and memory usage may be accepted as long as
+// they do not result in a significant increase in code complexity.
+package cidr
+
+import (
+    "fmt"
+    "math/big"
+    "net"
+)
+
+// Subnet takes a parent CIDR range and creates a subnet within it
+// with the given number of additional prefix bits and the given
+// network number.
+//
+// For example, 10.3.0.0/16, extended by 8 bits, with a network number
+// of 5, becomes 10.3.5.0/24 .
+func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
+    ip := base.IP
+    mask := base.Mask
+
+    parentLen, addrLen := mask.Size()
+    newPrefixLen := parentLen + newBits
+
+    if newPrefixLen > addrLen {
+        return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits)
+    }
+
+    maxNetNum := uint64(1<<uint64(newBits)) - 1
+    if uint64(num) > maxNetNum {
+        return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num)
+    }
+
+    return &net.IPNet{
+        IP:   insertNumIntoIP(ip, num, newPrefixLen),
+        Mask: net.CIDRMask(newPrefixLen, addrLen),
+    }, nil
+}
+
+// Host takes a parent CIDR range and turns it into a host IP address with
+// the given host number.
+//
+// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
+func Host(base *net.IPNet, num int) (net.IP, error) {
+    ip := base.IP
+    mask := base.Mask
+
+    parentLen, addrLen := mask.Size()
+    hostLen := addrLen - parentLen
+
+    maxHostNum := uint64(1<<uint64(hostLen)) - 1
+    if uint64(num) > maxHostNum {
+        return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
+    }
+
+    return insertNumIntoIP(ip, num, 32), nil
+}
+
+// AddressRange returns the first and last addresses in the given CIDR range.
+func AddressRange(network *net.IPNet) (net.IP, net.IP) {
+    // the first IP is easy
+    firstIP := network.IP
+
+    // the last IP is the network address OR NOT the mask address
+    prefixLen, bits := network.Mask.Size()
+    if prefixLen == bits {
+        // Easy!
+        // But make sure that our two slices are distinct, since they
+        // would be in all other cases.
+ lastIP := make([]byte, len(firstIP)) + copy(lastIP, firstIP) + return firstIP, lastIP + } + + firstIPInt, bits := ipToInt(firstIP) + hostLen := uint(bits) - uint(prefixLen) + lastIPInt := big.NewInt(1) + lastIPInt.Lsh(lastIPInt, hostLen) + lastIPInt.Sub(lastIPInt, big.NewInt(1)) + lastIPInt.Or(lastIPInt, firstIPInt) + + return firstIP, intToIP(lastIPInt, bits) +} + +// AddressCount returns the number of distinct host addresses within the given +// CIDR range. +// +// Since the result is a uint64, this function returns meaningful information +// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65. +func AddressCount(network *net.IPNet) uint64 { + prefixLen, bits := network.Mask.Size() + return 1 << (uint64(bits) - uint64(prefixLen)) +} diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go new file mode 100644 index 0000000000..861a5f623d --- /dev/null +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go @@ -0,0 +1,38 @@ +package cidr + +import ( + "fmt" + "math/big" + "net" +) + +func ipToInt(ip net.IP) (*big.Int, int) { + val := &big.Int{} + val.SetBytes([]byte(ip)) + if len(ip) == net.IPv4len { + return val, 32 + } else if len(ip) == net.IPv6len { + return val, 128 + } else { + panic(fmt.Errorf("Unsupported address length %d", len(ip))) + } +} + +func intToIP(ipInt *big.Int, bits int) net.IP { + ipBytes := ipInt.Bytes() + ret := make([]byte, bits/8) + // Pack our IP bytes into the end of the return array, + // since big.Int.Bytes() removes front zero padding. + for i := 1; i <= len(ipBytes); i++ { + ret[len(ret)-i] = ipBytes[len(ipBytes)-i] + } + return net.IP(ret) +} + +func insertNumIntoIP(ip net.IP, num int, prefixLen int) net.IP { + ipInt, totalBits := ipToInt(ip) + bigNum := big.NewInt(int64(num)) + bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) + ipInt.Or(ipInt, bigNum) + return intToIP(ipInt, totalBits) +} diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/LICENSE b/vendor/github.com/apparentlymart/go-rundeck-api/LICENSE new file mode 100644 index 0000000000..35687787ed --- /dev/null +++ b/vendor/github.com/apparentlymart/go-rundeck-api/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
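The vendored `cidr` package above supplies the prefix arithmetic behind subnet and host address calculations. As a minimal sketch of how its exported helpers compose (grounded only in the package's own doc comments and the vendored import path shown in this diff; the program itself is illustrative, not part of the PR):

```go
package main

import (
	"fmt"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	// Parent network from the Subnet docs: 10.3.0.0/16.
	_, base, err := net.ParseCIDR("10.3.0.0/16")
	if err != nil {
		panic(err)
	}

	// Extend the /16 by 8 bits and pick network number 5 -> 10.3.5.0/24.
	sub, err := cidr.Subnet(base, 8, 5)
	if err != nil {
		panic(err)
	}

	// Host number 2 within that subnet -> 10.3.5.2.
	host, err := cidr.Host(sub, 2)
	if err != nil {
		panic(err)
	}

	// First/last addresses and the number of addresses in the subnet.
	first, last := cidr.AddressRange(sub)
	fmt.Println(sub, host, first, last, cidr.AddressCount(sub))
	// Output: 10.3.5.0/24 10.3.5.2 10.3.5.0 10.3.5.255 256
}
```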
diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/README.md b/vendor/github.com/apparentlymart/go-rundeck-api/README.md new file mode 100644 index 0000000000..18abfb3d33 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-rundeck-api/README.md @@ -0,0 +1,9 @@ +# go-rundeck-api + +This is a Go client for the Rundeck HTTP API. It was primarily developed to back the Rundeck provider in [Terraform](https://terraform.io), but can be used standalone too. + +It should ``go install`` just like any other Go package: + +* ``go install github.com/apparentlymart/go-rundeck-api/rundeck`` + +For reference documentation, see [godoc](https://godoc.org/github.com/apparentlymart/go-rundeck-api/rundeck). diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 0000000000..5f14d1162e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 0000000000..56fdfc2bfc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,145 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. 
+ Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 0000000000..0202a008f5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,194 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. 
+func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. 
+func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 0000000000..1a3d106d5c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. 
+func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 0000000000..59fa4a558a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 0000000000..11c52c3896 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,222 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 0000000000..710eb432f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+    for v.Kind() == reflect.Ptr {
+        v = v.Elem()
+    }
+
+    switch v.Kind() {
+    case reflect.Struct:
+        strtype := v.Type().String()
+        if strtype == "time.Time" {
+            fmt.Fprintf(buf, "%s", v.Interface())
+            break
+        } else if strings.HasPrefix(strtype, "io.") {
+            buf.WriteString("<buffer>")
+            break
+        }
+
+        buf.WriteString("{\n")
+
+        names := []string{}
+        for i := 0; i < v.Type().NumField(); i++ {
+            name := v.Type().Field(i).Name
+            f := v.Field(i)
+            if name[0:1] == strings.ToLower(name[0:1]) {
+                continue // ignore unexported fields
+            }
+            if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+                continue // ignore unset fields
+            }
+            names = append(names, name)
+        }
+
+        for i, n := range names {
+            val := v.FieldByName(n)
+            buf.WriteString(strings.Repeat(" ", indent+2))
+            buf.WriteString(n + ": ")
+            prettify(val, indent+2, buf)
+
+            if i < len(names)-1 {
+                buf.WriteString(",\n")
+            }
+        }
+
+        buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+    case reflect.Slice:
+        strtype := v.Type().String()
+        if strtype == "[]uint8" {
+            fmt.Fprintf(buf, "<binary> len %d", v.Len())
+            break
+        }
+
+        nl, id, id2 := "", "", ""
+        if v.Len() > 3 {
+            nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+        }
+        buf.WriteString("[" + nl)
+        for i := 0; i < v.Len(); i++ {
+            buf.WriteString(id2)
+            prettify(v.Index(i), indent+2, buf)
+
+            if i < v.Len()-1 {
+                buf.WriteString("," + nl)
+            }
+        }
+
+        buf.WriteString(nl + id + "]")
+    case reflect.Map:
+        buf.WriteString("{\n")
+
+        for i, k := range v.MapKeys() {
+            buf.WriteString(strings.Repeat(" ", indent+2))
+            buf.WriteString(k.String() + ": ")
+            prettify(v.MapIndex(k), indent+2, buf)
+
+            if i < v.Len()-1 {
+                buf.WriteString(",\n")
+            }
+        }
+
+        buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+    default:
+        if !v.IsValid() {
+            fmt.Fprint(buf, "<invalid value>")
+            return
+        }
+        format := "%v"
+        switch v.Interface().(type) {
+        case string:
+            format = "%q"
+        case io.ReadSeeker, io.Reader:
+            format = "buffer(%p)"
+        }
+        fmt.Fprintf(buf, format, v.Interface())
+    }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 0000000000..b6432f1a11
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,89 @@
+package awsutil
+
+import (
+    "bytes"
+    "fmt"
+    "reflect"
+    "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 0000000000..788fe6e279 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,90 @@ +package client + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint string + SigningRegion string + SigningName string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not +// resolve the endpoint automatically. The service client's endpoint must be +// provided via the aws.Config.Endpoint field. +type ConfigNoResolveEndpointProvider interface { + ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+    svc := &Client{
+        Config:     cfg,
+        ClientInfo: info,
+        Handlers:   handlers.Copy(),
+    }
+
+    switch retryer, ok := cfg.Retryer.(request.Retryer); {
+    case ok:
+        svc.Retryer = retryer
+    case cfg.Retryer != nil && cfg.Logger != nil:
+        s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+        cfg.Logger.Log(s)
+        fallthrough
+    default:
+        maxRetries := aws.IntValue(cfg.MaxRetries)
+        if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+            maxRetries = 3
+        }
+        svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+    }
+
+    svc.AddDebugHandlers()
+
+    for _, option := range options {
+        option(svc)
+    }
+
+    return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+    return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+    if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+        return
+    }
+
+    c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
+    c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 0000000000..1313478f27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+    "math/rand"
+    "sync"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+//    type retryer struct {
+//        service.DefaultRetryer
+//    }
+//
+//    // This implementation always has 100 max retries
+//    func (d retryer) MaxRetries() uint { return 100 }
+type DefaultRetryer struct {
+    NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will use to
+// make an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+    return d.NumMaxRetries
+}
+
+var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+    // Set the upper limit of delay in retrying at ~five minutes
+    minTime := 30
+    throttle := d.shouldThrottle(r)
+    if throttle {
+        minTime = 500
+    }
+
+    retryCount := r.RetryCount
+    if retryCount > 13 {
+        retryCount = 13
+    } else if throttle && retryCount > 8 {
+        retryCount = 8
+    }
+
+    delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
+    return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + + if r.HTTPResponse.StatusCode >= 500 { + return true + } + return r.IsErrorRetryable() || d.shouldThrottle(r) +} + +// ShouldThrottle returns true if the request should be throttled. +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + if r.HTTPResponse.StatusCode == 502 || + r.HTTPResponse.StatusCode == 503 || + r.HTTPResponse.StatusCode == 504 { + return true + } + return r.IsErrorThrottle() +} + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 0000000000..1f39c91f2e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. 
+		r.ResetBody()
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *request.Request) {
+	lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
+	r.HTTPResponse.Body = &teeReaderCloser{
+		Reader: io.TeeReader(r.HTTPResponse.Body, lw),
+		Source: r.HTTPResponse.Body,
+	}
+
+	handlerFn := func(req *request.Request) {
+		body, err := httputil.DumpResponse(req.HTTPResponse, false)
+		if err != nil {
+			lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
+			return
+		}
+
+		b, err := ioutil.ReadAll(lw.buf)
+		if err != nil {
+			lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
+			return
+		}
+		lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body)))
+		if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) {
+			lw.Logger.Log(string(b))
+		}
+	}
+
+	const handlerName = "awsdk.client.LogResponse.ResponseBody"
+
+	r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
+		Name: handlerName, Fn: handlerFn,
+	})
+	r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
+		Name: handlerName, Fn: handlerFn,
+	})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 0000000000..4778056ddf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,12 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+	ServiceName   string
+	APIVersion    string
+	Endpoint      string
+	SigningName   string
+	SigningRegion string
+	JSONVersion   string
+	TargetPrefix  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 0000000000..d1f31f1c65
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,470 @@
+package aws
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This will be the default action if
+// Config.MaxRetries is also nil.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+//	// Create Session with MaxRetry configuration to be shared by multiple
+//	// service clients.
+//	sess := session.Must(session.NewSession(&aws.Config{
+//		MaxRetries: aws.Int(3),
+//	}))
+//
+//	// Create S3 service client with a specific Region.
+//	svc := s3.New(sess, &aws.Config{
+//		Region: aws.String("us-west-2"),
+//	})
+type Config struct {
+	// Enables verbose error printing of all credential chain errors.
+	// Should be used when wanting to see all errors while attempting to
+	// retrieve credentials.
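+	//
+	// A sketch of enabling this when building a session (the "session"
+	// package import is assumed):
+	//
+	//	sess := session.Must(session.NewSession(aws.NewConfig().
+	//		WithCredentialsChainVerboseErrors(true)))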
+	CredentialsChainVerboseErrors *bool
+
+	// The credentials object to use when signing requests. Defaults to a
+	// chain of credential providers to search for credentials in environment
+	// variables, shared credential file, and EC2 Instance Roles.
+	Credentials *credentials.Credentials
+
+	// An optional endpoint URL (hostname only or fully qualified URI)
+	// that overrides the default generated endpoint for a client. Set this
+	// to `""` to use the default generated endpoint.
+	//
+	// @note You must still provide a `Region` value when specifying an
+	//   endpoint for a client.
+	Endpoint *string
+
+	// The resolver to use for looking up endpoints for AWS service clients
+	// to use based on region.
+	EndpointResolver endpoints.Resolver
+
+	// EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+	// ShouldRetry regardless of whether request.Retryable is set.
+	// This will utilize the ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+	// is not set, then ShouldRetry will only be called if request.Retryable is nil.
+	// Proper handling of the request.Retryable field is important when setting this field.
+	EnforceShouldRetryCheck *bool
+
+	// The region to send requests to. This parameter is required and must
+	// be configured globally or on a per-client basis unless otherwise
+	// noted. A full list of regions is found in the "Regions and Endpoints"
+	// document.
+	//
+	// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
+	//   AWS Regions and Endpoints
+	Region *string
+
+	// Set this to `true` to disable SSL when sending requests. Defaults
+	// to `false`.
+	DisableSSL *bool
+
+	// The HTTP client to use when sending requests. Defaults to
+	// `http.DefaultClient`.
+	HTTPClient *http.Client
+
+	// An integer value representing the logging level. The default log level
+	// is zero (LogOff), which represents no logging. To enable logging set
+	// to a LogLevel Value.
+	LogLevel *LogLevelType
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard out.
+	Logger Logger
+
+	// The maximum number of times that a request will be retried for failures.
+	// Defaults to -1, which defers the max retry setting to the service
+	// specific configuration.
+	MaxRetries *int
+
+	// Retryer guides how HTTP requests should be retried in case of
+	// recoverable failures.
+	//
+	// When nil or the value does not implement the request.Retryer interface,
+	// the request.DefaultRetryer will be used.
+	//
+	// When both Retryer and MaxRetries are non-nil, the former is used and
+	// the latter ignored.
+	//
+	// To set the Retryer field in a type-safe manner and with chaining, use
+	// the request.WithRetryer helper function:
+	//
+	//   cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+	//
+	Retryer RequestRetryer
+
+	// Disables semantic parameter validation, which validates input for
+	// missing required fields and/or other semantic request input errors.
+	DisableParamValidation *bool
+
+	// Disables the computation of request and response checksums, e.g.,
+	// CRC32 checksums in Amazon DynamoDB.
+	DisableComputeChecksums *bool
+
+	// Set this to `true` to force the request to use path-style addressing,
+	// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+	// will use virtual hosted bucket addressing when possible
+	// (`http://BUCKET.s3.amazonaws.com/KEY`).
+	//
+	// @note This configuration option is specific to the Amazon S3 service.
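+	//
+	// A sketch of forcing path-style addressing, e.g. for S3-compatible
+	// services ("sess" is assumed to exist):
+	//
+	//	svc := s3.New(sess, aws.NewConfig().WithS3ForcePathStyle(true))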
+	// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+	//   Amazon S3: Virtual Hosting of Buckets
+	S3ForcePathStyle *bool
+
+	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+	// header to PUT requests over 2MB of content. 100-Continue instructs the
+	// HTTP client not to send the body until the service responds with a
+	// `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated, and validated.
+	//
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+	//
+	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait
+	// timeout. https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disable 100-Continue if you experience issues
+	// with proxies or third party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests that are not compatible will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate in order to be used with an
+	// S3 client that has accelerate enabled. If the bucket is not enabled for
+	// accelerate an error will be returned. The bucket name must be DNS
+	// compatible to also work with accelerate.
+	S3UseAccelerate *bool
+
+	// Set this to `true` to disable the EC2Metadata client from overriding the
+	// default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+	// meaningful if you're not already using a custom HTTP client with the
+	// SDK. Enabled by default.
+	//
+	// Must be set and provided to the session.NewSession() in order to prevent
+	// the EC2Metadata client from overriding the timeout for the default
+	// credentials chain.
+	//
+	// Example:
+	//    sess := session.Must(session.NewSession(aws.NewConfig()
+	//       .WithEC2MetadataDisableTimeoutOverride(true)))
+	//
+	//    svc := s3.New(sess)
+	//
+	EC2MetadataDisableTimeoutOverride *bool
+
+	// Instructs the endpoint to be generated for a service client to
+	// be the dual stack endpoint. The dual stack endpoint will support
+	// both IPv4 and IPv6 addressing.
+	//
+	// Setting this for a service which does not support dual stack will fail
+	// to make requests. It is not recommended to set this value on the session
+	// as it will apply to all service clients created with the session, even
+	// services which don't support dual stack endpoints.
+	//
+	// If the Endpoint config value is also provided the UseDualStack flag
+	// will be ignored.
+	//
+	// For example:
+	//
+	//  sess := session.Must(session.NewSession())
+	//
+	//  svc := s3.New(sess, &aws.Config{
+	//      UseDualStack: aws.Bool(true),
+	//  })
+	UseDualStack *bool
+
+	// SleepDelay is an override for the func the SDK will call when sleeping
+	// during the lifecycle of a request. Specifically this will be used for
+	// request delays. This value should only be used for testing. To adjust
+	// the delay of a request see the aws/client.DefaultRetryer and
+	// aws/request.Retryer.
+	//
+	// SleepDelay will prevent any Context from being used for canceling retry
+	// delay of an API operation. It is recommended to not use SleepDelay at all
+	// and specify a Retryer instead.
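+	//
+	// For example, a test might eliminate retry delays entirely (sketch):
+	//
+	//	cfg := aws.NewConfig().WithSleepDelay(func(time.Duration) {})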
+ SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. 
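+//
+// For example (sketch): cfg := aws.NewConfig().WithLogger(aws.NewDefaultLogger())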
+func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + 
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+	}
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+	dst := &Config{}
+	dst.MergeIn(c)
+
+	for _, cfg := range cfgs {
+		dst.MergeIn(cfg)
+	}
+
+	return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go
new file mode 100644
index 0000000000..79f426853b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context.go
@@ -0,0 +1,71 @@
+package aws
+
+import (
+	"time"
+)
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key returns the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	Value(key interface{}) interface{}
+}
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background().
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+	return backgroundCtx
+}
+
+// SleepWithContext will wait for the timer duration to expire, or for the
+// context to be canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
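+//
+// A sketch of bounding the sleep with the stdlib context package (Go 1.7+,
+// where context.Context satisfies this interface):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	err := aws.SleepWithContext(ctx, 10*time.Second) // returns ctx.Err() after ~1s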
+func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go new file mode 100644 index 0000000000..e8cf93d269 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go @@ -0,0 +1,41 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the the Go 1.7 context.emptyCtx type. This +// is copied to provide a 1.6 and 1.5 safe version of context that is compatible +// with Go 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go new file mode 100644 index 0000000000..064f75c925 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go @@ -0,0 +1,9 @@ +// +build go1.7 + +package aws + +import "context" + +var ( + backgroundCtx = context.Background() +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 0000000000..3b73a7da7f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,369 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. 
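+//
+// For example: aws.BoolValue(nil) == false and aws.BoolValue(aws.Bool(true)) == true.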
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
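+//
+// For example: aws.Int64Value(aws.Int64(7)) == 7 and aws.Int64Value(nil) == 0.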
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. +func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
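+//
+// For example: TimeUnixMilli(time.Unix(1, 500000000)) == 1500, i.e. 1.5
+// seconds after the epoch expressed in milliseconds.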
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 0000000000..495e3ef62c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,242 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
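+// The resulting User-Agent value takes the form
+// "aws-sdk-go/<SDKVersion> (<go version>; <GOOS>; <GOARCH>)" (illustrative).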
+var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. +var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 10 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(10 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. 
+ r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 0000000000..7d50b1557c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. 
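+// For example (sketch; service and field names illustrative):
+//
+//	// Bucket is required, so Validate() fails and the request never sends.
+//	_, err := svc.GetObject(&s3.GetObjectInput{Key: aws.String("k")})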
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 0000000000..f298d65962 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,102 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // + // @readonly + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// via the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. 
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 0000000000..42416fc2f0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,246 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At that
+// point Credentials will call the Provider's Retrieve() to get a new credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//	creds := credentials.NewEnvCredentials()
+//
+//	// Retrieve the credentials value
+//	credValue, err := creds.Get()
+//	if err != nil {
+//		// handle error
+//	}
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//	creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+//	creds.Expire()
+//	credsValue, err := creds.Get()
+//	// New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//	type MyProvider struct{}
+//	func (m *MyProvider) Retrieve() (Value, error) {...}
+//	func (m *MyProvider) IsExpired() bool {...}
+//
+//	creds := credentials.NewCredentials(&MyProvider{})
+//	credValue, err := creds.Get()
+//
+package credentials

+import (
+	"sync"
+	"time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+//	svc := s3.New(session.Must(session.NewSession(&aws.Config{
+//		Credentials: credentials.AnonymousCredentials,
+//	})))
+//	// Access public S3 buckets.
+//
+// @readonly
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when constructing a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+	// The error to be returned from Retrieve
+	Err error
+
+	// The provider name to set on the Retrieved returned Value
+	ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+	return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+	return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//	type EC2RoleProvider struct {
+//		Expiry
+//		...
+//	}
+type Expiry struct {
+	// The date/time when to expire on
+	expiration time.Time
+
+	// If set will be used by IsExpired to determine the current time.
+	// Defaults to time.Now if CurrentTime is not set. Available for testing
+	// to be able to mock out the current time.
+	CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+	e.expiration = expiration
+	if window > 0 {
+		e.expiration = e.expiration.Add(-window)
+	}
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+	if e.CurrentTime == nil {
+		e.CurrentTime = time.Now
+	}
+	return e.expiration.Before(e.CurrentTime())
+}
+
+// A Credentials provides concurrency-safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronized state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value.
All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 0000000000..c39749524e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. 
 This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: ec2metadata.New(c),
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to the
+// EC2 metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: client,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails or the desired credentials
+// cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+	credsList, err := requestCredList(m.Client)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(m.Client, credsName)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// An error is returned if there are no credentials, or if there is an error
+// making or receiving the request.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadata(iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific credentials name from
+// the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the
+// response, an error will be returned.
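+//
+// For example, for a role named "myRole" the metadata path requested is
+// /iam/security-credentials/myRole (illustrative).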
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("EC2RoleRequestError",
+				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	respCreds := ec2RoleCredRespBody{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("SerializationError",
+				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	if respCreds.Code != "Success" {
+		// If an error code was returned something failed requesting the role.
+		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+	}
+
+	return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644
index 0000000000..a4cec5c553
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,191 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+//	{
+//		"AccessKeyId" : "MUA...",
+//		"SecretAccessKey" : "/7PC5om...."
+//	}
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+//	{
+//		"AccessKeyId" : "MUA...",
+//		"SecretAccessKey" : "/7PC5om....",
+//		"Token" : "AQoDY....=",
+//		"Expiration" : "2016-02-25T06:03:31Z"
+//	}
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+//	{
+//		"code": "ErrorCode",
+//		"message": "Helpful error message."
+//	}
+package endpointcreds
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the credentials.Provider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+	staticCreds bool
+	credentials.Expiry
+
+	// Requires an AWS Client to make HTTP requests to the endpoint with.
+	// The endpoint the request will be made to is provided by the aws.Config's
+	// Endpoint value.
+	Client *client.Client
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
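+	//
+	// Editor's sketch (illustrative): ExpiryWindow is typically set through
+	// the functional options accepted by NewProviderClient below:
+	//
+	//	p := NewProviderClient(cfg, handlers, endpoint,
+	//		func(o *Provider) { o.ExpiryWindow = 30 * time.Second })
+	//	_ = p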
+	ExpiryWindow time.Duration
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+	p := &Provider{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: "CredentialsEndpoint",
+				Endpoint:    endpoint,
+			},
+			handlers,
+		),
+	}
+
+	p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+	p.Client.Handlers.Validate.Clear()
+	p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return p
+}
+
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the
+// credentials from the endpoint whenever the cached ones expire.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+	return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+	if p.staticCreds {
+		return false
+	}
+	return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+	resp, err := p.getCredentials()
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName},
+			awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+	}
+
+	if resp.Expiration != nil {
+		p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+	} else {
+		p.staticCreds = true
+	}
+
+	return credentials.Value{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+type getCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+type errorOutput struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+	op := &request.Operation{
+		Name:       "GetCredentials",
+		HTTPMethod: "GET",
+	}
+
+	out := &getCredentialsOutput{}
+	req := p.Client.NewRequest(op, nil, out)
+	req.HTTPRequest.Header.Set("Accept", "application/json")
+
+	return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if len(r.ClientInfo.Endpoint) == 0 {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	out := r.Data.(*getCredentialsOutput)
+	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+		r.Error = awserr.New("SerializationError",
+			"failed to decode endpoint credentials",
+			err,
+		)
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var errOut errorOutput
+	if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
+		r.Error = awserr.New("SerializationError",
+			"failed to decode endpoint credentials",
+			err,
+		)
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 0000000000..c14231a16f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,78 @@
+package credentials
+
+import (
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName provides a name of Env provider
+const EnvProviderName = "EnvProvider"
+
+var (
+	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+	// found in the process's environment.
+	//
+	// @readonly
+	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+	// can't be found in the process's environment.
+	//
+	// @readonly
+	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+	retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+	return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("AWS_ACCESS_KEY_ID")
+	if id == "" {
+		id = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if secret == "" {
+		secret = os.Getenv("AWS_SECRET_KEY")
+	}
+
+	if id == "" {
+		return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+	}
+
+	if secret == "" {
+		return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+		ProviderName:    EnvProviderName,
+	}, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
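+//
+// Editor's sketch (illustrative, not part of the upstream file): IsExpired
+// stays true until Retrieve succeeds once; environment credentials never
+// expire after that.
+//
+//	p := &EnvProvider{}
+//	_ = p.IsExpired() // true, nothing retrieved yet
+//	if _, err := p.Retrieve(); err == nil {
+//		_ = p.IsExpired() // false
+//	}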
+func (e *EnvProvider) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644
index 0000000000..7fc91d9d20
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 0000000000..51e21e0f38
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,150 @@
+package credentials
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/go-ini/ini"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredsProviderName provides a name of SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+	// Path to the shared credentials file.
+	//
+	// If empty the provider will look for the "AWS_SHARED_CREDENTIALS_FILE" env
+	// variable. If that value is also empty the path will default to the current
+	// user's home directory.
+	// Linux/OSX: "$HOME/.aws/credentials"
+	// Windows:   "%USERPROFILE%\.aws\credentials"
+	Filename string
+
+	// AWS Profile to extract credentials from the shared credentials file. If empty
+	// will default to environment variable "AWS_PROFILE", or "default" if the
+	// environment variable is also not set.
+	Profile string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+	return NewCredentials(&SharedCredentialsProvider{
+		Filename: filename,
+		Profile:  profile,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+	p.retrieved = false
+
+	filename, err := p.filename()
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	creds, err := loadProfile(filename, p.profile())
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	p.retrieved = true
+	return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+	return !p.retrieved
+}
+
+// loadProfile loads from the file pointed to by the shared credentials filename
+// for the given profile. The credentials retrieved from the profile will be
+// returned, or an error. An error will be returned if it fails to read from
+// the file, or the data is invalid.
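+//
+// Editor's sketch (illustrative): with the example.ini file shown above,
+// loading the "no_token" profile yields a Value with an empty SessionToken:
+//
+//	v, err := loadProfile("example.ini", "no_token")
+//	if err == nil {
+//		fmt.Println(v.AccessKeyID) // "accessKey"
+//	}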
+func loadProfile(filename, profile string) (Value, error) {
+	config, err := ini.Load(filename)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+	}
+	iniProfile, err := config.GetSection(profile)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
+	}
+
+	id, err := iniProfile.GetKey("aws_access_key_id")
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+			err)
+	}
+
+	secret, err := iniProfile.GetKey("aws_secret_access_key")
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+			err)
+	}
+
+	// Default to empty string if not found
+	token := iniProfile.Key("aws_session_token")
+
+	return Value{
+		AccessKeyID:     id.String(),
+		SecretAccessKey: secret.String(),
+		SessionToken:    token.String(),
+		ProviderName:    SharedCredsProviderName,
+	}, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+	if len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of the home directory not found error being
+		// returned. This error is too verbose, failure when opening the file
+		// would have been a better error to return.
+		return "", ErrSharedCredentialsHomeNotFound
+	}
+
+	p.Filename = shareddefaults.SharedCredentialsFilename()
+
+	return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+	if p.Profile == "" {
+		p.Profile = os.Getenv("AWS_PROFILE")
+	}
+	if p.Profile == "" {
+		p.Profile = "default"
+	}
+
+	return p.Profile
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 0000000000..4f5dab3fcc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,57 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+	//
+	// @readonly
+	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+	Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
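+//
+// Editor's sketch (illustrative; "AKID" and "SECRET" are placeholder values):
+//
+//	creds := NewStaticCredentials("AKID", "SECRET", "")
+//	v, err := creds.Get()
+//	_, _ = v, err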
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields.
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+	return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	if len(s.Value.ProviderName) == 0 {
+		s.Value.ProviderName = StaticProviderName
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 0000000000..4108e433e6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,298 @@
+/*
+Package stscreds provides credential Providers for retrieving STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+	// Initial credentials loaded from SDK's default credential chain. Such as
+	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+	// Role. These credentials will be used to make the STS Assume Role API call.
+	sess := session.Must(session.NewSession())
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN.
+	creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with an MFA token you can either specify an MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not need direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenCode = aws.String("00000000")
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenProvider = stscreds.StdinTokenProvider
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Printf("Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
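+//
+// Editor's illustration (hedged; these are the standard shared config keys
+// for assume role, shown here only for context):
+//
+//	[profile assumed]
+//	role_arn = arn:aws:iam::123456789012:role/myRole
+//	source_profile = default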
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
type AssumeRoleProvider struct {
+	credentials.Expiry
+
+	// STS client to make assume role request with.
+	Client AssumeRoler
+
+	// Role to be assumed.
+	RoleARN string
+
+	// Session name, if you wish to reuse the credentials elsewhere.
+	RoleSessionName string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	//
+	// If SerialNumber is set and neither TokenCode nor TokenProvider are also
+	// set an error will be returned.
+	TokenCode *string
+
+	// Async method of providing MFA token code for assuming an IAM role with MFA.
+	// The value returned by the function will be used as the TokenCode in the Retrieve
+	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed when SerialNumber is also set and
+	// TokenCode is not set.
+	//
+	// If both TokenCode and TokenProvider are set, TokenProvider will be used and
+	// TokenCode is ignored.
+	TokenProvider func() (string, error)
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients.
All access to the credentials and refreshing them +// will be synchronized. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + roleOutput, err := p.Client.AssumeRole(input) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. + p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 0000000000..07afe3b8e6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,163 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. 
This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the configuration of an existing
+// service client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the request handlers of an existing
+// service client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the credentials of an existing
+// service client or session's Config.
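+//
+// Editor's sketch (illustrative), mirroring what Get() does above:
+//
+//	cfg := Config()
+//	handlers := Handlers()
+//	cfg.Credentials = CredChain(cfg, handlers)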
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + RemoteCredProvider(*cfg, handlers), + }, + }) +} + +const ( + httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// RemoteCredProvider returns a credentials provider for the default remote +// endpoints such as EC2 or ECS Roles. +func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } + + if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("http://169.254.170.2%s", uri) + return httpCredProvider(cfg, handlers, u) + } + + return ec2RoleProvider(cfg, handlers) +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") { + errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host) + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), + ExpiryWindow: 5 * time.Minute, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 0000000000..ca0ee1dcc7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows:    %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 0000000000..4fcb616184
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer to a scalar, and dereferencing
+// a pointer, easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to Value form will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The Value to Pointer functions will be named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//	var strPtr *string
+//
+//	// Without the SDK's conversion functions
+//	str := "my string"
+//	strPtr = &str
+//
+//	// With the SDK's conversion functions
+//	strPtr = aws.String("my string")
+//
+//	// Convert *string to string value
+//	str = aws.StringValue(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice types commonly used in API parameters. The map and slice
+// conversion functions use a similar naming pattern as the scalar conversion
+// functions.
+//
+//	var strPtrs []*string
+//	var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//	// Convert []string to []*string
+//	strPtrs = aws.StringSlice(strs)
+//
+//	// Convert []*string to []string
+//	strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 0000000000..984407a580
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,162 @@
+package ec2metadata
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// an error if the request failed.
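+//
+// Editor's sketch (illustrative; "client" is an assumed *EC2Metadata value,
+// and "instance-id" is a standard metadata path):
+//
+//	id, err := client.GetMetadata("instance-id")
+//	if err == nil {
+//		fmt.Println(id)
+//	}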
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetMetadata",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "meta-data", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetUserData returns the userdata that was configured for the EC2 instance. If
+// there is no user-data setup for the EC2 instance a "NotFoundError" error
+// code will be returned.
+func (c *EC2Metadata) GetUserData() (string, error) {
+	op := &request.Operation{
+		Name:       "GetUserData",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "user-data"),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+	req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+		if r.HTTPResponse.StatusCode == http.StatusNotFound {
+			r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+		}
+	})
+
+	return output.Content, req.Send()
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or an error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetDynamicData",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "dynamic", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. An error is returned if the request fails or the response cannot
+// be parsed.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+	resp, err := c.GetDynamicData("instance-identity/document")
+	if err != nil {
+		return EC2InstanceIdentityDocument{},
+			awserr.New("EC2MetadataRequestError",
+				"failed to get EC2 instance identity document", err)
+	}
+
+	doc := EC2InstanceIdentityDocument{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+		return EC2InstanceIdentityDocument{},
+			awserr.New("SerializationError",
+				"failed to decode EC2 instance identity document", err)
+	}
+
+	return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+	resp, err := c.GetMetadata("iam/info")
+	if err != nil {
+		return EC2IAMInfo{},
+			awserr.New("EC2MetadataRequestError",
+				"failed to get EC2 IAM info", err)
+	}
+
+	info := EC2IAMInfo{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+		return EC2IAMInfo{},
+			awserr.New("SerializationError",
+				"failed to decode EC2 IAM info", err)
+	}
+
+	if info.Code != "Success" {
+		errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+		return EC2IAMInfo{},
+			awserr.New("EC2MetadataError", errMsg, nil)
+	}
+
+	return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+	resp, err := c.GetMetadata("placement/availability-zone")
+	if err != nil {
+		return "", err
+	}
+
+	// returns the region without the availability zone suffix,
+	// e.g. us-west-2a becomes us-west-2
+	return resp[:len(resp)-1], nil
+}
+
+// Available returns true if the application has access to the EC2 Metadata
+// service. Can be used to determine if the application is running within an
+// EC2 Instance and the metadata service is available.
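+//
+// Editor's sketch (illustrative; "svc" is an assumed *EC2Metadata client):
+//
+//	if svc.Available() {
+//		region, _ := svc.Region()
+//		_ = region
+//	}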
+func (c *EC2Metadata) Available() bool {
+	if _, err := c.GetMetadata("instance-id"); err != nil {
+		return false
+	}
+
+	return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+	Code               string
+	LastUpdated        time.Time
+	InstanceProfileArn string
+	InstanceProfileID  string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+	DevpayProductCodes []string  `json:"devpayProductCodes"`
+	AvailabilityZone   string    `json:"availabilityZone"`
+	PrivateIP          string    `json:"privateIp"`
+	Version            string    `json:"version"`
+	Region             string    `json:"region"`
+	InstanceID         string    `json:"instanceId"`
+	BillingProducts    []string  `json:"billingProducts"`
+	InstanceType       string    `json:"instanceType"`
+	AccountID          string    `json:"accountId"`
+	PendingTime        time.Time `json:"pendingTime"`
+	ImageID            string    `json:"imageId"`
+	KernelID           string    `json:"kernelId"`
+	RamdiskID          string    `json:"ramdiskId"`
+	Architecture       string    `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 0000000000..5b4379dbd8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,124 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+package ec2metadata
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+	*client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+//	// Create an EC2Metadata client from just a session.
+//	svc := ec2metadata.New(mySession)
+//
+//	// Create an EC2Metadata client with additional configuration
+//	svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If the HTTP client provided is unmodified from the stdlib default, or no
+// client is provided, the EC2Metadata client's HTTP timeout will be shortened.
+// To disable this override set Config.EC2MetadataDisableTimeoutOverride to
+// true. The override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an EC2 instance.
+			Timeout: 5 * time.Second,
+		}
+	}
+
+	svc := &EC2Metadata{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: ServiceName,
+				Endpoint:    endpoint,
+				APIVersion:  "latest",
+			},
+			handlers,
+		),
+	}
+
+	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+	svc.Handlers.Validate.Clear()
+	svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Add additional options to the service config
+	for _, option := range opts {
+		option(svc.Client)
+	}
+
+	return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+	Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+		return
+	}
+
+	if data, ok := r.Data.(*metadataOutput); ok {
+		data.Content = b.String()
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+		return
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 0000000000..74f72de073
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,133 @@
+package endpoints
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// A DecodeModelOptions provides the options for how the endpoints model
+// definition is decoded.
+type DecodeModelOptions struct {
+	SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+	for _, fn := range optFns {
+		fn(d)
+	}
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+//	resolver, err := endpoints.DecodeModel(reader)
+//
+//	partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//	for _, p := range partitions {
+//		// ... inspect partitions
+//	}
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+	var opts DecodeModelOptions
+	opts.Set(optFns...)
+
+	// Get the version of the partition file to determine what
+	// unmarshaling model to use.
+ modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRmIotDataService(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if p.ID != "aws" { + return + } + + s, ok := p.Services["s3"] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services["s3"] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 0000000000..63bdaad76f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,2268 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. +) + +// AWS Standard partition's regions. +const ( + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuWest1RegionID = "eu-west-1" // EU (Ireland). + EuWest2RegionID = "eu-west-2" // EU (London). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. 
+const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). +) + +// Service identifiers +const ( + AcmServiceID = "acm" // Acm. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GreengrassServiceID = "greengrass" // Greengrass. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. 
+	ModelsLexServiceID                    = "models.lex"                   // ModelsLex.
+	MonitoringServiceID                   = "monitoring"                   // Monitoring.
+	MturkRequesterServiceID               = "mturk-requester"              // MturkRequester.
+	OpsworksServiceID                     = "opsworks"                     // Opsworks.
+	OpsworksCmServiceID                   = "opsworks-cm"                  // OpsworksCm.
+	OrganizationsServiceID                = "organizations"                // Organizations.
+	PinpointServiceID                     = "pinpoint"                     // Pinpoint.
+	PollyServiceID                        = "polly"                        // Polly.
+	RdsServiceID                          = "rds"                          // Rds.
+	RedshiftServiceID                     = "redshift"                     // Redshift.
+	RekognitionServiceID                  = "rekognition"                  // Rekognition.
+	Route53ServiceID                      = "route53"                      // Route53.
+	Route53domainsServiceID               = "route53domains"               // Route53domains.
+	RuntimeLexServiceID                   = "runtime.lex"                  // RuntimeLex.
+	S3ServiceID                           = "s3"                           // S3.
+	SdbServiceID                          = "sdb"                          // Sdb.
+	ServicecatalogServiceID               = "servicecatalog"               // Servicecatalog.
+	ShieldServiceID                       = "shield"                       // Shield.
+	SmsServiceID                          = "sms"                          // Sms.
+	SnowballServiceID                     = "snowball"                     // Snowball.
+	SnsServiceID                          = "sns"                          // Sns.
+	SqsServiceID                          = "sqs"                          // Sqs.
+	SsmServiceID                          = "ssm"                          // Ssm.
+	StatesServiceID                       = "states"                       // States.
+	StoragegatewayServiceID               = "storagegateway"               // Storagegateway.
+	StreamsDynamodbServiceID              = "streams.dynamodb"             // StreamsDynamodb.
+	StsServiceID                          = "sts"                          // Sts.
+	SupportServiceID                      = "support"                      // Support.
+	SwfServiceID                          = "swf"                          // Swf.
+	TaggingServiceID                      = "tagging"                      // Tagging.
+	WafServiceID                          = "waf"                          // Waf.
+	WafRegionalServiceID                  = "waf-regional"                 // WafRegional.
+	WorkdocsServiceID                     = "workdocs"                     // Workdocs.
+	WorkspacesServiceID                   = "workspaces"                   // Workspaces.
+	XrayServiceID                         = "xray"                         // Xray.
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+	return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+//    partitions := endpoints.DefaultPartitions()
+//    for _, p := range partitions {
+//        // ... inspect partitions
+//    }
+func DefaultPartitions() []Partition {
+	return defaultPartitions.Partitions()
+}
+
+var defaultPartitions = partitions{
+	awsPartition,
+	awscnPartition,
+	awsusgovPartition,
+}
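+
+// A minimal usage sketch (editor's illustration, not generated code): the
+// identifiers above combine with DefaultResolver to resolve a concrete
+// endpoint.
+//
+//    ep, err := endpoints.DefaultResolver().EndpointFor(
+//        endpoints.DynamodbServiceID, endpoints.UsWest2RegionID)
+//    if err == nil {
+//        fmt.Println(ep.URL, ep.SigningRegion)
+//    }
+
+// AwsPartition returns the Resolver for AWS Standard.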
+func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "EU (Frankfurt)", + }, + "eu-west-1": region{ + Description: "EU (Ireland)", + }, + "eu-west-2": region{ + Description: "EU (London)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + 
Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": 
endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: 
endpoints{ + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecr": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": 
service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + 
"us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + 
"ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": 
endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3-ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3-eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + 
Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3-us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3-us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "Shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Defaults: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{ + Hostname: "sts.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. +func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + }, + Services: services{ + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"http", 
"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
+func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-us-gov-west-1.amazonaws.com", + Protocols: []string{"http", 
"https"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 0000000000..a0e9bc4547 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,66 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns a endpoint Resolver will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// . +// +// Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.Name) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.Name) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic how +// endpoints are resolved. This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface it can be used to resolve +// endpoints. To use this with the SDK's Session and Config set the value +// of the type to the EndpointsResolver field of aws.Config when initializing +// the session, or service client. +// +// In addition the ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. +// +// +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } +// +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) 
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 0000000000..9c3eedb48d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,439 @@
+package endpoints
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+	// DisableSSL forces the endpoint to be resolved as HTTP
+	// instead of HTTPS if the service supports it.
+	DisableSSL bool
+
+	// Sets the resolver to resolve the endpoint as a dualstack endpoint
+	// for the service. If dualstack support for a service is not known and
+	// StrictMatching is not enabled a dualstack endpoint for the service will
+	// be returned. This endpoint may not be valid. If StrictMatching is
+	// enabled only services that are known to support dualstack will return
+	// dualstack endpoints.
+	UseDualStack bool
+
+	// Enables strict matching of services and regions when resolving
+	// endpoints. If the partition doesn't enumerate the exact service and
+	// region an error will be returned. This option will prevent returning
+	// endpoints that look valid, but may not resolve to any real endpoint.
+	StrictMatching bool
+
+	// Enables resolving a service endpoint based on the region provided if the
+	// service does not exist. The service endpoint ID will be used as the service
+	// domain name prefix. By default the endpoint resolver requires the service
+	// to be known when resolving endpoints.
+	//
+	// If resolving an endpoint on the partition list, the provided region will
+	// be used to determine which partition's domain name pattern to combine
+	// with the service endpoint ID. If both the service and region are unknown
+	// when resolving the endpoint on the partition list, an
+	// UnknownEndpointError error will be returned.
+	//
+	// If resolving an endpoint on a partition-specific resolver, that
+	// partition's domain name pattern will be used with the service endpoint
+	// ID. If both region and service do not exist when resolving an endpoint
+	// on a specific partition, the partition's domain pattern will be used to
+	// combine the endpoint and region together.
+	//
+	// This option is ignored if StrictMatching is enabled.
+	ResolveUnknownService bool
+}
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+	for _, fn := range optFns {
+		fn(o)
+	}
+}
+
+// DisableSSLOption sets the DisableSSL option. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+	o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+	o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+	o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+	o.ResolveUnknownService = true
+}
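+
+// The functional options above compose when resolving an endpoint. A minimal
+// sketch (illustrative, not part of the original source):
+//
+//    ep, err := endpoints.DefaultResolver().EndpointFor(
+//        endpoints.S3ServiceID, endpoints.UsWest2RegionID,
+//        endpoints.StrictMatchingOption, endpoints.UseDualStackOption)
+//    // With UseDualStack set, S3 in us-west-2 resolves to its dualstack
+//    // hostname pattern; err reports strict-matching failures.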
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+	EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is none.
+// If disableSSL is true the scheme will be HTTP instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+	if !schemeRE.MatchString(endpoint) {
+		scheme := "https"
+		if disableSSL {
+			scheme = "http"
+		}
+		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+	}
+
+	return endpoint
+}
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModel to get the list of
+// Partitions.
+type EnumPartitions interface {
+	Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist false will be returned
+// as the second parameter.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+//    rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+//    rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+	for _, p := range ps {
+		if p.ID() != partitionID {
+			continue
+		}
+		if _, ok := p.p.Services[serviceID]; !ok {
+			break
+		}
+
+		s := Service{
+			id: serviceID,
+			p:  p.p,
+		}
+		return s.Regions(), true
+	}
+
+	return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+	for _, p := range ps {
+		if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+			return p, true
+		}
+	}
+
+	return Partition{}, false
+}
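+
+// A short sketch of how the two helpers above fit together (illustrative,
+// not part of the original source):
+//
+//    ps := endpoints.DefaultPartitions()
+//    if p, ok := endpoints.PartitionForRegion(ps, "eu-west-1"); ok {
+//        fmt.Println("partition:", p.ID()) // aws
+//    }
+//    if rs, ok := endpoints.RegionsForService(ps, endpoints.AwsPartitionID,
+//        endpoints.DynamodbServiceID); ok {
+//        for id := range rs {
+//            fmt.Println("region:", id)
+//        }
+//    }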
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+    id string
+    p  *partition
+}
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation will occur regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new region and service expansions.
+//
+// Errors that can be returned:
+//   * UnknownServiceError
+//   * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+    return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+    rs := map[string]Region{}
+    for id := range p.p.Regions {
+        rs[id] = Region{
+            id: id,
+            p:  p.p,
+        }
+    }
+
+    return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+    ss := map[string]Service{}
+    for id := range p.p.Services {
+        ss[id] = Service{
+            id: id,
+            p:  p.p,
+        }
+    }
+
+    return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+    id, desc string
+    p        *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+    return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+    ss := map[string]Service{}
+    for id, s := range r.p.Services {
+        if _, ok := s.Endpoints[r.id]; ok {
+            ss[id] = Service{
+                id: id,
+                p:  r.p,
+            }
+        }
+    }
+
+    return ss
+}
+
+// A Service provides information about a service, and the ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+    id string
+    p  *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+    return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A Region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+    rs := map[string]Region{}
+    for id := range s.p.Services[s.id].Endpoints {
+        if _, ok := s.p.Regions[id]; ok {
+            rs[id] = Region{
+                id: id,
+                p:  s.p,
+            }
+        }
+    }
+
+    return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A Region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+    es := map[string]Endpoint{}
+    for id := range s.p.Services[s.id].Endpoints {
+        es[id] = Endpoint{
+            id:        id,
+            serviceID: s.id,
+            p:         s.p,
+        }
+    }
+
+    return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+    id        string
+    serviceID string
+    p         *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+    return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
+// service, and region.
+type ResolvedEndpoint struct {
+    // The endpoint URL
+    URL string
+
+    // The region that should be used for signing requests.
+    SigningRegion string
+
+    // The service name that should be used for signing requests.
+    SigningName string
+
+    // The signing method that should be used for signing requests.
+    SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when, in StrictMatching mode, the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+    awsError
+    Partition string
+    Service   string
+    Region    string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+    awsError
+    Partition string
+    Service   string
+    Known     []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+    return UnknownServiceError{
+        awsError: awserr.New("UnknownServiceError",
+            "could not resolve endpoint for unknown service", nil),
+        Partition: p,
+        Service:   s,
+        Known:     known,
+    }
+}
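+
+// A rough sketch of detecting this error when resolution fails; the service
+// name is deliberately bogus:
+//
+//    _, err := DefaultResolver().EndpointFor("not-a-service", "us-west-2")
+//    if _, ok := err.(UnknownServiceError); ok {
+//        // handle the unknown service, e.g. fall back to a custom endpoint
+//    }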
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+    extra := fmt.Sprintf("partition: %q, service: %q",
+        e.Partition, e.Service)
+    if len(e.Known) > 0 {
+        extra += fmt.Sprintf(", known: %v", e.Known)
+    }
+    return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+    return e.Error()
+}
+
+// An UnknownEndpointError is returned when, in StrictMatching mode, the
+// service is valid but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+    awsError
+    Partition string
+    Service   string
+    Region    string
+    Known     []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+    return UnknownEndpointError{
+        awsError: awserr.New("UnknownEndpointError",
+            "could not resolve endpoint", nil),
+        Partition: p,
+        Service:   s,
+        Region:    r,
+        Known:     known,
+    }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+    extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+        e.Partition, e.Service, e.Region)
+    if len(e.Known) > 0 {
+        extra += fmt.Sprintf(", known: %v", e.Known)
+    }
+    return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+    return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 0000000000..13d968a249
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,303 @@
+package endpoints
+
+import (
+    "fmt"
+    "regexp"
+    "strconv"
+    "strings"
+)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+    var opt Options
+    opt.Set(opts...)
+
+    for i := 0; i < len(ps); i++ {
+        if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+            continue
+        }
+
+        return ps[i].EndpointFor(service, region, opts...)
+    }
+
+    // If loose matching, fall back to the first partition's format to use
+    // when resolving the endpoint.
+    if !opt.StrictMatching && len(ps) > 0 {
+        return ps[0].EndpointFor(service, region, opts...)
+    }
+
+    return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
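+
+// Under loose matching (the default) an unknown region still resolves using
+// the first partition's endpoint pattern. A sketch; the region here is
+// fictional and the resulting URL may not point at a real endpoint:
+//
+//    e, _ := DefaultResolver().EndpointFor("s3", "xx-example-1")
+//    // e.URL is built from the partition's hostname pattern, producing
+//    // something like "https://s3.xx-example-1.amazonaws.com".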
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions representing each partition in the SDK's endpoints model.
+func (ps partitions) Partitions() []Partition {
+    parts := make([]Partition, 0, len(ps))
+    for i := 0; i < len(ps); i++ {
+        parts = append(parts, ps[i].Partition())
+    }
+
+    return parts
+}
+
+type partition struct {
+    ID          string      `json:"partition"`
+    Name        string      `json:"partitionName"`
+    DNSSuffix   string      `json:"dnsSuffix"`
+    RegionRegex regionRegex `json:"regionRegex"`
+    Defaults    endpoint    `json:"defaults"`
+    Regions     regions     `json:"regions"`
+    Services    services    `json:"services"`
+}
+
+func (p partition) Partition() Partition {
+    return Partition{
+        id: p.ID,
+        p:  &p,
+    }
+}
+
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+    s, hasService := p.Services[service]
+    _, hasEndpoint := s.Endpoints[region]
+
+    if hasEndpoint && hasService {
+        return true
+    }
+
+    if strictMatch {
+        return false
+    }
+
+    return p.RegionRegex.MatchString(region)
+}
+
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+    var opt Options
+    opt.Set(opts...)
+
+    s, hasService := p.Services[service]
+    if !(hasService || opt.ResolveUnknownService) {
+        // Only return an error if the resolver will not fall back to
+        // creating an endpoint based on the service endpoint ID passed in.
+        return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
+    }
+
+    e, hasEndpoint := s.endpointForRegion(region)
+    if !hasEndpoint && opt.StrictMatching {
+        return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
+    }
+
+    defs := []endpoint{p.Defaults, s.Defaults}
+    return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
+}
+
+func serviceList(ss services) []string {
+    list := make([]string, 0, len(ss))
+    for k := range ss {
+        list = append(list, k)
+    }
+    return list
+}
+
+func endpointList(es endpoints) []string {
+    list := make([]string, 0, len(es))
+    for k := range es {
+        list = append(list, k)
+    }
+    return list
+}
+
+type regionRegex struct {
+    *regexp.Regexp
+}
+
+func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
+    // Strip leading and trailing quotes.
+    regex, err := strconv.Unquote(string(b))
+    if err != nil {
+        return fmt.Errorf("unable to strip quotes from regex, %v", err)
+    }
+
+    rr.Regexp, err = regexp.Compile(regex)
+    if err != nil {
+        return fmt.Errorf("unable to unmarshal region regex, %v", err)
+    }
+    return nil
+}
+
+type regions map[string]region
+
+type region struct {
+    Description string `json:"description"`
+}
+
+type services map[string]service
+
+type service struct {
+    PartitionEndpoint string    `json:"partitionEndpoint"`
+    IsRegionalized    boxedBool `json:"isRegionalized,omitempty"`
+    Defaults          endpoint  `json:"defaults"`
+    Endpoints         endpoints `json:"endpoints"`
+}
+
+func (s *service) endpointForRegion(region string) (endpoint, bool) {
+    if s.IsRegionalized == boxedFalse {
+        return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+    }
+
+    if e, ok := s.Endpoints[region]; ok {
+        return e, true
+    }
+
+    // Unable to find any matching endpoint; return a
+    // blank one to be used for generic endpoint creation.
+ return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + hostname := e.Hostname + + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + if len(signingName) == 0 { + signingName = service + } + + return ResolvedEndpoint{ + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 0000000000..05e92df22a --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,337 @@
+// +build codegen
+
+package endpoints
+
+import (
+    "fmt"
+    "io"
+    "reflect"
+    "strings"
+    "text/template"
+    "unicode"
+)
+
+// CodeGenOptions are the options for generating Go code for the endpoints
+// from the endpoints model definition.
+type CodeGenOptions struct {
+    // Options for how the model will be decoded.
+    DecodeModelOptions DecodeModelOptions
+}
+
+// Set combines all of the option functions together.
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+    for _, fn := range optFns {
+        fn(d)
+    }
+}
+
+// CodeGenModel, given an endpoints model file, will decode it and attempt to
+// generate Go code from the model definition. An error will be returned if
+// the code cannot be generated or the model decoded.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+    var opts CodeGenOptions
+    opts.Set(optFns...)
+
+    resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+        *d = opts.DecodeModelOptions
+    })
+    if err != nil {
+        return err
+    }
+
+    tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+    if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil {
+        return fmt.Errorf("failed to execute template, %v", err)
+    }
+
+    return nil
+}
+
+func toSymbol(v string) string {
+    out := []rune{}
+    for _, c := range strings.Title(v) {
+        if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+            continue
+        }
+
+        out = append(out, c)
+    }
+
+    return string(out)
+}
+
+func quoteString(v string) string {
+    return fmt.Sprintf("%q", v)
+}
+
+func regionConstName(p, r string) string {
+    return toSymbol(p) + toSymbol(r)
+}
+
+func partitionGetter(id string) string {
+    return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+func partitionVarName(id string) string {
+    return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+func listPartitionNames(ps partitions) string {
+    names := []string{}
+    switch len(ps) {
+    case 1:
+        return ps[0].Name
+    case 2:
+        return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+    default:
+        for i, p := range ps {
+            if i == len(ps)-1 {
+                names = append(names, "and "+p.Name)
+            } else {
+                names = append(names, p.Name)
+            }
+        }
+        return strings.Join(names, ", ")
+    }
+}
+
+func boxedBoolIfSet(msg string, v boxedBool) string {
+    switch v {
+    case boxedTrue:
+        return fmt.Sprintf(msg, "boxedTrue")
+    case boxedFalse:
+        return fmt.Sprintf(msg, "boxedFalse")
+    default:
+        return ""
+    }
+}
+
+func stringIfSet(msg, v string) string {
+    if len(v) == 0 {
+        return ""
+    }
+
+    return fmt.Sprintf(msg, v)
+}
+
+func stringSliceIfSet(msg string, vs []string) string {
+    if len(vs) == 0 {
+        return ""
+    }
+
+    names := []string{}
+    for _, v := range vs {
+        names = append(names, `"`+v+`"`)
+    }
+
+    return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+func endpointIsSet(v endpoint) bool {
+    return !reflect.DeepEqual(v, endpoint{})
+}
+
+func serviceSet(ps partitions) map[string]struct{} {
+    set := map[string]struct{}{}
+    for _, p := range ps {
+        for id := range p.Services {
+            set[id] = struct{}{}
+        }
+    }
+
+    return set
+}
+
+var funcMap = template.FuncMap{
+    "ToSymbol":           toSymbol,
+    "QuoteString":        quoteString,
+    "RegionConst":        regionConstName,
+    "PartitionGetter":    partitionGetter,
+    "PartitionVarName":   partitionVarName,
+    "ListPartitionNames": listPartitionNames,
+    "BoxedBoolIfSet":     boxedBoolIfSet,
+    "StringIfSet":        stringIfSet,
+    "StringSliceIfSet":   stringSliceIfSet,
+    "EndpointIsSet":      endpointIsSet,
+ "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" . }} + + {{ range $_, $partition := . }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ template "service consts" . }} + + {{ template "endpoint resolvers" . }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . -}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . 
-}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 0000000000..5766361686 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 0000000000..91a6f277a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 0000000000..3babb5abdb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,112 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. +func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. 
+func (l *LogLevelType) Value() LogLevelType {
+    if l != nil {
+        return *l
+    }
+    return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub-levels. Safe to use on nil LogLevelType values; if the
+// LogLevel is nil, it defaults to a LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+    c := l.Value()
+    return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Safe to use on nil LogLevelType values; if the LogLevel is nil, it defaults
+// to a LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+    c := l.Value()
+    return c >= v
+}
+
+const (
+    // LogOff states that no logging should be performed by the SDK. This is the
+    // default state of the SDK, and should be used to disable all logging.
+    LogOff LogLevelType = iota * 0x1000
+
+    // LogDebug states that debug output should be logged by the SDK. This should
+    // be used to inspect requests made and responses received.
+    LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+    // LogDebugWithSigning states that the SDK should log request signing and
+    // presigning events. This should be used to log the signing details of
+    // requests for debugging. Will also enable LogDebug.
+    LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+    // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+    // HTTP bodies in addition to the headers and path. This should be used to
+    // see the body content of requests and responses made while using the SDK.
+    // Will also enable LogDebug.
+    LogDebugWithHTTPBody
+
+    // LogDebugWithRequestRetries states the SDK should log when service requests will
+    // be retried. This should be used when you want to log when service
+    // requests are being retried. Will also enable LogDebug.
+    LogDebugWithRequestRetries
+
+    // LogDebugWithRequestErrors states the SDK should log when service requests fail
+    // to build, send, validate, or unmarshal.
+    LogDebugWithRequestErrors
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+    Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type that wraps a function taking a variadic
+// list of arguments so it satisfies the Logger interface.
+//
+// Example:
+//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//         fmt.Fprintln(os.Stdout, args...)
+//     })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided.
+func (f LoggerFunc) Log(args ...interface{}) {
+    f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and uses the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+    return &defaultLogger{
+        logger: log.New(os.Stdout, "", log.LstdFlags),
+    }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+    logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+    l.logger.Println(args...)
+}
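+
+// A minimal sketch of wiring a log level and custom logger into a client
+// config; the session/service construction is assumed and not shown:
+//
+//    cfg := aws.Config{
+//        LogLevel: aws.LogLevel(aws.LogDebugWithRequestRetries),
+//        Logger: aws.LoggerFunc(func(args ...interface{}) {
+//            fmt.Fprintln(os.Stderr, args...)
+//        }),
+//    }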
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 0000000000..271da432ce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,19 @@
+// +build !appengine,!plan9
+
+package request
+
+import (
+    "net"
+    "os"
+    "syscall"
+)
+
+func isErrConnectionReset(err error) bool {
+    if opErr, ok := err.(*net.OpError); ok {
+        if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
+            return sysErr.Err == syscall.ECONNRESET
+        }
+    }
+
+    return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
new file mode 100644
index 0000000000..daf9eca437
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
@@ -0,0 +1,11 @@
+// +build appengine plan9
+
+package request
+
+import (
+    "strings"
+)
+
+func isErrConnectionReset(err error) bool {
+    return strings.Contains(err.Error(), "connection reset")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 0000000000..802ac88ad5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,256 @@
+package request
+
+import (
+    "fmt"
+    "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+    Validate         HandlerList
+    Build            HandlerList
+    Sign             HandlerList
+    Send             HandlerList
+    ValidateResponse HandlerList
+    Unmarshal        HandlerList
+    UnmarshalMeta    HandlerList
+    UnmarshalError   HandlerList
+    Retry            HandlerList
+    AfterRetry       HandlerList
+    Complete         HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+    return Handlers{
+        Validate:         h.Validate.copy(),
+        Build:            h.Build.copy(),
+        Sign:             h.Sign.copy(),
+        Send:             h.Send.copy(),
+        ValidateResponse: h.ValidateResponse.copy(),
+        Unmarshal:        h.Unmarshal.copy(),
+        UnmarshalError:   h.UnmarshalError.copy(),
+        UnmarshalMeta:    h.UnmarshalMeta.copy(),
+        Retry:            h.Retry.copy(),
+        AfterRetry:       h.AfterRetry.copy(),
+        Complete:         h.Complete.copy(),
+    }
+}
+
+// Clear removes callback functions for all handlers.
+func (h *Handlers) Clear() {
+    h.Validate.Clear()
+    h.Build.Clear()
+    h.Send.Clear()
+    h.Sign.Clear()
+    h.Unmarshal.Clear()
+    h.UnmarshalMeta.Clear()
+    h.UnmarshalError.Clear()
+    h.ValidateResponse.Clear()
+    h.Retry.Clear()
+    h.AfterRetry.Clear()
+    h.Complete.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+    Index   int
+    Handler NamedHandler
+    Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+    list []NamedHandler
+
+    // Called after each request handler in the list is called. If set
+    // and the func returns true the HandlerList will continue to iterate
+    // over the request handlers. If false is returned the HandlerList
+    // will stop iterating.
+    //
+    // Should be used if extra logic needs to be performed between each
+    // handler in the list. This can be used to terminate a list's iteration
+    // based on a condition, such as stopping on error like
+    // HandlerListStopOnError, or for logging like HandlerListLogItem.
+    AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+    Name string
+    Fn   func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+    n := HandlerList{
+        AfterEachFn: l.AfterEachFn,
+    }
+    if len(l.list) == 0 {
+        return n
+    }
+
+    n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+    return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+    l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+    return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+    l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+    if cap(l.list) == 0 {
+        l.list = make([]NamedHandler, 0, 5)
+    }
+    l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+    l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler f to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+    if cap(l.list) == len(l.list) {
+        // Allocating a new list is required.
+        l.list = append([]NamedHandler{n}, l.list...)
+    } else {
+        // Enough room to prepend into the list.
+        l.list = append(l.list, NamedHandler{})
+        copy(l.list[1:], l.list)
+        l.list[0] = n
+    }
+}
+
+// Remove removes the NamedHandler n from the handler list.
+func (l *HandlerList) Remove(n NamedHandler) {
+    l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+    for i := 0; i < len(l.list); i++ {
+        m := l.list[i]
+        if m.Name == name {
+            // Shift the array in place to avoid allocating a new one.
+            copy(l.list[i:], l.list[i+1:])
+            l.list[len(l.list)-1] = NamedHandler{}
+            l.list = l.list[:len(l.list)-1]
+
+            // Decrement i so the next length check is correct.
+            i--
+        }
+    }
+}
+
+// SwapNamed will swap out any existing handlers with the same name as the
+// passed in NamedHandler, returning true if handlers were swapped. False is
+// returned otherwise.
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
+    for i := 0; i < len(l.list); i++ {
+        if l.list[i].Name == n.Name {
+            l.list[i].Fn = n.Fn
+            swapped = true
+        }
+    }
+
+    return swapped
+}
+
+// SetBackNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the end of the list.
+func (l *HandlerList) SetBackNamed(n NamedHandler) {
+    if !l.SwapNamed(n) {
+        l.PushBackNamed(n)
+    }
+}
+
+// SetFrontNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the beginning of
+// the list.
+func (l *HandlerList) SetFrontNamed(n NamedHandler) {
+    if !l.SwapNamed(n) {
+        l.PushFrontNamed(n)
+    }
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+    for i, h := range l.list {
+        h.Fn(r)
+        item := HandlerListRunItem{
+            Index: i, Handler: h, Request: r,
+        }
+        if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+            return
+        }
+    }
+}
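+
+// A rough sketch of customizing a handler list; the handler name and body
+// are illustrative only:
+//
+//    var l HandlerList
+//    l.AfterEachFn = HandlerListStopOnError
+//    l.PushBackNamed(NamedHandler{
+//        Name: "example.LogOperation",
+//        Fn:   func(r *Request) { fmt.Println(r.Operation.Name) },
+//    })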
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+    if item.Request.Config.Logger == nil {
+        return true
+    }
+    item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+        item.Index, item.Handler.Name, item.Request.Error)
+
+    return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. True otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+    return item.Request.Error == nil
+}
+
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+    return func(r *Request) {
+        r.Handlers.Build.PushBack(func(r2 *Request) {
+            AddToUserAgent(r, s)
+        })
+    }
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format:
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+    ua := fmt.Sprintf("%s/%s", name, version)
+    if len(extra) > 0 {
+        ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+    }
+    return func(r *Request) {
+        AddToUserAgent(r, ua)
+    }
+}
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+    return func(r *Request) {
+        AddToUserAgent(r, s)
+    }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100644
index 0000000000..79f79602b0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
@@ -0,0 +1,24 @@
+package request
+
+import (
+    "io"
+    "net/http"
+    "net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+    req := new(http.Request)
+    *req = *r
+    req.URL = &url.URL{}
+    *req.URL = *r.URL
+    req.Body = body
+
+    req.Header = http.Header{}
+    for k, v := range r.Header {
+        for _, vv := range v {
+            req.Header.Add(k, vv)
+        }
+    }
+
+    return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100644
index 0000000000..02f07f4a46
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,58 @@
+package request
+
+import (
+    "io"
+    "sync"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests.
+type offsetReader struct {
+    buf    io.ReadSeeker
+    lock   sync.Mutex
+    closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+    reader := &offsetReader{}
+    buf.Seek(offset, 0)
+
+    reader.buf = buf
+    return reader
+}
+
+// Close will close the instance of the offset reader's access to
+// the underlying io.ReadSeeker.
+func (o *offsetReader) Close() error {
+    o.lock.Lock()
+    defer o.lock.Unlock()
+    o.closed = true
+    return nil
+}
+
+// Read is a thread-safe read of the underlying io.ReadSeeker.
+func (o *offsetReader) Read(p []byte) (int, error) {
+    o.lock.Lock()
+    defer o.lock.Unlock()
+
+    if o.closed {
+        return 0, io.EOF
+    }
+
+    return o.buf.Read(p)
+}
+
+// Seek is a thread-safe seeking operation.
+func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
+    o.lock.Lock()
+    defer o.lock.Unlock()
+
+    return o.buf.Seek(offset, whence)
+}
+
+// CloseAndCopy will return a new offsetReader with a copy of the old buffer
+// and close the old buffer.
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
+    o.Close()
+    return newOffsetReader(o.buf, offset)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644
index 0000000000..299dc379d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,575 @@
+package request
+
+import (
+    "bytes"
+    "fmt"
+    "io"
+    "net"
+    "net/http"
+    "net/url"
+    "reflect"
+    "strings"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/awserr"
+    "github.com/aws/aws-sdk-go/aws/client/metadata"
+)
+
+const (
+    // ErrCodeSerialization is the serialization error code that is received
+    // during protocol unmarshaling.
+    ErrCodeSerialization = "SerializationError"
+
+    // ErrCodeRead is an error that is returned during HTTP reads.
+    ErrCodeRead = "ReadError"
+
+    // ErrCodeResponseTimeout is the connection timeout error that is received
+    // during body reads.
+    ErrCodeResponseTimeout = "ResponseTimeout"
+
+    // CanceledErrorCode is the error code that will be returned by an
+    // API request that was canceled. Requests given an aws.Context may
+    // return this error when canceled.
+    CanceledErrorCode = "RequestCanceled"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+    Config     aws.Config
+    ClientInfo metadata.ClientInfo
+    Handlers   Handlers
+
+    Retryer
+    Time                   time.Time
+    ExpireTime             time.Duration
+    Operation              *Operation
+    HTTPRequest            *http.Request
+    HTTPResponse           *http.Response
+    Body                   io.ReadSeeker
+    BodyStart              int64 // offset from beginning of Body that the request body starts
+    Params                 interface{}
+    Error                  error
+    Data                   interface{}
+    RequestID              string
+    RetryCount             int
+    Retryable              *bool
+    RetryDelay             time.Duration
+    NotHoist               bool
+    SignedHeaderVals       http.Header
+    LastSignedAt           time.Time
+    DisableFollowRedirects bool
+
+    context aws.Context
+
+    built bool
+
+    // Need to persist an intermediate body between the input Body and HTTP
+    // request body because the HTTP Client's transport can maintain a reference
+    // to the HTTP request's body after the client has returned. This value is
+    // safe to use concurrently and wraps the input Body for each HTTP request.
+    safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+    Name       string
+    HTTPMethod string
+    HTTPPath   string
+    *Paginator
+
+    BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer to an object to which the request's response
+// payload will be deserialized.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+    retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+    method := operation.HTTPMethod
+    if method == "" {
+        method = "POST"
+    }
+
+    httpReq, _ := http.NewRequest(method, "", nil)
+
+    var err error
+    httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+    if err != nil {
+        httpReq.URL = &url.URL{}
+        err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+    }
+
+    r := &Request{
+        Config:     cfg,
+        ClientInfo: clientInfo,
+        Handlers:   handlers.Copy(),
+
+        Retryer:     retryer,
+        Time:        time.Now(),
+        ExpireTime:  0,
+        Operation:   operation,
+        HTTPRequest: httpReq,
+        Body:        nil,
+        Params:      params,
+        Error:       err,
+        Data:        data,
+    }
+    r.SetBufferBody([]byte{})
+
+    return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+//     var id2, versionID string
+//     svc.PutObjectWithContext(ctx, params,
+//         request.WithGetResponseHeader("x-amz-id-2", &id2),
+//         request.WithGetResponseHeader("x-amz-version-id", &versionID),
+//     )
+func WithGetResponseHeader(key string, val *string) Option {
+    return func(r *Request) {
+        r.Handlers.Complete.PushBack(func(req *Request) {
+            *val = req.HTTPResponse.Header.Get(key)
+        })
+    }
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+//     var headers http.Header
+//     svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+    return func(r *Request) {
+        r.Handlers.Complete.PushBack(func(req *Request) {
+            *headers = req.HTTPResponse.Header
+        })
+    }
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//     svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+    return func(r *Request) {
+        r.Config.LogLevel = aws.LogLevel(l)
+    }
+}
+
+// ApplyOptions will apply each option to the request, calling them in the
+// order they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+    for _, opt := range opts {
+        opt(r)
+    }
+}
+
+// Context always returns a non-nil context. If the Request does not have a
+// context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+    if r.context != nil {
+        return r.context
+    }
+    return aws.BackgroundContext()
+}
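+
+// A sketch of canceling an in-flight request with a context; assumes Go 1.7+
+// and a Request value r obtained from a service client:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    r.SetContext(ctx)
+//    err := r.Send()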
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+    if ctx == nil {
+        panic("context cannot be nil")
+    }
+    setRequestContext(r, ctx)
+}
+
+// WillRetry returns whether the request can be retried.
+func (r *Request) WillRetry() bool {
+    return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns whether the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or they are invalid.
+func (r *Request) ParamsFilled() bool {
+    return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for the response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+    return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+    r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+    r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+    r.Body = reader
+    r.ResetBody()
+}
+
+// Presign returns the request's signed URL. An error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+    r.ExpireTime = expireTime
+    r.NotHoist = false
+
+    if r.Operation.BeforePresignFn != nil {
+        r = r.copy()
+        err := r.Operation.BeforePresignFn(r)
+        if err != nil {
+            return "", err
+        }
+    }
+
+    r.Sign()
+    if r.Error != nil {
+        return "", r.Error
+    }
+    return r.HTTPRequest.URL.String(), nil
+}
+
+// PresignRequest behaves just like Presign, but hoists all headers and signs them.
+// Also returns the signed headers back to the user.
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+    r.ExpireTime = expireTime
+    r.NotHoist = true
+    r.Sign()
+    if r.Error != nil {
+        return "", nil, r.Error
+    }
+    return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+    if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+        return
+    }
+
+    retryStr := "not retrying"
+    if retrying {
+        retryStr = "will retry"
+    }
+
+    r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+        stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
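+
+// A short sketch of presigning; the expiry is arbitrary and r is assumed to
+// be a Request built by a service client:
+//
+//    urlStr, err := r.Presign(15 * time.Minute)
+//    if err == nil {
+//        fmt.Println("presigned URL:", urlStr)
+//    }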
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to Build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+    if !r.built {
+        r.Handlers.Validate.Run(r)
+        if r.Error != nil {
+            debugLogReqError(r, "Validate Request", false, r.Error)
+            return r.Error
+        }
+        r.Handlers.Build.Run(r)
+        if r.Error != nil {
+            debugLogReqError(r, "Build Request", false, r.Error)
+            return r.Error
+        }
+        r.built = true
+    }
+
+    return r.Error
+}
+
+// Sign will sign the request, returning an error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+    r.Build()
+    if r.Error != nil {
+        debugLogReqError(r, "Build Request", false, r.Error)
+        return r.Error
+    }
+
+    r.Handlers.Sign.Run(r)
+    return r.Error
+}
+
+func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
+    if r.safeBody != nil {
+        r.safeBody.Close()
+    }
+
+    r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+
+    // Go 1.8 tightened and clarified the rules code needs to use when building
+    // requests with the http package. Go 1.8 removed the automatic detection
+    // of if the Request.Body was empty, or actually had bytes in it. The SDK
+    // always sets the Request.Body even if it is empty and should not actually
+    // be sent. This is incorrect.
+    //
+    // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+    // client that the request really should be sent without a body. The
+    // Request.Body cannot be set to nil, which is preferable, because the
+    // field is exported and could introduce nil pointer dereferences for users
+    // of the SDK if they used that field.
+    //
+    // Related golang/go#18257
+    l, err := computeBodyLength(r.Body)
+    if err != nil {
+        return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
+    }
+
+    var body io.ReadCloser
+    if l == 0 {
+        body = NoBody
+    } else if l > 0 {
+        body = r.safeBody
+    } else {
+        // Hack to prevent sending bodies for methods where the body
+        // should be ignored by the server. Sending bodies on these
+        // methods without an associated ContentLength will cause the
+        // request to socket timeout because the server does not handle
+        // Transfer-Encoding: chunked bodies for these methods.
+        //
+        // This would only happen if an aws.ReaderSeekerCloser was used with
+        // an io.Reader that was not also an io.Seeker.
+        switch r.Operation.HTTPMethod {
+        case "GET", "HEAD", "DELETE":
+            body = NoBody
+        default:
+            body = r.safeBody
+        }
+    }
+
+    return body, nil
+}
+
+// Attempts to compute the length of the body of the reader using the
+// io.Seeker interface. If the value is not seekable because of being
+// a ReaderSeekerCloser without an underlying Seeker, -1 will be returned.
+// If no error occurs the length of the body will be returned.
+func computeBodyLength(r io.ReadSeeker) (int64, error) {
+    seekable := true
+    // Determine if the seeker is actually seekable. ReaderSeekerCloser
+    // hides the fact that an io.Reader might not actually be seekable.
+    switch v := r.(type) {
+    case aws.ReaderSeekerCloser:
+        seekable = v.IsSeeker()
+    case *aws.ReaderSeekerCloser:
+        seekable = v.IsSeeker()
+    }
+    if !seekable {
+        return -1, nil
+    }
+
+    curOffset, err := r.Seek(0, 1)
+    if err != nil {
+        return 0, err
+    }
+
+    endOffset, err := r.Seek(0, 2)
+    if err != nil {
+        return 0, err
+    }
+
+    _, err = r.Seek(curOffset, 0)
+    if err != nil {
+        return 0, err
+    }
+
+    return endOffset - curOffset, nil
+}
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + + for { + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + + // Closing response body to ensure that no response body is leaked + // between retry attempts. + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + } + + r.Sign() + if r.Error != nil { + return r.Error + } + + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + if !shouldRetryCancel(r) { + return r.Error + } + + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, err) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + err := r.Error + + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, err) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, err) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. 
+func AddToUserAgent(r *Request, s string) {
+	curUA := r.HTTPRequest.Header.Get("User-Agent")
+	if len(curUA) > 0 {
+		s = curUA + " " + s
+	}
+	r.HTTPRequest.Header.Set("User-Agent", s)
+}
+
+func shouldRetryCancel(r *Request) bool {
+	awsErr, ok := r.Error.(awserr.Error)
+	timeoutErr := false
+	errStr := r.Error.Error()
+	if ok {
+		if awsErr.Code() == CanceledErrorCode {
+			return false
+		}
+		err := awsErr.OrigErr()
+		netErr, netOK := err.(net.Error)
+		timeoutErr = netOK && netErr.Temporary()
+		if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
+			errStr = urlErr.Err.Error()
+		}
+	}
+
+	// There can be two types of canceled errors here.
+	// The first being a net.Error and the other being an error.
+	// If the request was timed out, we want to continue the retry
+	// process. Otherwise, return the canceled error.
+	return timeoutErr ||
+		(errStr != "net/http: request canceled" &&
+			errStr != "net/http: request canceled while waiting for connection")
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100644
index 0000000000..869b97a1a0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -0,0 +1,39 @@
+// +build !go1.8
+
+package request
+
+import "io"
+
+// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error)         { return 0, io.EOF }
+func (noBody) Close() error                     { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// NoBody is an empty reader that will trigger the Go HTTP client to not include
+// any body in the HTTP request.
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+func (r *Request) ResetBody() {
+	body, err := r.getNextRequestBody()
+	if err != nil {
+		r.Error = err
+		return
+	}
+
+	r.HTTPRequest.Body = body
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 0000000000..c32fc69bc5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,33 @@
+// +build go1.8
+
+package request
+
+import (
+	"net/http"
+)
+
+// NoBody is a http.NoBody reader instructing Go HTTP client to not include
+// any body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set Go 1.8's http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+	body, err := r.getNextRequestBody()
+	if err != nil {
+		r.Error = err
+		return
+	}
+
+	r.HTTPRequest.Body = body
+	r.HTTPRequest.GetBody = r.getNextRequestBody
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 0000000000..a7365cd1e4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+	r.context = ctx
+	r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 0000000000..307fa0705b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+	r.context = ctx
+	r.HTTPRequest.Cancel = ctx.Done()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 0000000000..59de6736b6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,236 @@
+package request
+
+import (
+	"reflect"
+	"sync/atomic"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// A Pagination provides pagination of SDK API operations which are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operations method to automatically perform pagination for you. Such as the
+// "S3.ListObjectsPages" and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+//	cont := true
+//	for p.Next() && cont {
+//		data := p.Page().(*s3.ListObjectsOutput)
+//		// process the page's data
+//	}
+//	return p.Err()
+//
+// See service client API operation Pages methods for examples how the SDK will
+// use the Pagination type.
+type Pagination struct {
+	// Function to return a Request value for each pagination request.
+	// Any configuration or handlers that need to be applied to the request
+	// prior to getting the next page should be done here before the request
+	// is returned.
+	//
+	// NewRequest should always be built from the same API operations. It is
+	// undefined if different API operations are returned on subsequent calls.
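+	//
+	// An illustrative assignment (the service client and input value are
+	// hypothetical):
+	//
+	//	p := Pagination{
+	//		NewRequest: func() (*Request, error) {
+	//			req, _ := svc.ListThingsRequest(input) // hypothetical operation
+	//			return req, nil
+	//		},
+	//	}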
+	NewRequest func() (*Request, error)
+
+	started    bool
+	nextTokens []interface{}
+
+	err     error
+	curPage interface{}
+}
+
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+	return !(p.started && len(p.nextTokens) == 0)
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+	return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+	return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a page
+// is retrieved true will be returned. If the page cannot be retrieved, or there
+// are no more pages, false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred if Next returns false.
+func (p *Pagination) Next() bool {
+	if !p.HasNextPage() {
+		return false
+	}
+
+	req, err := p.NewRequest()
+	if err != nil {
+		p.err = err
+		return false
+	}
+
+	if p.started {
+		for i, intok := range req.Operation.InputTokens {
+			awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+		}
+	}
+	p.started = true
+
+	err = req.Send()
+	if err != nil {
+		p.err = err
+		return false
+	}
+
+	p.nextTokens = req.nextPageTokens()
+	p.curPage = req.Data
+
+	return true
+}
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iterating between pages of an API. The
+// Paginator type is only used to store the token metadata the SDK should use
+// for performing pagination.
+type Paginator struct {
+	InputTokens     []string
+	OutputTokens    []string
+	LimitToken      string
+	TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
+func (r *Request) nextPageTokens() []interface{} {
+	if r.Operation.Paginator == nil {
+		return nil
+	}
+	if r.Operation.TruncationToken != "" {
+		tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+		if len(tr) == 0 {
+			return nil
+		}
+
+		switch v := tr[0].(type) {
+		case *bool:
+			if !aws.BoolValue(v) {
+				return nil
+			}
+		case bool:
+			if !v {
+				return nil
+			}
+		}
+	}
+
+	tokens := []interface{}{}
+	tokenAdded := false
+	for _, outToken := range r.Operation.OutputTokens {
+		v, _ := awsutil.ValuesAtPath(r.Data, outToken)
+		if len(v) > 0 {
+			tokens = append(tokens, v[0])
+			tokenAdded = true
+		} else {
+			tokens = append(tokens, nil)
+		}
+	}
+	if !tokenAdded {
+		return nil
+	}
+
+	return tokens
+}
+
+// Ensure a deprecated item is only logged once instead of each time it's used.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+	if logger == nil {
+		return
+	}
+	if atomic.CompareAndSwapInt32(flag, 0, 1) {
+		logger.Log(msg)
+	}
+}
+
+var (
+	logDeprecatedHasNextPage int32
+	logDeprecatedNextPage    int32
+	logDeprecatedEachPage    int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) HasNextPage() bool {
+	logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+		"Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+	return len(r.nextPageTokens()) > 0
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) NextPage() *Request {
+	logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+		"Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+	tokens := r.nextPageTokens()
+	if len(tokens) == 0 {
+		return nil
+	}
+
+	data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+	nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+	for i, intok := range nr.Operation.InputTokens {
+		awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+	}
+	return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+//	func(page *T, lastPage bool) bool {
+//		return true // return false to stop iterating
++//	}
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+	logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+		"Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+	for page := r; page != nil; page = page.NextPage() {
+		if err := page.Send(); err != nil {
+			return err
+		}
+		if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+			return page.Error
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 0000000000..8d369c1b8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,161 @@
+package request
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the service.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+	RetryRules(*Request) time.Duration
+	ShouldRetry(*Request) bool
+	MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+	cfg.Retryer = retryer
+	return cfg
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
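+//
+// An illustrative check using the helpers defined below (the error value is
+// hypothetical):
+//
+//	if aerr, ok := err.(awserr.Error); ok && IsErrorRetryable(aerr) {
+//		// safe to retry, e.g. a "RequestTimeout" response code
+//	}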
+var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: struct{}{}, + ErrCodeRead: struct{}{}, +} + +type temporaryError interface { + Temporary() bool +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporaryError); ok { + return t.Temporary() + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + } + } + return false +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) + } + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) + } + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + return IsErrorThrottle(r.Error) +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. 
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+	return IsErrorExpiredCreds(r.Error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644
index 0000000000..09a44eb987
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package request
+
+import (
+	"io"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+	ErrCodeResponseTimeout,
+	"read on body has reached the timeout limit",
+	nil,
+)
+
+type readResult struct {
+	n   int
+	err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return an ErrCodeResponseTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+	reader   io.ReadCloser
+	duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+	timer := time.NewTimer(r.duration)
+	c := make(chan readResult, 1)
+
+	go func() {
+		n, err := r.reader.Read(b)
+		timer.Stop()
+		c <- readResult{n: n, err: err}
+	}()
+
+	select {
+	case data := <-c:
+		return data.n, data.err
+	case <-timer.C:
+		return 0, timeoutErr
+	}
+}
+
+func (r *timeoutReadCloser) Close() error {
+	return r.reader.Close()
+}
+
+const (
+	// HandlerResponseTimeout is what we use to signify the name of the
+	// response timeout handler.
+	HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace any top level error
+// with an ErrCodeResponseTimeout error, if that is what its underlying error is.
+func adaptToResponseTimeoutError(req *Request) {
+	if err, ok := req.Error.(awserr.Error); ok {
+		aerr, ok := err.OrigErr().(awserr.Error)
+		if ok && aerr.Code() == ErrCodeResponseTimeout {
+			req.Error = aerr
+		}
+	}
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per read timeouts. If a timeout occurred, we will return the
+// ErrCodeResponseTimeout.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+	return func(r *Request) {
+
+		var timeoutHandler = NamedHandler{
+			HandlerResponseTimeout,
+			func(req *Request) {
+				req.HTTPResponse.Body = &timeoutReadCloser{
+					reader:   req.HTTPResponse.Body,
+					duration: duration,
+				}
+			}}
+
+		// remove the handler so we are not stomping over any new durations.
+		r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+		r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+		r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+		r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 0000000000..4012462282
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,234 @@
+package request
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+	// InvalidParameterErrCode is the error code for invalid parameters errors
+	InvalidParameterErrCode = "InvalidParameter"
+	// ParamRequiredErrCode is the error code for required parameter errors
+	ParamRequiredErrCode = "ParamRequiredError"
+	// ParamMinValueErrCode is the error code for fields with too low of a
+	// number value.
+	ParamMinValueErrCode = "ParamMinValueError"
+	// ParamMinLenErrCode is the error code for fields without enough elements.
+	ParamMinLenErrCode = "ParamMinLenError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+	Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+	// Context is the base context of the invalid parameter group.
+	Context string
+	errs    []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+	err.SetContext(e.Context)
+	e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// and base context updated to reflect the merging.
+//
+// Use for nested validation errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+	for _, err := range nested.errs {
+		err.SetContext(e.Context)
+		err.AddNestedContext(nestedCtx)
+		e.errs = append(e.errs, err)
+	}
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+	return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+	return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+	return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
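+//
+// For example (the input type and field names are hypothetical), two
+// accumulated errors render as:
+//
+//	InvalidParameter: 2 validation error(s) found.
+//	- missing required field, HypotheticalInput.Name.
+//	- minimum field size of 1, HypotheticalInput.Tags.
+//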
+func (e ErrInvalidParams) Error() string {
+	w := &bytes.Buffer{}
+	fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+	for _, err := range e.errs {
+		fmt.Fprintf(w, "- %s\n", err.Message())
+	}
+
+	return w.String()
+}
+
+// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+	return awserr.NewBatchError(
+		InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+	errs := make([]error, len(e.errs))
+	for i := 0; i < len(errs); i++ {
+		errs[i] = e.errs[i]
+	}
+
+	return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+	awserr.Error
+
+	// Field name the error occurred on.
+	Field() string
+
+	// SetContext updates the context of the error.
+	SetContext(string)
+
+	// AddNestedContext updates the error's context to include a nested level.
+	AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+	context       string
+	nestedContext string
+	field         string
+	code          string
+	msg           string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+	return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+	return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+	return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+	return nil
+}
+
+// Field returns the field and context the error occurred on.
+func (e *errInvalidParam) Field() string {
+	field := e.context
+	if len(field) > 0 {
+		field += "."
+	}
+	if len(e.nestedContext) > 0 {
+		field += fmt.Sprintf("%s.", e.nestedContext)
+	}
+	field += e.field
+
+	return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+	e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+	} else {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+	}
+
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+	errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+	return &ErrParamRequired{
+		errInvalidParam{
+			code:  ParamRequiredErrCode,
+			field: field,
+			msg:   "missing required field",
+		},
+	}
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+	errInvalidParam
+	min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+	return &ErrParamMinValue{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinValueErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field value of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+	return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
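+//
+// A sketch of how a Validate implementation might use these helpers (the
+// input type and field names are hypothetical):
+//
+//	func (in *HypotheticalInput) Validate() error {
+//		params := ErrInvalidParams{Context: "HypotheticalInput"}
+//		if in.Name == nil {
+//			params.Add(NewErrParamRequired("Name"))
+//		}
+//		if in.Tags != nil && len(in.Tags) < 1 {
+//			params.Add(NewErrParamMinLen("Tags", 1))
+//		}
+//		if params.Len() > 0 {
+//			return params
+//		}
+//		return nil
+//	}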
+type ErrParamMinLen struct {
+	errInvalidParam
+	min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+	return &ErrParamMinLen{
+		errInvalidParam: errInvalidParam{
+			code:  ParamMinLenErrCode,
+			field: field,
+			msg:   fmt.Sprintf("minimum field size of %v", min),
+		},
+		min: min,
+	}
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+	return e.min
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 0000000000..22d2f80980
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,287 @@
+package request
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a waiter option which sets the maximum number
+// of times the waiter should attempt to check the resource for the target state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+	return func(w *Waiter) {
+		w.MaxAttempts = max
+	}
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+	return func(attempt int) time.Duration {
+		return delay
+	}
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+	return func(w *Waiter) {
+		w.Delay = delayer
+	}
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+	return func(w *Waiter) {
+		w.Logger = logger
+	}
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+	return func(w *Waiter) {
+		w.RequestOptions = append(w.RequestOptions, opts...)
+	}
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+	Name      string
+	Acceptors []WaiterAcceptor
+	Logger    aws.Logger
+
+	MaxAttempts int
+	Delay       WaiterDelay
+
+	RequestOptions []Option
+	NewRequest     func([]Option) (*Request, error)
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
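+//
+// An illustrative configuration using the options defined above (the values
+// are hypothetical):
+//
+//	w := Waiter{Name: "WaitUntilHypothetical"}
+//	w.ApplyOptions(
+//		WithWaiterMaxAttempts(20),
+//		WithWaiterDelay(ConstantWaiterDelay(5*time.Second)),
+//	)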
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+	for _, fn := range opts {
+		fn(w)
+	}
+}
+
+// WaiterState are states the waiter uses based on WaiterAcceptor definitions
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+	switch s {
+	case SuccessWaiterState:
+		return "success"
+	case FailureWaiterState:
+		return "failure"
+	case RetryWaiterState:
+		return "retry"
+	default:
+		return "unknown waiter state"
+	}
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+	SuccessWaiterState WaiterState = iota // waiter successful
+	FailureWaiterState                    // waiter failed
+	RetryWaiterState                      // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API response to identify target
+// resource states.
+const (
+	PathAllWaiterMatch  WaiterMatchMode = iota // match on all paths
+	PathWaiterMatch                            // match on specific path
+	PathAnyWaiterMatch                         // match on any path
+	PathListWaiterMatch                        // match on list of paths
+	StatusWaiterMatch                          // match on status code
+	ErrorWaiterMatch                           // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+	switch m {
+	case PathAllWaiterMatch:
+		return "pathAll"
+	case PathWaiterMatch:
+		return "path"
+	case PathAnyWaiterMatch:
+		return "pathAny"
+	case PathListWaiterMatch:
+		return "pathList"
+	case StatusWaiterMatch:
+		return "status"
+	case ErrorWaiterMatch:
+		return "error"
+	default:
+		return "unknown waiter match mode"
+	}
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors is
+// reached, or the max attempts are exhausted.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts are exhausted.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+
+	for attempt := 1; ; attempt++ {
+		req, err := w.NewRequest(w.RequestOptions)
+		if err != nil {
+			waiterLogf(w.Logger, "unable to create request %v", err)
+			return err
+		}
+		req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+		err = req.Send()
+
+		// See if any of the acceptors match the request's response, or error
+		for _, a := range w.Acceptors {
+			if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+				return matchErr
+			}
+		}
+
+		// The Waiter should only check the resource state MaxAttempts times
+		// This is here instead of in the for loop above to prevent delaying
+		// unnecessarily when the waiter will not retry.
+ if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else if err := aws.SleepWithContext(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. +func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 0000000000..ea7b886f81 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,273 @@ +/* +Package session provides configuration for the SDK's service clients. + +Sessions can be shared across all service clients that share the same base +configuration. The Session is built from the SDK's default configuration and +request handlers. + +Sessions should be cached when possible, because creating a new Session will +load all configuration values from the environment, and config files each time +the Session is created. 
Sharing the Session value across all of your service
+clients will ensure the configuration is loaded the fewest number of times possible.
+
+Concurrency
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. The SDK will not modify the Session once the Session has been created.
+Creating service clients concurrently from a shared Session is safe.
+
+Sessions from Shared Config
+
+Sessions can be created using New or NewSession, which will only load the
+additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
+Alternatively you can explicitly create a Session with shared config enabled.
+To do this you can use NewSessionWithOptions to configure how the Session will
+be created. Using the NewSessionWithOptions with SharedConfigState set to
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
+environment variable was set.
+
+Creating Sessions
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. See the section Sessions from Shared Config for
+more information.
+
+Create a Session with the default config and request handlers. With credentials,
+region, and profile loaded from the environment and shared config automatically.
+Requires the AWS_PROFILE to be set, or "default" is used.
+
+	// Create Session
+	sess := session.Must(session.NewSession())
+
+	// Create a Session with a custom region
+	sess := session.Must(session.NewSession(&aws.Config{
+		Region: aws.String("us-east-1"),
+	}))
+
+	// Create an S3 client instance from a session
+	sess := session.Must(session.NewSession())
+
+	svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+	// Equivalent to session.NewSession()
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		// Options
+	}))
+
+	// Specify profile to load for the session's config
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		Profile: "profile_name",
+	}))
+
+	// Specify profile for config and region for requests
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		Config:  aws.Config{Region: aws.String("us-east-1")},
+		Profile: "profile_name",
+	}))
+
+	// Force enable Shared Config support
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		SharedConfigState: session.SharedConfigEnable,
+	}))
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers.
For example, the following
+handler logs every request and its payload made by a service client:
+
+	// Create a session, and add additional handlers for all service
+	// clients created with the Session to inherit. Adds logging handler.
+	sess := session.Must(session.NewSession())
+
+	sess.Handlers.Send.PushFront(func(r *request.Request) {
+		// Log every request made and its payload
+		logger.Printf("Request: %s/%s, Payload: %s",
+			r.ClientInfo.ServiceName, r.Operation, r.Params)
+	})
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
+option is used to create the Session, the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. Credentials values from a configuration file will need to include
+both aws_access_key_id and aws_secret_access_key, provided together in the
+same file, to be considered valid. The values will be ignored if not a complete
+group. aws_session_token is an optional field that can be provided if both of
+the other two fields are also provided.
+
+	aws_access_key_id = AKID
+	aws_secret_access_key = SECRET
+	aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with MFA token if the session option AssumeRoleTokenProvider
+is set.
+
+	role_arn = arn:aws:iam:::role/
+	source_profile = profile_with_creds
+	external_id = 1234
+	mfa_serial =
+	role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+	region = us-east-1
+
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA the SharedConfigState
+session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG
+environment variable set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration the SDK will assume the role; if the
+AssumeRoleTokenProvider session option is not set, an error will be returned
+when creating the session.
+
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+	}))
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess)
+
+To set up assume role outside of a session see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set both Access Key ID and Secret Access
+Key must be provided. A Session Token can optionally also be provided, but is
+not required.
+
+	# Access Key ID
+	AWS_ACCESS_KEY_ID=AKID
+	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+	# Secret Access Key
+	AWS_SECRET_ACCESS_KEY=SECRET
+	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+	# Session Token
+	AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it
+is not provided in the environment the region must be provided before a service
+client request is made.
+
+	AWS_REGION=us-east-1
+
+	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_REGION is not also set.
+	AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+	AWS_PROFILE=my_profile
+
+	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_PROFILE is not also set.
+	AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+	AWS_SDK_LOAD_CONFIG=1
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+	AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+	AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned.
If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to also enable this feature. CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option and a custom HTTP client, the HTTP client needs to be provided
+when creating the session, not when creating the service client.
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 0000000000..7357e545ac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,188 @@
+package session
+
+import (
+	"os"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// envConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+	// Environment configuration values. If set both Access Key ID and Secret Access
+	// Key must be provided. A Session Token can optionally also be provided, but is
+	// not required.
+	//
+	// # Access Key ID
+	// AWS_ACCESS_KEY_ID=AKID
+	// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	// # Secret Access Key
+	// AWS_SECRET_ACCESS_KEY=SECRET
+	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	// # Session Token
+	// AWS_SESSION_TOKEN=TOKEN
+	Creds credentials.Value
+
+	// Region value will instruct the SDK where to make service API requests to. If it
+	// is not provided in the environment the region must be provided before a service
+	// client request is made.
+	//
+	// AWS_REGION=us-east-1
+	//
+	// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_REGION is not also set.
+	// AWS_DEFAULT_REGION=us-east-1
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided "default" will be used as the
+	// profile name.
+	//
+	// AWS_PROFILE=my_profile
+	//
+	// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_PROFILE is not also set.
+	// AWS_DEFAULT_PROFILE=my_profile
+	Profile string
+
+	// SDK load config instructs the SDK to load the shared config in addition to
+	// shared credentials. This also expands the configuration loaded from the shared
+	// credentials to have parity with the shared config file. This also enables
+	// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+	// env values as well.
+	//
+	// AWS_SDK_LOAD_CONFIG=1
+	EnableSharedConfig bool
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+	//
+	// AWS_CONFIG_FILE=$HOME/my_shared_config
+	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not a http.Transport an error will be returned. If the
+	// Transport's TLS config is set this option will cause the
+	// SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option and a custom HTTP client, the HTTP client needs to be provided
+	// when creating the session, not when creating the service client.
+	//
+	// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+}
+
+var (
+	credAccessEnvKey = []string{
+		"AWS_ACCESS_KEY_ID",
+		"AWS_ACCESS_KEY",
+	}
+	credSecretEnvKey = []string{
+		"AWS_SECRET_ACCESS_KEY",
+		"AWS_SECRET_KEY",
+	}
+	credSessionEnvKey = []string{
+		"AWS_SESSION_TOKEN",
+	}
+
+	regionEnvKeys = []string{
+		"AWS_REGION",
+		"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+	}
+	profileEnvKeys = []string{
+		"AWS_PROFILE",
+		"AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+	}
+	sharedCredsFileEnvKey = []string{
+		"AWS_SHARED_CREDENTIALS_FILE",
+	}
+	sharedConfigFileEnvKey = []string{
+		"AWS_CONFIG_FILE",
+	}
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() envConfig {
+	enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+	return envConfigLoad(enableSharedConfig)
+}
+
+// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
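+//
+// Illustrative comparison (assuming no other environment overrides are set):
+//
+//	os.Setenv("AWS_SDK_LOAD_CONFIG", "1") // any strconv.ParseBool truthy value
+//	a := loadEnvConfig()       // a.EnableSharedConfig == true
+//	b := loadSharedEnvConfig() // b.EnableSharedConfig == true, env var not consulted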
+func loadSharedEnvConfig() envConfig {
+	return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) envConfig {
+	cfg := envConfig{}
+
+	cfg.EnableSharedConfig = enableSharedConfig
+
+	setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
+	setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
+	setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
+
+	// Require logical grouping of credentials
+	if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
+		cfg.Creds = credentials.Value{}
+	} else {
+		cfg.Creds.ProviderName = "EnvConfigCredentials"
+	}
+
+	regionKeys := regionEnvKeys
+	profileKeys := profileEnvKeys
+	if !cfg.EnableSharedConfig {
+		regionKeys = regionKeys[:1]
+		profileKeys = profileKeys[:1]
+	}
+
+	setFromEnvVal(&cfg.Region, regionKeys)
+	setFromEnvVal(&cfg.Profile, profileKeys)
+
+	setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
+	setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
+
+	cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+
+	return cfg
+}
+
+func setFromEnvVal(dst *string, keys []string) {
+	for _, k := range keys {
+		if v := os.Getenv(k); len(v) > 0 {
+			*dst = v
+			break
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 0000000000..9f75d5ac58
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,606 @@
+package session
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ClientConfigProvider.
+type Session struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value, the New
+// method could now encounter an error when loading the configuration. When
+// the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+	// load initial config from environment
+	envCfg := loadEnvConfig()
+
+	if envCfg.EnableSharedConfig {
+		s, err := newSession(Options{}, envCfg, cfgs...)
+		if err != nil {
+			// Old session.New expected all errors to be discovered when
+			// a request is made, and would report the errors then. This
+			// needs to be replicated if an error occurs while creating
+			// the session.
+			msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+				"Use session.NewSession to handle errors occurring during session creation."
+
+			// Session creation failed, need to report the error and prevent
+			// any requests from succeeding.
+			s = &Session{Config: defaults.Config()}
+			s.Config.MergeIn(cfgs...)
+			s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+			s.Handlers.Validate.PushBack(func(r *request.Request) {
+				r.Error = err
+			})
+		}
+		return s
+	}
+
+	return deprecatedNewSession(cfgs...)
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created. Such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+	opts := Options{}
+	opts.Config.MergeIn(cfgs...)
+
+	return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+	// SharedConfigStateFromEnv does not override any state of the
+	// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+	// SharedConfigState type.
+	SharedConfigStateFromEnv SharedConfigState = iota
+
+	// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and disables the shared config functionality.
+	SharedConfigDisable
+
+	// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and enables the shared config functionality.
+	SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+	// Provides config values for the SDK to use when creating service clients
+	// and making API requests to services. Any value set in this field
+	// will override the associated value provided by the SDK defaults,
+	// environment or config files where relevant.
+	//
+	// If not set, configuration values from SDK defaults, environment,
+	// config will be used.
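+	//
+	// For example (illustrative):
+	//
+	//	opts := Options{Config: aws.Config{Region: aws.String("us-west-2")}}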
+ Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). + // + // If not set and environment variables are not set the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. + // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It will override the AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE + // environment variables. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called whenever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommended to + // share the session where possible. + // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role with MFA via the mfa_serial field. + AssumeRoleTokenProvider func() (string, error) + + // Reader for a custom Certificate Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not a http.Transport an error will be + // returned. If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // The Session option CustomCABundle is also available when creating sessions + // to also enable this feature. The CustomCABundle session option field has priority + // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
Values set in both the +// shared config and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + if opts.SharedConfigState == SharedConfigEnable { + envCfg = loadSharedEnvConfig() + } else { + envCfg = loadEnvConfig() + } + + if len(opts.Profile) > 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + if len(envCfg.SharedCredentialsFile) == 0 { + envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(envCfg.SharedConfigFile) == 0 { + envCfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func deprecatedNewSession(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Get a merged version of the user provided config to determine if + // credentials were set. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + + // Ordered config files will be loaded, with later files overwriting + // previous config file values.
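+ // For example, with shared config enabled the default order below is + // [~/.aws/config, ~/.aws/credentials], so a region set in the shared + // credentials file wins over one set in the shared config file.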
+ var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + if err != nil { + return nil, err + } + + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + // Set up the HTTP client with a custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } + + return s, nil +} + +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + t = &http.Transport{} + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + // No underlying error is available here; the PEM data simply did + // not contain any usable certificates. + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", nil) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { + // Merge in user provided configuration + cfg.MergeIn(userCfg) + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + // Configure credentials if not already set + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + if len(envCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { + cfgCp := *cfg + cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.AssumeRoleSource.Creds, + ) + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA.
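+ // For example (an illustrative option value): + // Options{AssumeRoleTokenProvider: stscreds.StdinTokenProvider}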
+ return AssumeRoleTokenProviderNotSetError{} + } + cfg.Credentials = stscreds.NewCredentials( + &Session{ + Config: &cfgCp, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + + // Assume role with external ID + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.AssumeRole.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ) + } else if len(sharedCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + } else { + // Fallback to default credentials provider, include mock errors + // for the credential chain so the user can identify why credentials + // failed to be retrieved. + cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, + &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + } + + return nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a session +// and the AssumeRoleTokenProvider option is not set while the shared config is +// configured to assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error. +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set." +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +var emptyCreds = credentials.Value{} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + // For backwards compatibility the error is discarded if the user calls + // ClientConfig directly. All SDK services will use clientConfigWithErr. + cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) + + return cfg +} + +func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + var err error + + region := aws.StringValue(s.Config.Region) + + if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { + resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } else { + resolved, err = s.Config.EndpointResolver.EndpointFor( + serviceName, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) + opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningName: resolved.SigningName, + }, err +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...)
+ + var resolved endpoints.ResolvedEndpoint + + region := aws.StringValue(s.Config.Region) + + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningName: resolved.SigningName, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 0000000000..09c8e5bc7a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,295 @@ +package session + +import ( + "fmt" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/go-ini/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // Additional Config fields + regionKey = `region` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` +) + +type assumeRoleConfig struct { + RoleARN string + SourceProfile string + ExternalID string + MFASerial string + RoleSessionName string +} + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + AssumeRole assumeRoleConfig + AssumeRoleSource *sharedConfig + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. + // + // region + Region string +} + +type sharedConfigFile struct { + Filename string + IniData *ini.File +} + +// loadSharedConfig retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files is A then B, B's credential values will be used instead of A's. +// +// See sharedConfig.setFromIniFile for information on how the config files +// will be loaded.
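+// +// For example (illustrative file contents; the profile name and ARN are +// hypothetical): +// +// [profile assume_role_profile] +// role_arn = arn:aws:iam::123456789012:role/example +// source_profile = default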
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + if err = cfg.setFromIniFiles(profile, files); err != nil { + return sharedConfig{}, err + } + + if len(cfg.AssumeRole.SourceProfile) > 0 { + if err := cfg.setAssumeRoleSource(profile, files); err != nil { + return sharedConfig{}, err + } + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + b, err := ioutil.ReadFile(filename) + if err != nil { + // Skip files which can't be opened and read for whatever reason + continue + } + + f, err := ini.Load(b) + if err != nil { + return nil, SharedConfigLoadError{Filename: filename, Err: err} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: f, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { + var assumeRoleSrc sharedConfig + + // Multiple level assume role chains are not supported + if cfg.AssumeRole.SourceProfile == origProfile { + assumeRoleSrc = *cfg + assumeRoleSrc.AssumeRole = assumeRoleConfig{} + } else { + err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) + if err != nil { + return err + } + } + + if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { + return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + } + + cfg.AssumeRoleSource = &assumeRoleSrc + + return nil +} + +func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { + // Apply the profile from each file, skipping files where the profile + // is not present. + for _, f := range files { + if err := cfg.setFromIniFile(profile, f); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore missing profiles + continue + } + return err + } + } + + return nil +} + +// setFromIniFile loads the configuration from the file using +// the profile provided. A sharedConfig pointer type value is used so that +// multiple config file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config, such as credentials. For example, +// if a config file only includes aws_access_key_id but no aws_secret_access_key, +// the aws_access_key_id will be ignored.
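+// +// For example (illustrative), a profile containing only +// +// [incomplete] +// aws_access_key_id = AKID +// +// sets no credentials, because aws_secret_access_key is missing from the group.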
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { + section, err := file.IniData.GetSection(profile) + if err != nil { + // Fall back to the alternate profile name format: "profile <name>" + section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if err != nil { + return SharedConfigProfileNotExistsError{Profile: profile, Err: err} + } + } + + // Shared Credentials + akid := section.Key(accessKeyIDKey).String() + secret := section.Key(secretAccessKey).String() + if len(akid) > 0 && len(secret) > 0 { + cfg.Creds = credentials.Value{ + AccessKeyID: akid, + SecretAccessKey: secret, + SessionToken: section.Key(sessionTokenKey).String(), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + } + + // Assume Role + roleArn := section.Key(roleArnKey).String() + srcProfile := section.Key(sourceProfileKey).String() + if len(roleArn) > 0 && len(srcProfile) > 0 { + cfg.AssumeRole = assumeRoleConfig{ + RoleARN: roleArn, + SourceProfile: srcProfile, + ExternalID: section.Key(externalIDKey).String(), + MFASerial: section.Key(mfaSerialKey).String(), + RoleSessionName: section.Key(roleSessionNameKey).String(), + } + } + + // Region + if v := section.Key(regionKey).String(); len(v) > 0 { + cfg.Region = v + } + + return nil +} + +// SharedConfigLoadError is an error for when the shared config file fails to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error. +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile is not found in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error. +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error. +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", + e.RoleARN) +} + +// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 0000000000..244c86da05 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// rules houses a set of rules needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool } + +// IsValid will iterate through all rules and see if any rules +// apply to the value, and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for mapRule checks whether the value exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for blacklist checks if the value is not within the blacklist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules allows rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 0000000000..6aa2ed241b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field of +// the signer to true.
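+// +// A minimal usage sketch, assuming creds is an already constructed +// *credentials.Credentials value: +// +// signer := v4.NewSigner(creds, v4.WithUnsignedPayload)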
+func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 0000000000..bd082e9d1f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 0000000000..6f7a4b1df1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,761 @@ +// Package v4 implements signing for the AWS V4 signer +// +// Provides request signing for requests that need to be signed with +// AWS V4 Signatures. +// +// Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// logic when using Go v1.5 or higher. The signer does this by taking advantage +// of the URL.EscapedPath method. If your request URI requires additional escaping +// you may need to use the URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "///" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath() +// method and use the returned value. If you're using Go v1.4 you must set +// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with +// Go v1.5 the signer will fall back to URL.Path. +// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of this escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query the signature was generated for. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, when using the signer outside of the SDK, explicitly +// escaping the request prior to signing is preferable, and will help prevent +// signature validation errors. This can be done by setting the URL.Opaque or +// URL.RawPath. The SDK will use URL.Opaque first and then call URL.EscapedPath() +// if Opaque is not set. +// +// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2 +// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the +// request URL. https://github.com/golang/go/issues/16847 points to a bug in +// Go pre 1.8 that fails to make HTTP2 requests using an absolute URL in the HTTP +// message. URL.Opaque generally will force Go to make requests with absolute URL. +// URL.RawPath does not do this, but RawPath must be a valid escaping of Path +// or url.EscapedPath will ignore the RawPath escaping.
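+// +// For example, a sketch of how Go populates the pre-escaped form: +// +// u, _ := url.Parse("https://example.com/reserved%2Fpath") +// // u.Path == "/reserved/path", u.RawPath == "/reserved%2Fpath" +// // u.EscapedPath() returns the RawPath form, which the signer will use.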
+// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. +package v4 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for building canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedQueryHoisting is a whitelist for building query parameters +// from headers. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to a given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger that logging information will be written to.
If the logger +// is nil, nothing will be logged. + Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // signature's canonical string's path. For services that do not need additional + // escaping, use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // Disables the automatic setting of the HTTP request's Body field with the + // io.ReadSeeker passed in to the signer. This is useful if you're using a + // custom wrapper around the body for the io.ReadSeeker and want to preserve + // the Body value on the Request.Body. + // + // This does run the risk of signing a request with a body that will not be + // sent in the request. You need to ensure that the underlying data of the Body + // values are the same. + DisableRequestBodyOverwrite bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time + + // UnsignedPayload will prevent signing of the payload. This will only + // work for services that have support for this. + UnsignedPayload bool +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If no options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + DisableURIPathEscaping bool + + credValues credentials.Value + isPresign bool + formattedTime string + formattedShortTime string + unsignedPayload bool + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. +// +// Sign will set the request's Body to be the `body` parameter passed in. If +// the body is not already an io.ReadCloser, it will be wrapped within one. If +// a `nil` body parameter is passed to Sign, the request's Body field will +// also be set to nil. It's important to note that this functionality will not +// change the request's ContentLength.
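+// +// A minimal usage sketch (the request values are illustrative, and creds is +// an already constructed *credentials.Credentials value): +// +// body := strings.NewReader("payload") +// req, _ := http.NewRequest("PUT", "https://examplebucket.s3.amazonaws.com/key", nil) +// signer := v4.NewSigner(creds) +// _, err := signer.Sign(req, body, "s3", "us-east-1", time.Now())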
+// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The request's body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This allows you to +// set when the request will expire. +// +// The request's body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning an S3 request will not compute the body's SHA256 hash by default, +// because the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature.
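+// +// A minimal usage sketch (the request values are illustrative, and creds is +// an already constructed *credentials.Credentials value): +// +// req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil) +// signer := v4.NewSigner(creds) +// _, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now()) +// // req.URL now carries the X-Amz-* query parameters, valid for 15 minutes.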
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: exp != 0, + ServiceName: service, + Region: region, + DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, + } + + for key := range ctx.Query { + sort.Strings(ctx.Query[key]) + } + + if ctx.isRequestSigned() { + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.Get() + if err != nil { + return http.Header{}, err + } + + ctx.assignAmzQueryValues() + ctx.build(v4.DisableHeaderHoisting) + + // If the request is not presigned the body should be attached to it. This + // prevents the confusion of wanting to send a signed request without + // the body the request was signed for attached. + if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { + var reader io.ReadCloser + if body != nil { + var ok bool + if reader, ok = body.(io.ReadCloser); !ok { + reader = ioutil.NopCloser(body) + } + } + r.Body = reader + } + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and the request needs to be re-signed because it will + // otherwise fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stay in + // sync in case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client requests using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. This +// request handler should only be used with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. +func SignSDKRequest(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now) +} + +// BuildNamedHandler will build a generic handler for signing.
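+// +// For example, a sketch of a handler that signs with an unsigned payload +// (the handler name is illustrative): +// +// handler := v4.BuildNamedHandler("v4.CustomSignRequestHandler", v4.WithUnsignedPayload)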
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now, opts...) + }, + } +} + +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { + // Skip signing the request if the AnonymousCredentials object is + // used; such requests do not need to be signed. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + if name == "s3" { + // S3 service should not have any escaping applied + v4.DisableURIPathEscaping = true + } + // Prevents setting the HTTPRequest's Body. Since the Body could be + // wrapped in a custom io.Closer that we do not want to be stomped + // on by the signer. + v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + signingTime := req.Time + if !req.LastSignedAt.IsZero() { + signingTime = req.LastSignedAt + } + + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, signingTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTimeFn() +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildBodyDigest() + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if
ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + ctx.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + + ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { + hash = "UNSIGNED-PAYLOAD" + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + 
ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// removePresign removes signing flags for both signed and presigned requests. +func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} + +const doubleSpaces = "  " + +var doubleSpaceBytes = []byte(doubleSpaces) + +func stripExcessSpaces(headerVals []string) []string { + vals := make([]string, len(headerVals)) + for i, str := range headerVals { + // Trim leading and trailing spaces + trimmed := strings.TrimSpace(str) + + idx := strings.Index(trimmed, doubleSpaces) + var buf []byte + for idx > -1 { + // Multiple adjacent spaces found + if buf == nil { + // first time create the buffer + buf = []byte(trimmed) + } + + stripToIdx := -1 + for j := idx + 1; j < len(buf); j++ { + if buf[j] != ' ' { + buf = append(buf[:idx+1], buf[j:]...) + stripToIdx = j + break + } + } + + if stripToIdx >= 0 { + idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) + if idx >= 0 { + idx += stripToIdx + } + } else { + idx = -1 + } + } + + if buf != nil { + vals[i] = string(buf) + } else { + vals[i] = trimmed + } + } + return vals +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 0000000000..0e2d864e10 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,118 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Should +// only be used with an io.Reader that is also an io.Seeker. Not doing so may +// cause request signature errors, or request bodies not being sent for GET, HEAD +// and DELETE HTTP methods. +// +// Deprecated: Should only be used with io.ReadSeeker. If using for +// S3 PutObject to stream content use s3manager.Uploader instead. +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to the size of p. The number of bytes read, +// and error if one occurred, will be returned. +// +// If the underlying reader is not an io.Reader, zero bytes read and a nil error will be returned.
+// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the underlying reader is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// Close closes the ReaderSeekerCloser. +// +// If the underlying reader is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface. +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided. +// The number of bytes written, or an error, will be returned. Can overwrite +// previously written slices if the writes overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 0000000000..6192b2455b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
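+// +// For example, a URL value of "https://example.com:8080/path" yields +// "example.com".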
+func URLHostname(url *url.URL) string {
+	return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 0000000000..0210d2720e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+	"net/url"
+	"strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+	return stripPort(url.Host)
+}
+
+// stripPort is a copy of Go 1.8's url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 0000000000..b5de9e2e62
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.9.44"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
new file mode 100644
index 0000000000..ebcbc2b40a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
@@ -0,0 +1,40 @@
+package shareddefaults
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+	return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+	if runtime.GOOS == "windows" { // Windows
+		return os.Getenv("USERPROFILE")
+	}
+
+	// *nix
+	return os.Getenv("HOME")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 0000000000..53831dff98
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+	"crypto/rand"
+	"fmt"
+	"reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an idempotency token.
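+// As an illustration, a hypothetical eligible field (not part of this file)
+// would be declared in an API input struct as:
+//
+//	ClientToken *string `type:"string" idempotencyToken:"true"`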
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// and are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+	switch u := v.Interface().(type) {
+	// To auto fill an Idempotency token the field must be a string,
+	// tagged for auto fill, and have a zero value.
+	case *string:
+		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	case string:
+		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	}
+
+	return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+	b := make([]byte, 16)
+	RandReader.Read(b)
+
+	return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the provided value with an idempotency token,
+// given that the value can be set. It will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() && v.CanSet() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	v = reflect.Indirect(v)
+
+	if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+	}
+
+	b := make([]byte, 16)
+	_, err := rand.Read(b)
+	if err != nil {
+		// TODO handle error
+		return
+	}
+
+	v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+	// 13th character is "4"
+	u[6] = (u[6] | 0x40) & 0x4F
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] | 0x80) & 0xBF
+
+	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100644
index 0000000000..18169f0f8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS query requests and responses.
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+	"net/url"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
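+//
+// As a sketch of the wire format (hypothetical operation name and version),
+// the serialized form body for a "DescribeThings" call with API version
+// "2016-11-15" would carry the URL-encoded, key-sorted parameters:
+//
+//	Action=DescribeThings&...&Version=2016-11-15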
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 0000000000..524ca952ad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,237 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 0000000000..e0f4d5a541 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
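+//
+// For example, a response carrying the header (illustrative value)
+//
+//	X-Amzn-Requestid: 01234567-89ab-cdef-0123-456789abcdef
+//
+// results in r.RequestID being set to that value.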
+func UnmarshalMeta(r *request.Request) {
+	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 0000000000..f214296171
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,66 @@
+package query
+
+import (
+	"encoding/xml"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+	XMLName   xml.Name `xml:"ErrorResponse"`
+	Code      string   `xml:"Error>Code"`
+	Message   string   `xml:"Error>Message"`
+	RequestID string   `xml:"RequestId"`
+}
+
+type xmlServiceUnavailableResponse struct {
+	XMLName xml.Name `xml:"ServiceUnavailableException"`
+}
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
+		return
+	}
+
+	// First check for a specific error
+	resp := xmlErrorResponse{}
+	decodeErr := xml.Unmarshal(bodyBytes, &resp)
+	if decodeErr == nil {
+		reqID := resp.RequestID
+		if reqID == "" {
+			reqID = r.RequestID
+		}
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(resp.Code, resp.Message, nil),
+			r.HTTPResponse.StatusCode,
+			reqID,
+		)
+		return
+	}
+
+	// Check for an unhandled error
+	servUnavailResp := xmlServiceUnavailableResponse{}
+	unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
+	if unavailErr == nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New("ServiceUnavailableException", "service is unavailable", nil),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Failed to retrieve any error message from the response body
+	r.Error = awserr.New("SerializationError",
+		"failed to decode query XML error response", decodeErr)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 0000000000..7161835649
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,290 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RFC822 is the RFC822 timestamp format used by AWS protocols
+const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.'
|| + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) 
+ if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + case aws.JSONValue: + b, err := json.Marshal(value) + if err != nil { + return "", err + } + if tag.Get("location") == "header" { + str = base64.StdEncoding.EncodeToString(b) + } else { + str = string(b) + } + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 0000000000..4366de2e1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
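+//
+// As an illustration with a hypothetical shape (not part of this file):
+//
+//	type ExampleOutput struct {
+//		_    struct{}     `payload:"Body"`
+//		Body *BodyDetails `type:"structure"`
+//	}
+//
+// PayloadMember(&ExampleOutput{...}) would return the value of the Body field.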
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 0000000000..7a779ee226 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,227 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. 
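+//
+// Note that only body payloads are handled here; fields tagged with a
+// "location" such as header or statusCode are filled by UnmarshalMeta below.
+// A hypothetical header-mapped field would look like:
+//
+//	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`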
+func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", + "failed to read response body", err) + return + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = 
http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + isJSONValue := tag.Get("type") == "jsonvalue" + if isJSONValue { + if len(header) == 0 { + return nil + } + } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + b := []byte(header) + var err error + if tag.Get("location") == "header" { + b, err = base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 0000000000..7bdf4c8538 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,69 @@ +// Package restxml provides RESTful XML serialization of AWS +// requests and responses. +package restxml + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. 
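+//
+// As a sketch (hypothetical shape and element names): rest.Build first fills
+// the URI and headers from "location" tagged fields, then for a structure
+// payload the remaining members are serialized as the XML body, e.g.
+//
+//	<WidgetConfiguration><Name>example</Name></WidgetConfiguration>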
+func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 0000000000..da1a68111d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 0000000000..7091b456d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,296 @@ +// Package xmlutil provides XML serialization of AWS requests and responses. +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// BuildXML will serialize params into an xml.Encoder. +// Error will be returned if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, false) + } + } + return nil +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// A xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue generic XMLNode builder for any type. 
It builds the value according to its specific
+// type: structure, list, map, or scalar.
+//
+// Also takes a "type" tag value to override what type the value should be converted to as an XMLNode. If
+// the type is not provided, reflection will be used to determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	value = elemOf(value)
+	if !value.IsValid() { // no need to handle zero values
+		return nil
+	} else if tag.Get("location") != "" { // don't handle non-body location values
+		return nil
+	}
+
+	t := tag.Get("type")
+	if t == "" {
+		switch value.Kind() {
+		case reflect.Struct:
+			t = "structure"
+		case reflect.Slice:
+			t = "list"
+		case reflect.Map:
+			t = "map"
+		}
+	}
+
+	switch t {
+	case "structure":
+		if field, ok := value.Type().FieldByName("_"); ok {
+			tag = tag + reflect.StructTag(" ") + field.Tag
+		}
+		return b.buildStruct(value, current, tag)
+	case "list":
+		return b.buildList(value, current, tag)
+	case "map":
+		return b.buildMap(value, current, tag)
+	default:
+		return b.buildScalar(value, current, tag)
+	}
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes also.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if !value.IsValid() {
+		return nil
+	}
+
+	fieldAdded := false
+
+	// unwrap payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := value.Type().FieldByName(payload)
+		tag = field.Tag
+		value = elemOf(value.FieldByName(payload))
+
+		if !value.IsValid() {
+			return nil
+		}
+	}
+
+	child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+	// if there is an xmlNamespace associated with this struct, attach it
+	if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+		ns := xml.Attr{
+			Name:  xml.Name{Local: "xmlns"},
+			Value: uri,
+		}
+		if prefix != "" {
+			b.namespaces[prefix] = uri // register the namespace
+			ns.Name.Local = "xmlns:" + prefix
+		}
+
+		child.Attr = append(child.Attr, ns)
+	}
+
+	t := value.Type()
+	for i := 0; i < value.NumField(); i++ {
+		member := elemOf(value.Field(i))
+		field := t.Field(i)
+
+		if field.PkgPath != "" {
+			continue // ignore unexported fields
+		}
+		if field.Tag.Get("ignore") != "" {
+			continue
+		}
+
+		mTag := field.Tag
+		if mTag.Get("location") != "" { // skip non-body members
+			continue
+		}
+
+		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+			token := protocol.GetIdempotencyToken()
+			member = reflect.ValueOf(token)
+		}
+
+		memberName := mTag.Get("locationName")
+		if memberName == "" {
+			memberName = field.Name
+			mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+		}
+		if err := b.buildValue(member, child, mTag); err != nil {
+			return err
+		}
+
+		fieldAdded = true
+	}
+
+	if fieldAdded { // only append this child if we have one or more valid members
+		current.AddChild(child)
+	}
+
+	return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as children nodes. All
+// nested values in the list are converted to XMLNodes also.
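+//
+// For example (illustrative element names), a two item list marshals as
+//
+//	<Values><member>a</member><member>b</member></Values>
+//
+// when unflattened, or as repeated <Values>a</Values><Values>b</Values>
+// elements when tagged flattened.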
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted lists
+		return nil
+	}
+
+	// check whether the list members should be flattened
+	flattened := tag.Get("flattened") != ""
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if flattened {
+		for i := 0; i < value.Len(); i++ {
+			child := NewXMLElement(xname)
+			current.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	} else {
+		list := NewXMLElement(xname)
+		current.AddChild(list)
+
+		for i := 0; i < value.Len(); i++ {
+			iname := tag.Get("locationNameList")
+			if iname == "" {
+				iname = "member"
+			}
+
+			child := NewXMLElement(xml.Name{Local: iname})
+			list.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
+// nested values in the map are converted to XMLNodes also.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes.
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted maps
+		return nil
+	}
+
+	maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+	current.AddChild(maproot)
+	current = maproot
+
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	// sorting is not required for compliance, but it makes testing easier
+	keys := make([]string, value.Len())
+	for i, k := range value.MapKeys() {
+		keys[i] = k.String()
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		v := value.MapIndex(reflect.ValueOf(k))
+
+		mapcur := current
+		if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+			child := NewXMLElement(xml.Name{Local: "entry"})
+			mapcur.AddChild(child)
+			mapcur = child
+		}
+
+		kchild := NewXMLElement(xml.Name{Local: kname})
+		kchild.Text = k
+		vchild := NewXMLElement(xml.Name{Local: vname})
+		mapcur.AddChild(kchild)
+		mapcur.AddChild(vchild)
+
+		if err := b.buildValue(v, vchild, ""); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if tag contains an "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
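+//
+// For example (illustrative names), a string field tagged
+// locationName:"Color" is emitted as the text node <Color>red</Color>, while
+// adding xmlAttribute:"true" instead yields an attribute on the current
+// node, e.g. <Parent Color="red">.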
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 0000000000..87584628a2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. +func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. 
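+//
+// As an illustration (hypothetical field), the element <Name>abc</Name> is
+// matched to a struct member either by its Go field name or by an explicit
+// locationName tag:
+//
+//	Name *string `locationName:"Name" type:"string"`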
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+	if r.Kind() == reflect.Ptr {
+		if r.IsNil() { // create the structure if it's nil
+			s := reflect.New(r.Type().Elem())
+			r.Set(s)
+			r = s
+		}
+
+		r = r.Elem()
+		t = t.Elem()
+	}
+
+	// unwrap any payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := t.FieldByName(payload)
+		return parseStruct(r.FieldByName(payload), node, field.Tag)
+	}
+
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		if c := field.Name[0:1]; strings.ToLower(c) == c {
+			continue // ignore unexported fields
+		}
+
+		// figure out what this field is called
+		name := field.Name
+		if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+			name = field.Tag.Get("locationNameList")
+		} else if locName := field.Tag.Get("locationName"); locName != "" {
+			name = locName
+		}
+
+		// try to find the field by name in elements
+		elems := node.Children[name]
+
+		if elems == nil { // try to find the field in attributes
+			if val, ok := node.findElem(name); ok {
+				elems = []*XMLNode{{Text: val}}
+			}
+		}
+
+		member := r.FieldByName(field.Name)
+		for _, elem := range elems {
+			err := parse(member, elem, field.Tag)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if Children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+			}
+
+			for i, c := range Children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			parseMapEntry(r, entry, tag)
+		}
+	} else { // this element is itself an entry
+		parseMapEntry(r, node, tag)
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			parse(valueR, value, "")
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
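+//
+// For example, a *time.Time member parses ISO 8601 UTC text such as the
+// illustrative element
+//
+//	<CreationDate>2017-05-01T12:00:00Z</CreationDate>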
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 0000000000..3e970b629d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,147 @@ +package xmlutil + +import ( + "encoding/xml" + "fmt" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. 
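+//
+// For example (illustrative document), parsing
+//
+//	<Error><Code>NoSuchKey</Code></Error>
+//
+// yields a root whose Children["Error"][0].Children["Code"][0].Text is
+// "NoSuchKey".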
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) + + if node.Text != "" { + e.EncodeToken(xml.CharData([]byte(node.Text))) + } else if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(xml.EndElement{Name: node.Name}) + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 0000000000..0435398348 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,19245 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "fmt" + "io" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AbortMultipartUpload for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + output = &AbortMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CompleteMultipartUpload for usage and error information. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + output = &CompleteMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. +// +// Completes a multipart upload by assembling previously uploaded parts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CopyObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CopyObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyObjectRequest method. +// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + output = &CopyObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyObject API operation for Amazon Simple Storage Service. +// +// Creates a copy of an object that is already stored in Amazon S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY operation is not in the active tier and is +// only stored in Amazon Glacier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateBucket for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBucketRequest method. 
+// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Simple Storage Service. +// +// Creates a new bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested bucket name is not available. The bucket namespace is shared +// by all users of the system. Please select a different name and try again. +// +// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateMultipartUpload for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMultipartUploadRequest method. 
+// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + output = &CreateMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// +// Initiates a multipart upload and returns an upload ID. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucket for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketRequest method. 
+// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucket API operation for Amazon Simple Storage Service. +// +// Deletes the bucket. All objects (including all object versions and Delete +// Markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" + +// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketAnalyticsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketAnalyticsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &DeleteBucketAnalyticsConfigurationInput{} + } + + output = &DeleteBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketAnalyticsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketCors for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketCors method directly +// instead. 
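+//
+// For example, a sketch of that direct form ("svc" and the bucket name are
+// placeholders):
+//
+//    _, err := svc.DeleteBucketCors(&s3.DeleteBucketCorsInput{
+//        Bucket: aws.String("example-bucket"), // placeholder bucket name
+//    })
+//    if err != nil {
+//        // handle the service or SDK error
+//    }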
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + output = &DeleteBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketCors API operation for Amazon Simple Storage Service. +// +// Deletes the cors configuration information set for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" + +// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketInventoryConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketInventoryConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. 
+// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketInventoryConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &DeleteBucketInventoryConfigurationInput{} + } + + output = &DeleteBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketInventoryConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketLifecycle for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketLifecycle method directly +// instead. 
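+//
+// For example, a sketch of that direct form ("svc" and the bucket name are
+// placeholders):
+//
+//    _, err := svc.DeleteBucketLifecycle(&s3.DeleteBucketLifecycleInput{
+//        Bucket: aws.String("example-bucket"), // placeholder bucket name
+//    })
+//    if err != nil {
+//        // handle the service or SDK error
+//    }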
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deletes the lifecycle configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" + +// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketMetricsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketMetricsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
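+//
+// When the request-object form is not needed, a direct call is a minimal
+// sketch (the bucket name and metrics configuration ID are placeholders):
+//
+//    _, err := svc.DeleteBucketMetricsConfiguration(&s3.DeleteBucketMetricsConfigurationInput{
+//        Bucket: aws.String("example-bucket"),     // placeholder bucket name
+//        Id:     aws.String("example-metrics-id"), // placeholder configuration ID
+//    })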
+// +// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. +// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketMetricsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &DeleteBucketMetricsConfigurationInput{} + } + + output = &DeleteBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes a metrics configuration (specified by the metrics configuration ID) +// from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketMetricsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketPolicy method directly +// instead. 
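+//
+// For example, a sketch of that direct form ("svc" and the bucket name are
+// placeholders):
+//
+//    _, err := svc.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{
+//        Bucket: aws.String("example-bucket"), // placeholder bucket name
+//    })
+//    if err != nil {
+//        // handle the service or SDK error
+//    }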
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + output = &DeleteBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketPolicy API operation for Amazon Simple Storage Service. +// +// Deletes the policy from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketPolicy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + return out, req.Send() +} + +// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketReplication for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketReplicationRequest method. 
+// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + output = &DeleteBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketReplication API operation for Amazon Simple Storage Service. +// +// Deletes the replication configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketReplication for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + return out, req.Send() +} + +// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketTaggingRequest method. 
+// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + output = &DeleteBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketTagging API operation for Amazon Simple Storage Service. +// +// Deletes the tags from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + return out, req.Send() +} + +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketWebsite for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. 
+// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + output = &DeleteBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation removes the website configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + return out, req.Send() +} + +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectRequest method. 
+// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + output = &DeleteObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObject for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + return out, req.Send() +} + +// DeleteObjectWithContext is the same as DeleteObject with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjectTagging = "DeleteObjectTagging" + +// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjectTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteObjectTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObjectTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectTaggingRequest method. 
+// req, resp := client.DeleteObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { + op := &request.Operation{ + Name: opDeleteObjectTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &DeleteObjectTaggingInput{} + } + + output = &DeleteObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObjectTagging API operation for Amazon Simple Storage Service. +// +// Removes the tag-set from an existing object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjectTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + return out, req.Send() +} + +// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteObjects for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectsRequest method. 
+// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + output = &DeleteObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObjects API operation for Amazon Simple Storage Service. +// +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjects for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + return out, req.Send() +} + +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketAccelerateConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. 
+// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + output = &GetBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the accelerate configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAccelerateConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAclRequest method. 
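+// // After the Send call below succeeds, the grants can be inspected; a
+// // hedged sketch using fields from GetBucketAclOutput:
+// //
+// // for _, grant := range resp.Grants {
+// //     fmt.Println(aws.StringValue(grant.Permission))
+// // }
+//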
+// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + output = &GetBucketAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// Gets the access control policy for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" + +// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketAnalyticsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAnalyticsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
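+// // Unlike the Bucket-only inputs, this operation also requires the
+// // analytics configuration ID; a sketch with placeholder values (the
+// // inventory and metrics configuration getters below take the same
+// // Bucket-plus-Id shape):
+// params := &s3.GetBucketAnalyticsConfigurationInput{
+//     Bucket: aws.String("example-bucket"),
+//     Id:     aws.String("example-analytics-id"),
+// }
+//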
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAnalyticsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &GetBucketAnalyticsConfigurationInput{} + } + + output = &GetBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAnalyticsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketCors for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketCorsRequest method. 
+// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + output = &GetBucketCorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// Returns the cors configuration for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" + +// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketInventoryConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketInventoryConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketInventoryConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketInventoryConfigurationRequest method. 
+// req, resp := client.GetBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketInventoryConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &GetBucketInventoryConfigurationInput{} + } + + output = &GetBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Returns an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketInventoryConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLifecycle for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleRequest method. 
+// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + output = &GetBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deprecated, see the GetBucketLifecycleConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycle for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLifecycleConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. 
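+// // After the Send call below succeeds, the configured rules can be
+// // ranged over; a hedged sketch using fields of LifecycleRule:
+// //
+// // for _, rule := range resp.Rules {
+// //     fmt.Println(aws.StringValue(rule.ID), aws.StringValue(rule.Status))
+// // }
+//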
+// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + output = &GetBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the lifecycle configuration information set on the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycleConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLocation for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLocationRequest method. 
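+// // Buckets in us-east-1 are reported with an empty LocationConstraint,
+// // so callers typically map "" back to a region name after the Send
+// // call below; a hedged sketch:
+// //
+// // region := aws.StringValue(resp.LocationConstraint)
+// // if region == "" {
+// //     region = "us-east-1"
+// // }
+//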
+// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// Returns the region the bucket resides in. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLogging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLoggingRequest method. 
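+// // resp.LoggingEnabled is nil when the bucket has no logging configured,
+// // so guard the access after the Send call below; a hedged sketch:
+// //
+// // if le := resp.LoggingEnabled; le != nil {
+// //     fmt.Println(aws.StringValue(le.TargetBucket), aws.StringValue(le.TargetPrefix))
+// // }
+//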
+// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" + +// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketMetricsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketMetricsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketMetricsConfigurationRequest method. 
+// req, resp := client.GetBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketMetricsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &GetBucketMetricsConfigurationInput{} + } + + output = &GetBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets a metrics configuration (specified by the metrics configuration ID) +// from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketMetricsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketNotification for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationRequest method. 
+// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfigurationDeprecated{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotification API operation for Amazon Simple Storage Service. +// +// Deprecated, see the GetBucketNotificationConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotification for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketNotificationConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. 
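+// // The output here is a *NotificationConfiguration rather than a
+// // dedicated *Output struct; after the Send call below, its
+// // TopicConfigurations, QueueConfigurations, and
+// // LambdaFunctionConfigurations slices list the configured targets, e.g.:
+// //
+// // for _, qc := range resp.QueueConfigurations {
+// //     fmt.Println(aws.StringValue(qc.QueueArn))
+// // }
+//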
+// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfiguration{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the notification configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotificationConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketPolicyRequest method. 
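+// // The bucket policy is returned as a JSON document in a string field;
+// // after the Send call below it can be read with:
+// //
+// // fmt.Println(aws.StringValue(resp.Policy))
+//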
+// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + output = &GetBucketPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicy API operation for Amazon Simple Storage Service. +// +// Returns the policy of a specified bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketReplication for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketReplicationRequest method. 
+// req, resp := client.GetBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + output = &GetBucketReplicationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketReplication API operation for Amazon Simple Storage Service. +// +// Returns the replication configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketReplication for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + return out, req.Send() +} + +// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketRequestPayment for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. 
+// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + output = &GetBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Returns the request payment configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketRequestPayment for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketTaggingRequest method. 
+// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + output = &GetBucketTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag set associated with the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketVersioning for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketVersioningRequest method. 
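+// // A bucket that has never had versioning configured returns an unset
+// // Status; a hedged sketch of checking the state after the Send call
+// // below:
+// //
+// // if aws.StringValue(resp.Status) == s3.BucketVersioningStatusEnabled {
+// //     fmt.Println("versioning is enabled")
+// // }
+//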
+// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + output = &GetBucketVersioningOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketVersioning API operation for Amazon Simple Storage Service. +// +// Returns the versioning state of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketWebsite for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketWebsiteRequest method. 
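+// // The *WithContext variants accept anything satisfying aws.Context; the
+// // standard library's context.Context (Go 1.7+) qualifies, so a
+// // deadline-bound call could look like this hedged sketch:
+// //
+// // ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+// // defer cancel()
+// // resp, err := client.GetBucketWebsiteWithContext(ctx, params)
+//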
+// req, resp := client.GetBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + output = &GetBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketWebsite API operation for Amazon Simple Storage Service. +// +// Returns the website configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketWebsite for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectRequest method. 
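+// // GetObject requires both Bucket and Key, and the returned Body is an
+// // io.ReadCloser the caller must close; a hedged sketch with placeholder
+// // names:
+// params := &s3.GetObjectInput{
+//     Bucket: aws.String("example-bucket"),
+//     Key:    aws.String("example-key"),
+// }
+// // After a successful Send, stream the object bytes from resp.Body:
+// //
+// // defer resp.Body.Close()
+// // io.Copy(os.Stdout, resp.Body)
+//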
+// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + output = &GetObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObject API operation for Amazon Simple Storage Service. +// +// Retrieves objects from Amazon S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + return out, req.Send() +} + +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObjectAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectAclRequest method. 
+// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + output = &GetObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAcl API operation for Amazon Simple Storage Service. +// +// Returns the access control list (ACL) of an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + return out, req.Send() +} + +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTagging = "GetObjectTagging" + +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObjectTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectTaggingRequest method. 
+// req, resp := client.GetObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { + op := &request.Operation{ + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &GetObjectTaggingInput{} + } + + output = &GetObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag-set of an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + return out, req.Send() +} + +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObjectTorrent for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectTorrent method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectTorrentRequest method. 
+// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + output = &GetObjectTorrentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// Return torrent files from a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTorrent for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See HeadBucket for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadBucketRequest method. 
+// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See HeadObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadObjectRequest method. 
+// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + output = &HeadObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadObject API operation for Amazon Simple Storage Service. +// +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBucketAnalyticsConfigurations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBucketAnalyticsConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. 
+// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketAnalyticsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &ListBucketAnalyticsConfigurationsInput{} + } + + output = &ListBucketAnalyticsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the analytics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketAnalyticsConfigurations for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" + +// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketInventoryConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBucketInventoryConfigurations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBucketInventoryConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. 
+// req, resp := client.ListBucketInventoryConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketInventoryConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &ListBucketInventoryConfigurationsInput{} + } + + output = &ListBucketInventoryConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. +// +// Returns a list of inventory configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketInventoryConfigurations for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" + +// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketMetricsConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBucketMetricsConfigurations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBucketMetricsConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. 
+// req, resp := client.ListBucketMetricsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketMetricsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &ListBucketMetricsConfigurationsInput{} + } + + output = &ListBucketMetricsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the metrics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketMetricsConfigurations for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBuckets for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBuckets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketsRequest method. 
+// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + output = &ListBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListMultipartUploads for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMultipartUploads method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMultipartUploadsRequest method. 
+// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + output = &ListMultipartUploadsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMultipartUploads API operation for Amazon Simple Storage Service. +// +// This operation lists in-progress multipart uploads. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListMultipartUploads for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. 
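+//
+// As an illustrative sketch only (the bucket name "my-bucket" is a
+// placeholder, not part of this API), a time-bounded iteration over the
+// pages might look like:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//    defer cancel()
+//    err := client.ListMultipartUploadsPagesWithContext(ctx,
+//        &ListMultipartUploadsInput{Bucket: aws.String("my-bucket")},
+//        func(page *ListMultipartUploadsOutput, lastPage bool) bool {
+//            fmt.Println(len(page.Uploads)) // in-progress uploads on this page
+//            return true                    // keep paging until the last page
+//        })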
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListObjectVersions for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + output = &ListObjectVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectVersions API operation for Amazon Simple Storage Service. +// +// Returns metadata about all of the versions of objects in a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListObjects for usage and error information. 
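+//
+// One use of the request object described below, sketched here with
+// placeholder values (the bucket name and 15-minute expiry are arbitrary;
+// Presign is the generic aws/request.Request helper rather than anything
+// specific to this operation), is generating a presigned URL instead of
+// sending the request:
+//
+//    req, _ := client.ListObjectsRequest(&ListObjectsInput{
+//        Bucket: aws.String("my-bucket"), // placeholder
+//    })
+//    url, err := req.Presign(15 * time.Minute) // sign without sending
+//    if err == nil {
+//        fmt.Println("presigned URL:", url)
+//    }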
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsRequest method. +// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + output = &ListObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjects API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() +} + +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. 
+// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListObjectsV2 for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectsV2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + output = &ListObjectsV2Output{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend +// you use this revised API for new application development. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) + } + return p.Err() +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListParts for usage and error information. 
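+//
+// As a hedged sketch of the custom-handler use case described below (the
+// bucket, key, and upload ID are placeholders; the upload ID would come
+// from a prior CreateMultipartUpload call), a logging handler can be
+// attached before the request is sent:
+//
+//    req, resp := client.ListPartsRequest(&ListPartsInput{
+//        Bucket:   aws.String("my-bucket"), // placeholder
+//        Key:      aws.String("my-key"),    // placeholder
+//        UploadId: aws.String("upload-id"), // placeholder
+//    })
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        fmt.Println("sending:", r.HTTPRequest.URL) // log each send attempt
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(len(resp.Parts), "parts listed")
+//    }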
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListParts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + output = &ListPartsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListParts API operation for Amazon Simple Storage Service. +// +// Lists the parts that have been uploaded for a specific multipart upload. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. 
+// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketAccelerateConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + output = &PutBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the accelerate configuration of an existing bucket. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAccelerateConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// Sets the permissions on a bucket using access control lists (ACL). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
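+//
+// A minimal sketch of one possible call, applying a canned ACL; the bucket
+// name and ACL value here are placeholders:
+//
+//    _, err := client.PutBucketAcl(&PutBucketAclInput{
+//        Bucket: aws.String("my-bucket"), // placeholder bucket name
+//        ACL:    aws.String("private"),   // a canned ACL value
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }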
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAcl for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + return out, req.Send() +} + +// PutBucketAclWithContext is the same as PutBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" + +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketAnalyticsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAnalyticsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. +// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAnalyticsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &PutBucketAnalyticsConfigurationInput{} + } + + output = &PutBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
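+//
+// A minimal sketch of one possible call; the bucket name and configuration
+// ID are placeholders, and the empty StorageClassAnalysis requests analysis
+// without a data export:
+//
+//    _, err := client.PutBucketAnalyticsConfiguration(&PutBucketAnalyticsConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        Id:     aws.String("report-1"),
+//        AnalyticsConfiguration: &AnalyticsConfiguration{
+//            Id:                   aws.String("report-1"),
+//            StorageClassAnalysis: &StorageClassAnalysis{},
+//        },
+//    })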
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAnalyticsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketCors for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// Sets the cors configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. 
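+//
+// A minimal sketch of one possible call, allowing cross-origin GET requests
+// from any origin; the bucket name is a placeholder:
+//
+//    _, err := client.PutBucketCors(&PutBucketCorsInput{
+//        Bucket: aws.String("my-bucket"),
+//        CORSConfiguration: &CORSConfiguration{
+//            CORSRules: []*CORSRule{{
+//                AllowedMethods: []*string{aws.String("GET")},
+//                AllowedOrigins: []*string{aws.String("*")},
+//            }},
+//        },
+//    })
+//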
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
+func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
+	req, out := c.PutBucketCorsRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketCorsWithContext is the same as PutBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) {
+	req, out := c.PutBucketCorsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
+
+// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketInventoryConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketInventoryConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketInventoryConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketInventoryConfigurationRequest method.
+//    req, resp := client.PutBucketInventoryConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
+func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketInventoryConfiguration,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?inventory",
+	}
+
+	if input == nil {
+		input = &PutBucketInventoryConfigurationInput{}
+	}
+
+	output = &PutBucketInventoryConfigurationOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Adds an inventory configuration (identified by the inventory ID) to the
+// bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketInventoryConfiguration for usage and error information.
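+//
+// A minimal sketch of one possible call, writing a daily CSV inventory of
+// all object versions; the bucket names and configuration ID are placeholders:
+//
+//    _, err := client.PutBucketInventoryConfiguration(&PutBucketInventoryConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        Id:     aws.String("inventory-1"),
+//        InventoryConfiguration: &InventoryConfiguration{
+//            Id:                     aws.String("inventory-1"),
+//            IsEnabled:              aws.Bool(true),
+//            IncludedObjectVersions: aws.String("All"),
+//            Schedule:               &InventorySchedule{Frequency: aws.String("Daily")},
+//            Destination: &InventoryDestination{
+//                S3BucketDestination: &InventoryS3BucketDestination{
+//                    Bucket: aws.String("arn:aws:s3:::my-report-bucket"),
+//                    Format: aws.String("CSV"),
+//                },
+//            },
+//        },
+//    })
+//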
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketLifecycle for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleRequest method. +// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + output = &PutBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deprecated, see the PutBucketLifecycleConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycle for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketLifecycleConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + output = &PutBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Sets lifecycle configuration for your bucket. If a lifecycle configuration +// exists, it replaces it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. 
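+//
+// A minimal sketch of one possible call, expiring objects under a prefix
+// after 30 days; the bucket name, rule ID, and prefix are placeholders:
+//
+//    _, err := client.PutBucketLifecycleConfiguration(&PutBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        LifecycleConfiguration: &BucketLifecycleConfiguration{
+//            Rules: []*LifecycleRule{{
+//                ID:         aws.String("expire-logs"),
+//                Prefix:     aws.String("logs/"),
+//                Status:     aws.String("Enabled"),
+//                Expiration: &LifecycleExpiration{Days: aws.Int64(30)},
+//            }},
+//        },
+//    })
+//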
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
+	req, out := c.PutBucketLifecycleConfigurationRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
+	req, out := c.PutBucketLifecycleConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketLogging = "PutBucketLogging"
+
+// PutBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLogging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketLogging for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketLogging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketLoggingRequest method.
+//    req, resp := client.PutBucketLoggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketLogging,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?logging",
+	}
+
+	if input == nil {
+		input = &PutBucketLoggingInput{}
+	}
+
+	output = &PutBucketLoggingOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Sets the logging parameters for a bucket and specifies permissions for who
+// can view and modify the logging parameters. To set the logging status of
+// a bucket, you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
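+//
+// A minimal sketch of one possible call, delivering access logs to another
+// bucket; both bucket names and the prefix are placeholders:
+//
+//    _, err := client.PutBucketLogging(&PutBucketLoggingInput{
+//        Bucket: aws.String("my-bucket"),
+//        BucketLoggingStatus: &BucketLoggingStatus{
+//            LoggingEnabled: &LoggingEnabled{
+//                TargetBucket: aws.String("my-log-bucket"),
+//                TargetPrefix: aws.String("my-bucket/"),
+//            },
+//        },
+//    })
+//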
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + return out, req.Send() +} + +// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" + +// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketMetricsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketMetricsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketMetricsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketMetricsConfigurationRequest method. +// req, resp := client.PutBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketMetricsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &PutBucketMetricsConfigurationInput{} + } + + output = &PutBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets a metrics configuration (specified by the metrics configuration ID) +// for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketMetricsConfiguration for usage and error information. 
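+//
+// A minimal sketch of one possible call, enabling request metrics for the
+// whole bucket; the bucket name and configuration ID are placeholders:
+//
+//    _, err := client.PutBucketMetricsConfiguration(&PutBucketMetricsConfigurationInput{
+//        Bucket:               aws.String("my-bucket"),
+//        Id:                   aws.String("EntireBucket"),
+//        MetricsConfiguration: &MetricsConfiguration{Id: aws.String("EntireBucket")},
+//    })
+//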
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
+	req, out := c.PutBucketMetricsConfigurationRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) {
+	req, out := c.PutBucketMetricsConfigurationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketNotification = "PutBucketNotification"
+
+// PutBucketNotificationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketNotification operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketNotification for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketNotification method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketNotificationRequest method.
+//    req, resp := client.PutBucketNotificationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
+func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
+	if c.Client.Config.Logger != nil {
+		c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
+	}
+	op := &request.Operation{
+		Name:       opPutBucketNotification,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?notification",
+	}
+
+	if input == nil {
+		input = &PutBucketNotificationInput{}
+	}
+
+	output = &PutBucketNotificationOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutBucketNotification API operation for Amazon Simple Storage Service.
+//
+// Deprecated, see the PutBucketNotificationConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotification for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketNotificationConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. +// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + output = &PutBucketNotificationConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Enables notifications of specified events for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
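+//
+// A minimal sketch of one possible call, publishing object-created events
+// to an SNS topic; the bucket name and topic ARN are placeholders:
+//
+//    _, err := client.PutBucketNotificationConfiguration(&PutBucketNotificationConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        NotificationConfiguration: &NotificationConfiguration{
+//            TopicConfigurations: []*TopicConfiguration{{
+//                TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
+//                Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//            }},
+//        },
+//    })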
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotificationConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Replaces a policy on a bucket. If the bucket already has a policy, the one +// in this request completely replaces it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
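+//
+// A minimal sketch of one possible call; policyJSON stands in for a bucket
+// policy document serialized as a string, and the bucket name is a
+// placeholder:
+//
+//    _, err := client.PutBucketPolicy(&PutBucketPolicyInput{
+//        Bucket: aws.String("my-bucket"),
+//        Policy: aws.String(policyJSON), // placeholder policy document
+//    })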
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketPolicy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketReplication for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + output = &PutBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketReplication API operation for Amazon Simple Storage Service. +// +// Creates a new replication configuration (or replaces an existing one, if +// present). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketReplication for usage and error information. 
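+//
+// A minimal sketch of one possible call, replicating the whole bucket; the
+// bucket names and IAM role ARN are placeholders, and versioning must
+// already be enabled on both buckets:
+//
+//    _, err := client.PutBucketReplication(&PutBucketReplicationInput{
+//        Bucket: aws.String("my-bucket"),
+//        ReplicationConfiguration: &ReplicationConfiguration{
+//            Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
+//            Rules: []*ReplicationRule{{
+//                Prefix: aws.String(""), // empty prefix matches all objects
+//                Status: aws.String("Enabled"),
+//                Destination: &Destination{
+//                    Bucket: aws.String("arn:aws:s3:::my-replica-bucket"),
+//                },
+//            }},
+//        },
+//    })
+//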
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketRequestPayment for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + output = &PutBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketRequestPayment for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// Sets the tags for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketTagging for usage and error information. 
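+//
+// A minimal sketch of one possible call; the bucket name and tag values are
+// placeholders:
+//
+//    _, err := client.PutBucketTagging(&PutBucketTaggingInput{
+//        Bucket: aws.String("my-bucket"),
+//        Tagging: &Tagging{
+//            TagSet: []*Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//        },
+//    })
+//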
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + return out, req.Send() +} + +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketVersioning for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + output = &PutBucketVersioningOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketVersioning for usage and error information. 
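+//
+// A minimal sketch of one possible call, turning versioning on; the bucket
+// name is a placeholder:
+//
+//    _, err := client.PutBucketVersioning(&PutBucketVersioningInput{
+//        Bucket: aws.String("my-bucket"),
+//        VersioningConfiguration: &VersioningConfiguration{
+//            Status: aws.String("Enabled"),
+//        },
+//    })
+//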
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketWebsite for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketWebsiteRequest method. +// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. +// +// Set the website configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketWebsite for usage and error information. 
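+//
+// A minimal sketch of one possible call, configuring index and error
+// documents; the bucket name and document keys are placeholders:
+//
+//    _, err := client.PutBucketWebsite(&PutBucketWebsiteInput{
+//        Bucket: aws.String("my-bucket"),
+//        WebsiteConfiguration: &WebsiteConfiguration{
+//            IndexDocument: &IndexDocument{Suffix: aws.String("index.html")},
+//            ErrorDocument: &ErrorDocument{Key: aws.String("error.html")},
+//        },
+//    })
+//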
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObject for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. +// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutObjectAcl = "PutObjectAcl"
+
+// PutObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectAcl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutObjectAcl for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutObjectAcl method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutObjectAclRequest method.
+// req, resp := client.PutObjectAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
+ op := &request.Operation{
+ Name: opPutObjectAcl,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &PutObjectAclInput{}
+ }
+
+ output = &PutObjectAclOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObjectAcl API operation for Amazon Simple Storage Service.
+//
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchKey "NoSuchKey"
+// The specified key does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
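+ // The request now carries the caller's context and per-request options;
+ // the Send below executes it, and cancelling ctx aborts the in-flight call.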
+ return out, req.Send()
+}
+
+const opPutObjectTagging = "PutObjectTagging"
+
+// PutObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutObjectTagging for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutObjectTagging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutObjectTaggingRequest method.
+// req, resp := client.PutObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opPutObjectTagging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &PutObjectTaggingInput{}
+ }
+
+ output = &PutObjectTaggingOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Sets the supplied tag-set to an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
+ req, out := c.PutObjectTaggingRequest(input)
+ return out, req.Send()
+}
+
+// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) {
+ req, out := c.PutObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates a "aws/request.Request" representing the
+// client's request for the RestoreObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See RestoreObject for usage and error information.
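+//
+// A hedged usage sketch (not part of the generated documentation): it assumes
+// an existing *s3.S3 client named svc and illustrative bucket/key names, and
+// requests a one-day Glacier restore via the request-object form:
+//
+// req, _ := svc.RestoreObjectRequest(&s3.RestoreObjectInput{
+// Bucket: aws.String("my-bucket"),
+// Key: aws.String("archived.bin"),
+// RestoreRequest: &s3.RestoreRequest{Days: aws.Int64(1)},
+// })
+// err := req.Send()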
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the RestoreObject method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the RestoreObjectRequest method.
+// req, resp := client.RestoreObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+ op := &request.Operation{
+ Name: opRestoreObject,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?restore",
+ }
+
+ if input == nil {
+ input = &RestoreObjectInput{}
+ }
+
+ output = &RestoreObjectOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// RestoreObject API operation for Amazon Simple Storage Service.
+//
+// Restores an archived copy of an object back into Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
+// This operation is not allowed against this storage tier.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ return out, req.Send()
+}
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPart operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See UploadPart for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UploadPart method directly
+// instead.
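+//
+// A hedged sketch of the direct-call form (assumptions: svc is an existing
+// *s3.S3 client, uploadID came from a prior CreateMultipartUpload call, and
+// part is an io.ReadSeeker such as *bytes.Reader):
+//
+// _, err := svc.UploadPart(&s3.UploadPartInput{
+// Bucket: aws.String("my-bucket"),
+// Key: aws.String("big-object"),
+// UploadId: aws.String(uploadID),
+// PartNumber: aws.Int64(1),
+// Body: part,
+// })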
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UploadPartRequest method.
+// req, resp := client.UploadPartRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+ op := &request.Operation{
+ Name: opUploadPart,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartInput{}
+ }
+
+ output = &UploadPartOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UploadPart API operation for Amazon Simple Storage Service.
+//
+// Uploads a part in a multipart upload.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload in order to stop getting
+// charged for storage of the uploaded parts. Only after you either complete or
+// abort the multipart upload does Amazon S3 free up the parts storage and stop
+// charging you for it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPart for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ return out, req.Send()
+}
+
+// UploadPartWithContext is the same as UploadPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPartCopy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See UploadPartCopy for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the UploadPartCopy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the UploadPartCopyRequest method.
+// req, resp := client.UploadPartCopyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+ op := &request.Operation{
+ Name: opUploadPartCopy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartCopyInput{}
+ }
+
+ output = &UploadPartCopyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UploadPartCopy API operation for Amazon Simple Storage Service.
+//
+// Uploads a part by copying data from an existing object as the data source.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPartCopy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ return out, req.Send()
+}
+
+// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPartCopy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// Specifies the days since the initiation of an Incomplete Multipart Upload
+// that Lifecycle will wait before permanently removing all parts of the upload.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload
+type AbortIncompleteMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the number of days that must pass since initiation for Lifecycle
+ // to abort an Incomplete Multipart Upload.
+ DaysAfterInitiation *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AbortIncompleteMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortIncompleteMultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
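+//
+// A hedged sketch (illustrative only): the setter returns its receiver, so it
+// chains when building a lifecycle rule's abort configuration:
+//
+// abort := (&s3.AbortIncompleteMultipartUpload{}).SetDaysAfterInitiation(7)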
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest +type AbortMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. 
+func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator +type AnalyticsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix to use when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags to use when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s AnalyticsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. 
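+//
+// A hedged sketch of building the AND operator with a prefix plus a tag (the
+// values are illustrative; a conjunction must carry at least two predicates
+// overall, per the AnalyticsFilter documentation):
+//
+// and := (&s3.AnalyticsAndOperator{}).
+// SetPrefix("logs/").
+// SetTags([]*s3.Tag{{Key: aws.String("env"), Value: aws.String("prod")}})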
+func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // If present, it indicates that data related to access patterns will be collected + // and made available to analyze the tradeoffs between different storage classes. + // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
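+//
+// A hedged sketch: the SDK calls Validate before sending a request, and it
+// can also be called directly when assembling a configuration by hand (the
+// Id value is illustrative):
+//
+// cfg := &s3.AnalyticsConfiguration{Id: aws.String("report-1")}
+// if err := cfg.Validate(); err != nil {
+// fmt.Println(err) // reports the missing required StorageClassAnalysis
+// }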
+func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. + And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. + Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon resource name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination bucket. If no account ID is provided, + // the owner will not be validated prior to exporting data. + BucketAccountId *string `type:"string"` + + // The file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The exported data begins with this + // prefix. 
+ Prefix *string `type:"string"` +} + +// String returns the string representation +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { + s.Bucket = &v + return s +} + +// SetBucketAccountId sets the BucketAccountId field's value. +func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { + s.BucketAccountId = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Bucket) SetCreationDate(v time.Time) *Bucket { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. 
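+//
+// A hedged sketch of a minimal lifecycle configuration (the rule content is
+// an illustrative assumption; SetStatus and SetPrefix are generated setters
+// on LifecycleRule elsewhere in this package):
+//
+// lc := (&s3.BucketLifecycleConfiguration{}).SetRules([]*s3.LifecycleRule{
+// (&s3.LifecycleRule{}).SetStatus("Enabled").SetPrefix("tmp/"),
+// })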
+func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). 
+ ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. +func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. +func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. +func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. 
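+//
+// A hedged sketch of a CORS rule built with the CORSRule setters defined
+// above (the origin and max-age values are illustrative):
+//
+// rule := (&s3.CORSRule{}).
+// SetAllowedMethods([]*string{aws.String("GET")}).
+// SetAllowedOrigins([]*string{aws.String("https://example.com")}).
+// SetMaxAgeSeconds(3000)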
+func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. 
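+//
+// A hedged sketch of completing an upload (assumptions: svc is an existing
+// *s3.S3 client, and uploadID plus the parts slice were collected from
+// CreateMultipartUpload and the individual UploadPart responses):
+//
+// _, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+// Bucket: aws.String("my-bucket"),
+// Key: aws.String("big-object"),
+// UploadId: aws.String(uploadID),
+// MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
+// })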
+func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload
+type CompletedMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s CompletedMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedMultipartUpload) GoString() string {
+ return s.String()
+}
+
+// SetParts sets the Parts field's value.
+func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload {
+ s.Parts = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart
+type CompletedPart struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Part number that identifies the part. This is a positive integer between
+ // 1 and 10,000.
+ PartNumber *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CompletedPart) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedPart) GoString() string {
+ return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompletedPart) SetETag(v string) *CompletedPart {
+ s.ETag = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
+ s.PartNumber = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition
+type Condition struct {
+ _ struct{} `type:"structure"`
+
+ // The HTTP error code when the redirect is applied. In the event of an error,
+ // if the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string `type:"string"`
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+ // To redirect requests for all pages with the prefix docs/, the key prefix will
+ // be docs/, which identifies all objects in the docs/ folder. Required when
+ // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for
+ // the redirect to be applied.
+ KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+ s.HttpErrorCodeReturnedEquals = &v
+ return s
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
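+//
+// A hedged sketch of a website routing condition (illustrative; in practice
+// it is paired with a Redirect inside a RoutingRule):
+//
+// cond := (&s3.Condition{}).SetKeyPrefixEquals("docs/")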
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition { + s.KeyPrefixEquals = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest +type CopyObjectInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The date and time at which the object is no longer cacheable. 
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the destination object; this value must be used in conjunction
+ // with the TaggingDirective. The tag-set must be encoded as URL query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // Specifies whether the object's tag-set is copied from the source object or
+ // replaced with the tag-set provided in the request.
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopyObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. 
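+
+// Example (illustrative only, not part of the generated code): a minimal
+// CopyObject call, assuming an *s3.S3 client named svc built elsewhere; the
+// bucket and key names are hypothetical, and CopySource must be URL-encoded.
+//
+//    out, err := svc.CopyObject(&s3.CopyObjectInput{
+//        Bucket:     aws.String("dst-bucket"),
+//        Key:        aws.String("dst-key"),
+//        CopySource: aws.String("src-bucket/src-key"),
+//    })
+//    if err == nil && out.CopyObjectResult != nil {
+//        fmt.Println(aws.StringValue(out.CopyObjectResult.ETag))
+//    }
+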
+func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +// SetCopyObjectResult sets the CopyObjectResult field's value. +func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s +} + +// SetExpiration sets the Expiration field's value. 
+func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. 
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest +type CreateBucketInput struct { + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v + return s +} + +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. 
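+
+// Example (illustrative sketch, hypothetical bucket name): creating a bucket
+// outside the default region by attaching a CreateBucketConfiguration with a
+// LocationConstraint; on success the output's Location header names the bucket.
+//
+//    resp, err := svc.CreateBucket(&s3.CreateBucketInput{
+//        Bucket: aws.String("my-example-bucket"),
+//        CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//            LocationConstraint: aws.String("eu-west-1"),
+//        },
+//    })
+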
+func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { + s.GrantWriteACP = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { + s.Location = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest +type CreateMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. 
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
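+
+// Example (illustrative, with hypothetical key material): supplying the SSE-C
+// header trio on a CreateMultipartUploadInput via the chained setters. All
+// three values describe one customer-provided key; the MD5 digest lets S3
+// verify the key arrived intact.
+//
+//    input := (&s3.CreateMultipartUploadInput{}).
+//        SetBucket("my-bucket").
+//        SetKey("my-key").
+//        SetSSECustomerAlgorithm("AES256").
+//        SetSSECustomerKey(string(rawKey)). // hypothetical 32-byte key
+//        SetSSECustomerKeyMD5(rawKeyMD5)    // base64 MD5 of the raw key
+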
+func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. 
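+
+// Example (illustrative, hypothetical names): starting a multipart upload and
+// keeping the UploadId that later UploadPart and CompleteMultipartUpload calls
+// (defined elsewhere in this file) must echo back.
+//
+//    resp, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("big-object"),
+//    })
+//    if err == nil {
+//        uploadID := aws.StringValue(resp.UploadId)
+//        _ = uploadID // thread through UploadPart / CompleteMultipartUpload
+//    }
+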
+func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput
+type CreateMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Date when the multipart upload will become eligible for abort by a lifecycle
+ // rule.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
+
+ // ID of the lifecycle rule that makes a multipart upload eligible for the
+ // abort operation.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `locationName:"Bucket" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // ID for the initiated multipart upload.
+ UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete +type Delete struct { + _ struct{} `type:"structure"` + + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjects sets the Objects field's value. 
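+
+// Example (illustrative, hypothetical keys): assembling a quiet-mode Delete
+// payload for DeleteObjects (defined later in this file); quiet mode keeps
+// per-key success entries out of the response.
+//
+//    del := (&s3.Delete{}).
+//        SetQuiet(true).
+//        SetObjects([]*s3.ObjectIdentifier{
+//            (&s3.ObjectIdentifier{}).SetKey("logs/2017-01-01.gz"),
+//            (&s3.ObjectIdentifier{}).SetKey("logs/2017-01-02.gz"),
+//        })
+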
+func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. +func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which an analytics configuration is deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
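+
+// Example (illustrative, hypothetical values): Bucket and Id are both required
+// here, so Validate fails the call locally before any request is signed or sent.
+//
+//    _, err := svc.DeleteBucketAnalyticsConfiguration(
+//        &s3.DeleteBucketAnalyticsConfigurationInput{
+//            Bucket: aws.String("my-bucket"),
+//            Id:     aws.String("analytics-report-1"),
+//        })
+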
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput +type DeleteBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest +type DeleteBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
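+
+// Example (illustrative): the Delete*Input types share this shape, so a caller
+// can run Validate directly; an empty input reports both missing parameters.
+//
+//    input := &s3.DeleteBucketMetricsConfigurationInput{}
+//    if err := input.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams naming Bucket and Id
+//    }
+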
+func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
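+
+// Example (illustrative, hypothetical name): deleting the bucket itself. S3
+// only deletes empty buckets, so all objects (and versions) must be removed
+// first.
+//
+//    _, err := svc.DeleteBucket(&s3.DeleteBucketInput{
+//        Bucket: aws.String("my-empty-bucket"),
+//    })
+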
+func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
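+
+// Example (illustrative): inspecting a failed subresource delete through the
+// awserr.Error interface from aws/awserr; the same pattern applies to any of
+// the Delete* operations in this file.
+//
+//    _, err := svc.DeleteBucketReplication(&s3.DeleteBucketReplicationInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    }
+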
+func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +// SetIsLatest sets the IsLatest field's value. +func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. 
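+
+// Example (illustrative): callers do not build DeleteMarkerEntry values; they
+// come back from ListObjectVersions (defined elsewhere in this file) when a
+// versioned bucket is listed. The bucket name is hypothetical.
+//
+//    out, err := svc.ListObjectVersions(&s3.ListObjectVersionsInput{
+//        Bucket: aws.String("versioned-bucket"),
+//    })
+//    if err == nil {
+//        for _, m := range out.DeleteMarkers {
+//            fmt.Println(aws.StringValue(m.Key), aws.StringValue(m.VersionId))
+//        }
+//    }
+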
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry { + s.Owner = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest +type DeleteObjectInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest +type DeleteObjectTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. 
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Delete is a required field + Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v + return s +} + +// SetDelete sets the Delete field's value. +func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { + s.RequestPayer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + Deleted []*DeletedObject `type:"list" flattened:"true"` + + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. +func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject +type DeletedObject struct { + _ struct{} `type:"structure"` + + DeleteMarker *bool `type:"boolean"` + + DeleteMarkerVersionId *string `type:"string"` + + Key *string `min:"1" type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { + s.DeleteMarker = &v + return s +} + +// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. +func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { + s.DeleteMarkerVersionId = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeletedObject) SetKey(v string) *DeletedObject { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeletedObject) SetVersionId(v string) *DeletedObject { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination +type Destination struct { + _ struct{} `type:"structure"` + + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store + // replicas of the object identified by the rule. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *Destination) SetBucket(v string) *Destination { + s.Bucket = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. 
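+//
+// Illustrative sketch (not part of the generated code): because each setter
+// returns its receiver, a replication Destination can be built by chaining.
+// The bucket ARN below is a hypothetical placeholder.
+//
+//	dest := (&Destination{}).
+//		SetBucket("arn:aws:s3:::replica-bucket").
+//		SetStorageClass(StorageClassStandard)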
+func (s *Destination) SetStorageClass(v string) *Destination {
+	s.StorageClass = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error
+type Error struct {
+	_ struct{} `type:"structure"`
+
+	Code *string `type:"string"`
+
+	Key *string `min:"1" type:"string"`
+
+	Message *string `type:"string"`
+
+	VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Error) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Error) GoString() string {
+	return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *Error) SetCode(v string) *Error {
+	s.Code = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Error) SetKey(v string) *Error {
+	s.Key = &v
+	return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *Error) SetMessage(v string) *Error {
+	s.Message = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *Error) SetVersionId(v string) *Error {
+	s.VersionId = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument
+type ErrorDocument struct {
+	_ struct{} `type:"structure"`
+
+	// The object key name to use when a 4XX class error occurs.
+	//
+	// Key is a required field
+	Key *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ErrorDocument) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ErrorDocument) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ErrorDocument) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
+	s.Key = &v
+	return s
+}
+
+// Container for a key-value pair that defines the criteria for the filter rule.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule
+type FilterRule struct {
+	_ struct{} `type:"structure"`
+
+	// Object key name prefix or suffix identifying one or more objects to which
+	// the filtering rule applies. The maximum prefix length is 1,024 characters.
+	// Overlapping prefixes and suffixes are not supported. For more information,
+	// see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+	Name *string `type:"string" enum:"FilterRuleName"`
+
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s FilterRule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FilterRule) GoString() string {
+	return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *FilterRule) SetName(v string) *FilterRule {
+	s.Name = &v
+	return s
+}
+
+// SetValue sets the Value field's value.
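+//
+// Illustrative sketch (not part of the generated code): building a FilterRule
+// that matches object keys under a hypothetical "images/" prefix, using the
+// FilterRuleName enum values defined in this package.
+//
+//	rule := (&FilterRule{}).
+//		SetName(FilterRuleNamePrefix).
+//		SetValue("images/")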
+func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure"` + + // Name of the bucket for which the accelerate configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest +type GetBucketAclInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. 
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest +type GetBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which an analytics configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. 
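+//
+// Illustrative sketch (not part of the generated code): fetching an analytics
+// configuration with a hypothetical *S3 client named svc; the bucket name and
+// id are placeholders.
+//
+//	out, err := svc.GetBucketAnalyticsConfiguration(&GetBucketAnalyticsConfigurationInput{
+//		Bucket: aws.String("my-bucket"),
+//		Id:     aws.String("my-analytics-id"),
+//	})
+//	if err == nil && out.AnalyticsConfiguration != nil {
+//		fmt.Println(aws.StringValue(out.AnalyticsConfiguration.Id))
+//	}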
+func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { + s.AnalyticsConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest +type GetBucketCorsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest +type GetBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. 
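+//
+// Illustrative sketch (not part of the generated code): the required Bucket
+// and Id fields can be populated with chained setters; both values below are
+// placeholders.
+//
+//	in := (&GetBucketInventoryConfigurationInput{}).
+//		SetBucket("my-bucket").
+//		SetId("weekly-inventory")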
+func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. 
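+//
+// Illustrative sketch (not part of the generated code): iterating the rules
+// returned by GetBucketLifecycleConfiguration; svc is a hypothetical *S3
+// client and the bucket name is a placeholder.
+//
+//	out, err := svc.GetBucketLifecycleConfiguration(&GetBucketLifecycleConfigurationInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil {
+//		for _, rule := range out.Rules {
+//			fmt.Println(aws.StringValue(rule.ID))
+//		}
+//	}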
+func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest +type GetBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest +type GetBucketLocationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. 
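+//
+// Illustrative sketch (not part of the generated code): resolving a bucket's
+// region. S3 returns an empty LocationConstraint for buckets in us-east-1, so
+// an empty value should be mapped back to that region; svc is a hypothetical
+// *S3 client.
+//
+//	out, err := svc.GetBucketLocation(&GetBucketLocationInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil {
+//		region := aws.StringValue(out.LocationConstraint)
+//		if region == "" {
+//			region = "us-east-1"
+//		}
+//		fmt.Println(region)
+//	}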
+func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest +type GetBucketLoggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest +type GetBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. + MetricsConfiguration *MetricsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `type:"structure"` + + // Name of the bucket to get the notification configuration for. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest +type GetBucketPolicyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
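+//
+// Illustrative sketch (not part of the generated code): fetching a bucket
+// policy; Policy in the output is the policy document as a JSON string. svc
+// and the bucket name are placeholders.
+//
+//	out, err := svc.GetBucketPolicy(&GetBucketPolicyInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.Policy))
+//	}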
+func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest +type GetBucketReplicationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. 
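+//
+// Illustrative sketch (not part of the generated code): listing a bucket's
+// tags; svc is a hypothetical *S3 client and the bucket name is a placeholder.
+//
+//	out, err := svc.GetBucketTagging(&GetBucketTaggingInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil {
+//		for _, tag := range out.TagSet {
+//			fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//		}
+//	}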
+func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest +type GetBucketVersioningInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest +type GetBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
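+//
+// Illustrative sketch (not part of the generated code): reading a bucket's
+// website configuration; svc is a hypothetical *S3 client. Buckets without a
+// website configuration return a NoSuchWebsiteConfiguration error instead.
+//
+//	out, err := svc.GetBucketWebsite(&GetBucketWebsiteInput{
+//		Bucket: aws.String("my-bucket"),
+//	})
+//	if err == nil && out.IndexDocument != nil {
+//		fmt.Println(aws.StringValue(out.IndexDocument.Suffix))
+//	}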
+func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest +type GetObjectAclInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
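+//
+// Illustrative sketch (not part of the generated code): inspecting an object's
+// ACL grants; svc, the bucket name, and the key are placeholders.
+//
+//	out, err := svc.GetObjectAcl(&GetObjectAclInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("path/to/object"),
+//	})
+//	if err == nil {
+//		for _, grant := range out.Grants {
+//			fmt.Println(aws.StringValue(grant.Permission))
+//		}
+//	}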
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest +type GetObjectInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` + + // Downloads the specified range bytes of an object. 
For more information about
+	// the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+	Range *string `location:"header" locationName:"Range" type:"string"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Sets the Cache-Control header of the response.
+	ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+	// Sets the Content-Disposition header of the response.
+	ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+	// Sets the Content-Encoding header of the response.
+	ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+	// Sets the Content-Language header of the response.
+	ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+	// Sets the Content-Type header of the response.
+	ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+	// Sets the Expires header of the response.
+	ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
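+//
+// Illustrative sketch (not part of the generated code): Validate checks only
+// client-side constraints (required fields and minimum lengths). The SDK's
+// request handlers invoke it before sending, but it can also be called
+// directly; the bucket name below is a placeholder.
+//
+//	in := &GetObjectInput{Bucket: aws.String("my-bucket")}
+//	if err := in.Validate(); err != nil {
+//		fmt.Println(err) // reports that Key is missing
+//	}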
+func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
+func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The count of parts this object has. 
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. 
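Since `GetObjectOutput.Body` is a raw `io.ReadCloser`, the caller owns closing it. A usage sketch, assuming the `GetObject` operation wrapper defined earlier in this file and the vendored `session` package; region, bucket, and key are placeholders:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	out, err := svc.GetObject(new(s3.GetObjectInput).
		SetBucket("example-bucket").
		SetKey("report.csv"))
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close() // Body streams the payload; the caller must close it

	data, err := ioutil.ReadAll(out.Body)
	if err != nil {
		log.Fatal(err)
	}
	// Header-backed fields are pointers and may be nil when the header is absent.
	if out.ContentLength != nil {
		log.Printf("read %d of %d bytes", len(data), *out.ContentLength)
	}
}
```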
+func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. 
+func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest +type GetObjectTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. 
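A small sketch of reading a tag set with these types, assuming the `GetObjectTagging` operation wrapper from earlier in this file and a `*s3.S3` client (`svc`) built as in the previous example, plus the `fmt` and `aws` imports:

```go
// printObjectTags lists the tags on one object; svc is a *s3.S3 client.
func printObjectTags(svc *s3.S3, bucket, key string) error {
	out, err := svc.GetObjectTagging(new(s3.GetObjectTaggingInput).
		SetBucket(bucket).
		SetKey(key))
	if err != nil {
		return err
	}
	// TagSet is a required field of the response, so it is safe to range over.
	for _, t := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
	return nil
}
```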
+func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest +type GetObjectTorrentInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. +func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. 
+ // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. 
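`Grant` and `Grantee` validate recursively: `Grant.Validate` only checks that a nested `Grantee`, if present, carries its required `Type`. A hedged sketch granting public read to the AllUsers group; the enum strings mirror the struct tags above:

```go
// publicReadGrant builds a READ grant for the global AllUsers group.
// Grantee.Type is the only field Validate enforces; the URI, ID, or
// EmailAddress used must match that type (here a Group, so URI).
func publicReadGrant() (*s3.Grant, error) {
	grant := new(s3.Grant).
		SetGrantee(new(s3.Grantee).
			SetType("Group").
			SetURI("http://acs.amazonaws.com/groups/global/AllUsers")).
		SetPermission("READ")
	// A Grantee missing Type would surface here as a nested ErrInvalidParams.
	if err := grant.Validate(); err != nil {
		return nil, err
	}
	return grant, nil
}
```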
+func (s *Grantee) SetEmailAddress(v string) *Grantee {
+	s.EmailAddress = &v
+	return s
+}
+
+// SetID sets the ID field's value.
+func (s *Grantee) SetID(v string) *Grantee {
+	s.ID = &v
+	return s
+}
+
+// SetType sets the Type field's value.
+func (s *Grantee) SetType(v string) *Grantee {
+	s.Type = &v
+	return s
+}
+
+// SetURI sets the URI field's value.
+func (s *Grantee) SetURI(v string) *Grantee {
+	s.URI = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest
+type HeadBucketInput struct {
+	_ struct{} `type:"structure"`
+
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s HeadBucketInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadBucketInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
+	s.Bucket = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput
+type HeadBucketOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s HeadBucketOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketOutput) GoString() string {
+	return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest
+type HeadObjectInput struct {
+	_ struct{} `type:"structure"`
+
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Return the object only if its entity tag (ETag) is the same as the one specified,
+	// otherwise return a 412 (precondition failed).
+	IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+	// Return the object only if it has been modified since the specified time,
+	// otherwise return a 304 (not modified).
+	IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Return the object only if its entity tag (ETag) is different from the one
+	// specified, otherwise return a 304 (not modified).
+	IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+	// Return the object only if it has not been modified since the specified time,
+	// otherwise return a 412 (precondition failed).
+	IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Part number of the object being read. This is a positive integer between
+	// 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+	// Useful for querying the size of the part and the number of parts in this
+	// object.
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+	// Downloads the specified range bytes of an object. For more information about
+	// the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+	Range *string `location:"header" locationName:"Range" type:"string"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
+	s.Bucket = &v
+	return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
+	s.IfMatch = &v
+	return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput {
+	s.IfModifiedSince = &v
+	return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput {
+	s.IfNoneMatch = &v
+	return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
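The conditional headers above make `HeadObject` a cheap cache probe. A sketch, assuming the `HeadObject` operation wrapper defined earlier in this file and the `log`, `time`, and `aws` imports; `svc` and `cachedAt` are placeholders supplied by the caller:

```go
// refreshIfChanged re-fetches metadata only when the object changed
// after cachedAt; an unchanged object comes back as a 304-style error.
func refreshIfChanged(svc *s3.S3, cachedAt time.Time) {
	out, err := svc.HeadObject(new(s3.HeadObjectInput).
		SetBucket("example-bucket").
		SetKey("report.csv").
		SetIfModifiedSince(cachedAt))
	if err != nil {
		log.Println("not refreshed:", err) // 304 (unmodified) or 404 (missing)
		return
	}
	log.Printf("etag %s, %d bytes",
		aws.StringValue(out.ETag), aws.Int64Value(out.ContentLength))
}
```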
+func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. 
+ Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. 
+func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
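The `PartNumber`/`PartsCount` pair documented above allows counting a multipart object's parts with a single HEAD. A sketch under the same `svc` assumption:

```go
// partCount issues a ranged HEAD against part 1 and reads the
// x-amz-mp-parts-count header; it is absent for non-multipart objects.
func partCount(svc *s3.S3, bucket, key string) (int64, error) {
	out, err := svc.HeadObject(new(s3.HeadObjectInput).
		SetBucket(bucket).
		SetKey(key).
		SetPartNumber(1))
	if err != nil {
		return 0, err
	}
	return aws.Int64Value(out.PartsCount), nil
}
```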
+func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + // + // Suffix is a required field + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSuffix sets the Suffix field's value. +func (s *IndexDocument) SetSuffix(v string) *IndexDocument { + s.Suffix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration +type InventoryConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about where to publish the inventory results. + // + // Destination is a required field + Destination *InventoryDestination `type:"structure" required:"true"` + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. 
+	Filter *InventoryFilter `type:"structure"`
+
+	// The ID used to identify the inventory configuration.
+	//
+	// Id is a required field
+	Id *string `type:"string" required:"true"`
+
+	// Specifies which object version(s) to include in the inventory results.
+	//
+	// IncludedObjectVersions is a required field
+	IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`
+
+	// Specifies whether the inventory is enabled or disabled.
+	//
+	// IsEnabled is a required field
+	IsEnabled *bool `type:"boolean" required:"true"`
+
+	// Contains the optional fields that are included in the inventory results.
+	OptionalFields []*string `locationNameList:"Field" type:"list"`
+
+	// Specifies the schedule for generating inventory results.
+	//
+	// Schedule is a required field
+	Schedule *InventorySchedule `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"}
+	if s.Destination == nil {
+		invalidParams.Add(request.NewErrParamRequired("Destination"))
+	}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+	if s.IncludedObjectVersions == nil {
+		invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions"))
+	}
+	if s.IsEnabled == nil {
+		invalidParams.Add(request.NewErrParamRequired("IsEnabled"))
+	}
+	if s.Schedule == nil {
+		invalidParams.Add(request.NewErrParamRequired("Schedule"))
+	}
+	if s.Destination != nil {
+		if err := s.Destination.Validate(); err != nil {
+			invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Filter != nil {
+		if err := s.Filter.Validate(); err != nil {
+			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Schedule != nil {
+		if err := s.Schedule.Validate(); err != nil {
+			invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration {
+	s.Destination = v
+	return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration {
+	s.Filter = v
+	return s
+}
+
+// SetId sets the Id field's value.
+func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration {
+	s.Id = &v
+	return s
+}
+
+// SetIncludedObjectVersions sets the IncludedObjectVersions field's value.
+func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration {
+	s.IncludedObjectVersions = &v
+	return s
+}
+
+// SetIsEnabled sets the IsEnabled field's value.
+func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration {
+	s.IsEnabled = &v
+	return s
+}
+
+// SetOptionalFields sets the OptionalFields field's value.
+func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration {
+	s.OptionalFields = v
+	return s
+}
+
+// SetSchedule sets the Schedule field's value.
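Because `InventoryConfiguration.Validate` recurses into `Destination`, `Filter`, and `Schedule`, a fully assembled configuration can be checked client-side before the PUT. A sketch with placeholder ID, prefixes, and bucket ARN; the enum strings mirror the struct tags above:

```go
// weeklyInventoryConfig assembles a weekly CSV inventory of current
// object versions delivered to a second bucket.
func weeklyInventoryConfig() (*s3.InventoryConfiguration, error) {
	cfg := new(s3.InventoryConfiguration).
		SetId("weekly-inventory").
		SetIsEnabled(true).
		SetIncludedObjectVersions("Current").
		SetSchedule(new(s3.InventorySchedule).SetFrequency("Weekly")).
		SetFilter(new(s3.InventoryFilter).SetPrefix("logs/")).
		SetDestination(new(s3.InventoryDestination).
			SetS3BucketDestination(new(s3.InventoryS3BucketDestination).
				SetBucket("arn:aws:s3:::example-inventory-bucket").
				SetFormat("CSV").
				SetPrefix("inventory/")))
	// Validate recurses, so a destination missing Format fails here
	// rather than at request time.
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}
```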
+func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { + s.Schedule = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination +type InventoryDestination struct { + _ struct{} `type:"structure"` + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // S3BucketDestination is a required field + S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { + s.S3BucketDestination = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The prefix that an object must have to be included in the inventory results. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination +type InventoryS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The ID of the account that owns the destination bucket. + AccountId *string `type:"string"` + + // The Amazon resource name (ARN) of the bucket where inventory results will + // be published. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. 
+ Prefix *string `type:"string"` +} + +// String returns the string representation +func (s InventoryS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { + s.Bucket = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule +type InventorySchedule struct { + _ struct{} `type:"structure"` + + // Specifies how frequently inventory results are produced. + // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` +} + +// String returns the string representation +func (s InventorySchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventorySchedule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventorySchedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} + if s.Frequency == nil { + invalidParams.Add(request.NewErrParamRequired("Frequency")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFrequency sets the Frequency field's value. +func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { + s.Frequency = &v + return s +} + +// Container for object key name prefix and suffix filtering rules. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for key value pair that defines the criteria for the + // filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v + return s +} + +// Container for specifying the AWS Lambda notification configuration. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + // + // LambdaFunctionArn is a required field + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { + s.Id = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { + s.LambdaFunctionArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Rules is a required field + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
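A sketch of wiring `LambdaFunctionConfiguration` to the `KeyFilter` rules shown earlier; it assumes the `NotificationConfigurationFilter` type defined elsewhere in this file, which wraps a `KeyFilter` in its `Key` field. The ARN and event name are placeholders:

```go
// jpgLambdaNotification invokes a (placeholder) Lambda function for new
// .jpg uploads; the suffix rule uses the FilterRule/KeyFilter types
// shown earlier.
func jpgLambdaNotification() (*s3.LambdaFunctionConfiguration, error) {
	cfg := new(s3.LambdaFunctionConfiguration).
		SetLambdaFunctionArn("arn:aws:lambda:us-east-1:123456789012:function:thumbnailer").
		SetEvents([]*string{aws.String("s3:ObjectCreated:*")}).
		SetFilter(&s3.NotificationConfigurationFilter{
			Key: &s3.KeyFilter{
				FilterRules: []*s3.FilterRule{
					{Name: aws.String("suffix"), Value: aws.String(".jpg")},
				},
			},
		})
	// Events and the function ARN are the required fields.
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}
```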
+func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `type:"boolean"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v + return s +} + +// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. +func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { + s.ExpiredObjectDeleteMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule +type LifecycleRule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified. + Filter *LifecycleRuleFilter `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. 
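As the comments above note, `ExpiredObjectDeleteMarker` cannot be combined with `Days` or `Date`, so each `LifecycleExpiration` should carry exactly one mode. A sketch with illustrative values only:

```go
// expirationModes shows the three mutually exclusive expiration modes;
// each would belong to a separate LifecycleRule.
func expirationModes() []*s3.LifecycleExpiration {
	byAge := new(s3.LifecycleExpiration).SetDays(90)
	byDate := new(s3.LifecycleExpiration).
		SetDate(time.Date(2018, time.January, 1, 0, 0, 0, 0, time.UTC)) // GMT ISO 8601
	markerCleanup := new(s3.LifecycleExpiration).SetExpiredObjectDeleteMarker(true)
	return []*s3.LifecycleExpiration{byAge, byDate, markerCleanup}
}
```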
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. This is + // deprecated; use Filter instead. + Prefix *string `deprecated:"true" type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. +func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { + s.NoncurrentVersionTransitions = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s +} + +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v + return s +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or +// more predicates. The Lifecycle Rule will apply to any object matching all +// of the predicates configured inside the And operator. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator +type LifecycleRuleAndOperator struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the rule + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` + + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` + + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s LifecycleRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. 
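+
+// The lifecycle types above compose into a bucket policy. The following is an
+// illustrative sketch, not generated code: it assumes the S3 client type and
+// the PutBucketLifecycleConfiguration input types defined elsewhere in this
+// file. It builds a rule with the chained setters that expires objects under
+// the "logs/" prefix after 30 days.
+func exampleLifecycleRuleUsage(svc *S3) error {
+	rule := (&LifecycleRule{}).
+		SetID("expire-logs").
+		SetStatus("Enabled").
+		SetFilter((&LifecycleRuleFilter{}).SetPrefix("logs/")).
+		SetExpiration((&LifecycleExpiration{}).SetDays(30))
+
+	input := (&PutBucketLifecycleConfigurationInput{}).
+		SetBucket("my-bucket").
+		SetLifecycleConfiguration((&BucketLifecycleConfiguration{}).
+			SetRules([]*LifecycleRule{rule}))
+	_, err := svc.PutBucketLifecycleConfiguration(input)
+	return err
+}
+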
+func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest +type ListBucketAnalyticsConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput +type ListBucketAnalyticsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` + + // The ContinuationToken that represents where this request began. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. 
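+
+// An illustrative sketch (not generated code) of paging through this API by
+// hand, assuming the ListBucketAnalyticsConfigurations method defined
+// elsewhere in this file: pass NextContinuationToken back as the next
+// request's ContinuationToken until IsTruncated is no longer true.
+func exampleListAllAnalyticsConfigurations(svc *S3, bucket string) ([]*AnalyticsConfiguration, error) {
+	var configs []*AnalyticsConfiguration
+	input := (&ListBucketAnalyticsConfigurationsInput{}).SetBucket(bucket)
+	for {
+		out, err := svc.ListBucketAnalyticsConfigurations(input)
+		if err != nil {
+			return nil, err
+		}
+		configs = append(configs, out.AnalyticsConfigurationList...)
+		if out.IsTruncated == nil || !*out.IsTruncated {
+			return configs, nil
+		}
+		input.SetContinuationToken(*out.NextContinuationToken)
+	}
+}
+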
+func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of inventory configurations is truncated + // in this response. A value of true indicates that the list is truncated. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. 
+ NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { + s.InventoryConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest +type ListBucketMetricsConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker that is used to continue a metrics configuration listing that + // has been truncated. Use the NextContinuationToken from a previously truncated + // list response to continue the listing. The continuation token is an opaque + // value that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketMetricsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput +type ListBucketMetricsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The marker that is used as a starting point for this metrics configuration + // list response. 
This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of metrics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The list of metrics configurations for a bucket. + MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { + s.MetricsConfigurationList = v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { + s.Buckets = v + return s +} + +// SetOwner sets the Owner field's value. 
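+
+// ListBucketsInput has no fields, so a single request returns every bucket
+// the caller owns. An illustrative sketch (not generated code; the
+// ListBuckets method and the Bucket type are defined elsewhere in this file):
+func exampleListBucketNames(svc *S3) ([]string, error) {
+	out, err := svc.ListBuckets(&ListBucketsInput{})
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for _, b := range out.Buckets {
+		if b.Name != nil {
+			names = append(names, *b.Name)
+		}
+	}
+	return names, nil
+}
+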
+func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest +type ListMultipartUploadsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. 
+func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. 
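+
+// As the field docs above describe, a truncated listing returns NextKeyMarker
+// and NextUploadIdMarker, which feed the next request's KeyMarker and
+// UploadIdMarker. An illustrative hand-rolled pagination sketch (not
+// generated code; ListMultipartUploads is defined elsewhere in this file):
+func exampleListAllMultipartUploads(svc *S3, bucket string) ([]*MultipartUpload, error) {
+	var uploads []*MultipartUpload
+	input := (&ListMultipartUploadsInput{}).SetBucket(bucket)
+	for {
+		out, err := svc.ListMultipartUploads(input)
+		if err != nil {
+			return nil, err
+		}
+		uploads = append(uploads, out.Uploads...)
+		if out.IsTruncated == nil || !*out.IsTruncated {
+			return uploads, nil
+		}
+		input.SetKeyMarker(*out.NextKeyMarker)
+		input.SetUploadIdMarker(*out.NextUploadIdMarker)
+	}
+}
+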
+func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { + s.Uploads = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest +type ListObjectVersionsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. + VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { + s.Bucket = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { + s.EncodingType = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. If your results were truncated, you can + // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last Key returned in a truncated response. + KeyMarker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // Use this value for the key marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // Use this value for the next version id marker parameter in a subsequent request. + NextVersionIdMarker *string `type:"string"` + + Prefix *string `type:"string"` + + VersionIdMarker *string `type:"string"` + + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v + return s +} + +// SetDeleteMarkers sets the DeleteMarkers field's value. 
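+
+// Object versions and delete markers come back in separate lists. An
+// illustrative sketch (not generated code) using the generated paginator,
+// assuming the ListObjectVersionsPages method defined elsewhere in this file:
+func exampleCountVersions(svc *S3, bucket string) (versions, deleteMarkers int, err error) {
+	input := (&ListObjectVersionsInput{}).SetBucket(bucket)
+	err = svc.ListObjectVersionsPages(input, func(page *ListObjectVersionsOutput, lastPage bool) bool {
+		versions += len(page.Versions)
+		deleteMarkers += len(page.DeleteMarkers)
+		return true // keep fetching pages
+	})
+	return versions, deleteMarkers, err
+}
+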
+func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest +type ListObjectsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. 
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { + s.Bucket = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { + s.EncodingType = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { + s.RequestPayer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput +type ListObjectsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Contents []*Object `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + Marker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Amazon S3 lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMaker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. 
+ NextMarker *string `type:"string"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { + s.Contents = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { + s.Name = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { + s.NextMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request +type ListObjectsV2Input struct { + _ struct{} `type:"structure"` + + // Name of the bucket to list. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The owner field is not present in listV2 by default, if you want to return + // owner field with each key in the result then set the fetch owner field to + // true + FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. 
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request in V2 style. Bucket owners need not specify this parameter + // in their requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Input) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { + s.EncodingType = &v + return s +} + +// SetFetchOwner sets the FetchOwner field's value. +func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { + s.FetchOwner = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { + s.RequestPayer = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { + s.StartAfter = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by delimiter + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `type:"string"` + + // A delimiter is a character you use to group keys. 
+ Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + // KeyCount is the number of keys returned with this request. KeyCount will + // always be less than equals to MaxKeys field. Say you ask for 50 keys, your + // result will include less than equals 50 keys + KeyCount *int64 `type:"integer"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `type:"integer"` + + // Name of the bucket to list. + Name *string `type:"string"` + + // NextContinuationToken is sent when isTruncated is true which means there + // are more keys in the bucket that can be listed. The next list requests to + // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken + // is obfuscated and is not a real key + NextContinuationToken *string `type:"string"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `type:"string"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Output) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { + s.Contents = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { + s.IsTruncated = &v + return s +} + +// SetKeyCount sets the KeyCount field's value. +func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { + s.KeyCount = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { + s.Name = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { + s.NextContinuationToken = &v + return s +} + +// SetPrefix sets the Prefix field's value. 
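+
+// Unlike ListObjects, which pages via Marker/NextMarker, ListObjectsV2 pages
+// via ContinuationToken/NextContinuationToken. An illustrative sketch (not
+// generated code) that collects keys under a prefix, assuming the
+// ListObjectsV2Pages paginator defined elsewhere in this file:
+func exampleListKeysV2(svc *S3, bucket, prefix string) ([]string, error) {
+	var keys []string
+	input := (&ListObjectsV2Input{}).SetBucket(bucket).SetPrefix(prefix)
+	err := svc.ListObjectsV2Pages(input, func(page *ListObjectsV2Output, lastPage bool) bool {
+		for _, obj := range page.Contents {
+			if obj.Key != nil {
+				keys = append(keys, *obj.Key)
+			}
+		}
+		return true // keep fetching pages
+	})
+	return keys, err
+}
+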
+func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { + s.Prefix = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { + s.StartAfter = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest +type ListPartsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsInput) SetKey(v string) *ListPartsInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { + s.MaxParts = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + Owner *Owner `type:"structure"` + + // Part number after which listing begins. + PartNumberMarker *int64 `type:"integer"` + + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. 
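+
+// When IsTruncated is set, NextPartNumberMarker feeds the next request's
+// PartNumberMarker; the generated paginator handles that loop. An
+// illustrative sketch (not generated code) summing the size of uploaded
+// parts, assuming the ListPartsPages method and the Part type's Size field
+// defined elsewhere in this file:
+func exampleSumUploadedPartSizes(svc *S3, bucket, key, uploadID string) (int64, error) {
+	var total int64
+	input := (&ListPartsInput{}).SetBucket(bucket).SetKey(key).SetUploadId(uploadID)
+	err := svc.ListPartsPages(input, func(page *ListPartsOutput, lastPage bool) bool {
+		for _, p := range page.Parts {
+			if p.Size != nil {
+				total += *p.Size
+			}
+		}
+		return true // keep fetching pages
+	})
+	return total, err
+}
+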
+func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + TargetBucket *string `type:"string"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + TargetPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator +type MetricsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags used when evaluating an AND predicate. 
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s MetricsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration +type MetricsConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a metrics configuration filter. The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, a tag, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MetricsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter +type MetricsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` + + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` + + // The tag used when evaluating a metrics filter. 
+ Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s MetricsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} + +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA or GLACIER storage class. If your bucket is +// versioning-enabled (or versioning is suspended), you can set this action +// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA +// or GLACIER storage class at a specific period in the object's lifetime. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v + return s +} + +// SetQueueConfigurations sets the QueueConfigurations field's value. +func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v + return s +} + +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. 
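+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of building the non-deprecated NotificationConfiguration
+// defined above: one SQS queue notified on object creation. The queue ARN is
+// a hypothetical placeholder; Validate enforces the required Events and
+// QueueArn fields of each nested configuration.
+//
+//	cfg := &s3.NotificationConfiguration{
+//		QueueConfigurations: []*s3.QueueConfiguration{{
+//			Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//			QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"), // hypothetical
+//		}},
+//	}
+//	if err := cfg.Validate(); err != nil {
+//		// handle missing required fields
+//	}
+// --- end example ---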
+func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { + s.CloudFunctionConfiguration = v + return s +} + +// SetQueueConfiguration sets the QueueConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.QueueConfiguration = v + return s +} + +// SetTopicConfiguration sets the TopicConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.TopicConfiguration = v + return s +} + +// Container for object key name filtering rules. For information about key +// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // Container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { + s.Key = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object +type Object struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Object) SetETag(v string) *Object { + s.ETag = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Object) SetKey(v string) *Object { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Object) SetLastModified(v time.Time) *Object { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *Object) SetOwner(v *Owner) *Object { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *Object) SetSize(v int64) *Object { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Object) SetStorageClass(v string) *Object { + s.StorageClass = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. 
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
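+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of where ObjectIdentifier (defined above) is used: the
+// batch-delete request takes a list of keys, each with an optional version
+// ID. Client, bucket, keys and version ID are hypothetical placeholders.
+//
+//	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		Delete: &s3.Delete{
+//			Objects: []*s3.ObjectIdentifier{
+//				{Key: aws.String("a.txt")},
+//				{Key: aws.String("b.txt"), VersionId: aws.String("example-version-id")},
+//			},
+//		},
+//	})
+// --- end example ---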
+func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner +type Owner struct { + _ struct{} `type:"structure"` + + DisplayName *string `type:"string"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *Part) SetSize(v int64) *Part { + s.Size = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure" payload:"AccelerateConfiguration"` + + // Specifies the Accelerate Configuration you want to set for the bucket. + // + // AccelerateConfiguration is a required field + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Name of the bucket for which the accelerate configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
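+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of PutBucketAccelerateConfiguration, defined above,
+// assuming an *s3.S3 client named svc and a hypothetical bucket name.
+//
+//	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		AccelerateConfiguration: &s3.AccelerateConfiguration{
+//			Status: aws.String("Enabled"), // "Enabled" or "Suspended"
+//		},
+//	})
+// --- end example ---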
+func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. +func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { + s.AccelerateConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest +type PutBucketAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
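+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of PutBucketAcl using the canned-ACL form described
+// above. A request normally supplies either the canned ACL header or the
+// explicit grant headers/AccessControlPolicy, not both. Bucket name is a
+// hypothetical placeholder.
+//
+//	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		ACL:    aws.String("private"),        // sent as the x-amz-acl header
+//	})
+// --- end example ---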
+func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest +type PutBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + // + // AnalyticsConfiguration is a required field + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The name of the bucket to which an analytics configuration is stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
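+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal skeleton of PutBucketAnalyticsConfiguration, defined above. A
+// real configuration would also populate StorageClassAnalysis.DataExport;
+// all names here are hypothetical placeholders.
+//
+//	_, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		Id:     aws.String("report-1"),       // hypothetical configuration ID
+//		AnalyticsConfiguration: &s3.AnalyticsConfiguration{
+//			Id:                   aws.String("report-1"),
+//			StorageClassAnalysis: &s3.StorageClassAnalysis{},
+//		},
+//	})
+// --- end example ---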
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} + if s.AnalyticsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.AnalyticsConfiguration != nil { + if err := s.AnalyticsConfiguration.Validate(); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { + s.AnalyticsConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput +type PutBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // CORSConfiguration is a required field + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v + return s +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. 
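+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of PutBucketCors, defined above: one rule allowing GET
+// from a single origin. Bucket name and origin are hypothetical.
+//
+//	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		CORSConfiguration: &s3.CORSConfiguration{
+//			CORSRules: []*s3.CORSRule{{
+//				AllowedMethods: []*string{aws.String("GET")},
+//				AllowedOrigins: []*string{aws.String("https://example.com")}, // hypothetical
+//			}},
+//		},
+//	})
+// --- end example ---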
+func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest +type PutBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. 
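+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of PutBucketInventoryConfiguration, defined above,
+// covering the required Id, IsEnabled, IncludedObjectVersions, Schedule and
+// Destination fields. Bucket names, ARN and ID are hypothetical.
+//
+//	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
+//		Bucket: aws.String("example-bucket"),   // hypothetical source bucket
+//		Id:     aws.String("weekly-inventory"), // hypothetical configuration ID
+//		InventoryConfiguration: &s3.InventoryConfiguration{
+//			Id:                     aws.String("weekly-inventory"),
+//			IsEnabled:              aws.Bool(true),
+//			IncludedObjectVersions: aws.String("Current"),
+//			Schedule:               &s3.InventorySchedule{Frequency: aws.String("Weekly")},
+//			Destination: &s3.InventoryDestination{
+//				S3BucketDestination: &s3.InventoryS3BucketDestination{
+//					Bucket: aws.String("arn:aws:s3:::example-report-bucket"), // hypothetical
+//					Format: aws.String("CSV"),
+//				},
+//			},
+//		},
+//	})
+// --- end example ---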
+func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput +type PutBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
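+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of PutBucketLifecycleConfiguration, defined above, that
+// also exercises the NoncurrentVersionExpiration type from earlier in this
+// file: noncurrent versions under a prefix expire after 30 days. Bucket,
+// rule ID and prefix are hypothetical.
+//
+//	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//			Rules: []*s3.LifecycleRule{{
+//				ID:     aws.String("expire-old-versions"), // hypothetical
+//				Prefix: aws.String("logs/"),               // hypothetical
+//				Status: aws.String("Enabled"),
+//				NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
+//					NoncurrentDays: aws.Int64(30),
+//				},
+//			}},
+//		},
+//	})
+// --- end example ---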
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest +type PutBucketLifecycleInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { + s.Bucket = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
+func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { + s.LifecycleConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest +type PutBucketLoggingInput struct { + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // BucketLoggingStatus is a required field + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v + return s +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest +type PutBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // The name of the bucket for which the metrics configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the metrics configuration. 
+ // + // MetricsConfiguration is a required field + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.MetricsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) + } + if s.MetricsConfiguration != nil { + if err := s.MetricsConfiguration.Validate(); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { + s.MetricsConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput +type PutBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest +type PutBucketNotificationConfigurationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off on the bucket. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
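+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of PutBucketMetricsConfiguration, defined above, using
+// the MetricsConfiguration and MetricsFilter types from earlier in this
+// file to scope metrics to a key prefix. Bucket, ID and prefix are
+// hypothetical placeholders.
+//
+//	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		Id:     aws.String("docs-metrics"),   // hypothetical configuration ID
+//		MetricsConfiguration: &s3.MetricsConfiguration{
+//			Id:     aws.String("docs-metrics"),
+//			Filter: &s3.MetricsFilter{Prefix: aws.String("documents/")},
+//		},
+//	})
+// --- end example ---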
+func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest +type PutBucketNotificationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. 
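+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of PutBucketNotificationConfiguration, defined above:
+// publish object-removal events to an SNS topic. Bucket and topic ARN are
+// hypothetical placeholders.
+//
+//	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		NotificationConfiguration: &s3.NotificationConfiguration{
+//			TopicConfigurations: []*s3.TopicConfiguration{{
+//				Events:   []*string{aws.String("s3:ObjectRemoved:*")},
+//				TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"), // hypothetical
+//			}},
+//		},
+//	})
+// --- end example ---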
+func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The bucket policy as a JSON document. + // + // Policy is a required field + Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + // + // ReplicationConfiguration is a required field + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
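+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of PutBucketPolicy, defined above: the Policy field is
+// the raw JSON policy document as a string. Bucket name and statement are
+// hypothetical placeholders.
+//
+//	policy := `{
+//	  "Version": "2012-10-17",
+//	  "Statement": [{
+//	    "Effect":    "Allow",
+//	    "Principal": "*",
+//	    "Action":    "s3:GetObject",
+//	    "Resource":  "arn:aws:s3:::example-bucket/*"
+//	  }]
+//	}`
+//	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		Policy: aws.String(policy),
+//	})
+// --- end example ---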
+func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v + return s +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // RequestPaymentConfiguration is a required field + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. 
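+// --- Editor's example (not part of the generated SDK code) ---
+// A minimal sketch of PutBucketReplication, defined above. Replication
+// requires versioning on the source bucket, an IAM role S3 can assume, and
+// at least one rule naming a destination bucket ARN; all names here are
+// hypothetical placeholders.
+//
+//	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//		Bucket: aws.String("example-source-bucket"), // hypothetical
+//		ReplicationConfiguration: &s3.ReplicationConfiguration{
+//			Role: aws.String("arn:aws:iam::123456789012:role/example-replication"), // hypothetical
+//			Rules: []*s3.ReplicationRule{{
+//				Prefix: aws.String(""), // replicate the whole bucket
+//				Status: aws.String("Enabled"),
+//				Destination: &s3.Destination{
+//					Bucket: aws.String("arn:aws:s3:::example-dest-bucket"), // hypothetical
+//				},
+//			}},
+//		},
+//	})
+// --- end example ---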
+func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { + s.Bucket = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest +type PutBucketVersioningInput struct { + _ struct{} `type:"structure" payload:"VersioningConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. 
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // VersioningConfiguration is a required field + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { + s.Bucket = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} + +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. 
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // Name of the bucket to which the PUT operation was initiated. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the PUT operation was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetACL sets the ACL field's value.
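+//
+// The setters return the receiver, so they can be chained. A minimal upload
+// sketch, assuming an initialized *s3.S3 client named svc; the bucket and key
+// names are hypothetical:
+//
+//    input := (&s3.PutObjectInput{}).
+//        SetBucket("example-bucket").
+//        SetKey("example-key").
+//        SetBody(strings.NewReader("hello")).
+//        SetACL(s3.ObjectCannedACLPrivate)
+//    _, err := svc.PutObject(input)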
+func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v + return s +} + +// SetBody sets the Body field's value. +func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. 
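+//
+// For example, continuing the sketch above, to request server-side encryption
+// with an S3-managed key (aws:kms would instead be paired with SetSSEKMSKeyId,
+// or fall back to the account's default KMS key):
+//
+//    input.SetServerSideEncryption(s3.ServerSideEncryptionAes256)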
+func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
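+//
+// Callers normally read these response fields rather than set them; a sketch,
+// assuming out is the *s3.PutObjectOutput from a successful PutObject call
+// against a versioning-enabled bucket:
+//
+//    if out.VersionId != nil {
+//        fmt.Println("stored version:", *out.VersionId)
+//    }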
+func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest +type PutObjectTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { + s.Tagging = v + return s +} + +// SetVersionId sets the VersionId field's value. 
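+//
+// A minimal object-tagging sketch, assuming an initialized *s3.S3 client
+// named svc; the bucket, key, and tag values are hypothetical:
+//
+//    input := &s3.PutObjectTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-key"),
+//        Tagging: &s3.Tagging{TagSet: []*s3.Tag{
+//            {Key: aws.String("env"), Value: aws.String("dev")},
+//        }},
+//    }
+//    _, err := svc.PutObjectTagging(input)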
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput
+type PutObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration
+type QueueConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Events is a required field
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Container for object key name filtering rules. For information about key
+ // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
+ // events of specified type.
+ //
+ // QueueArn is a required field
+ QueueArn *string `locationName:"Queue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s QueueConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *QueueConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.QueueArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfiguration) SetId(v string) *QueueConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetQueueArn sets the QueueArn field's value.
+func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration {
+ s.QueueArn = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated
+type QueueConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket event for which to send notifications.
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ Queue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s QueueConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+// SetEvent sets the Event field's value.
+func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+ s.Id = &v
+ return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+ s.Queue = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect
+type Redirect struct {
+ _ struct{} `type:"structure"`
+
+ // The host name to use in the redirect request.
+ HostName *string `type:"string"`
+
+ // The HTTP redirect code to use on the response. Not required if one of the
+ // siblings is present.
+ HttpRedirectCode *string `type:"string"`
+
+ // Protocol to use (http, https) when redirecting requests. The default is the
+ // protocol that is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+
+ // The object key prefix to use in the redirect request. For example, to redirect
+ // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+ // documents/, you can set a condition block with KeyPrefixEquals set to docs/
+ // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+ // if one of the siblings is present. Can be present only if ReplaceKeyWith
+ // is not provided.
+ ReplaceKeyPrefixWith *string `type:"string"`
+
+ // The specific object key to use in the redirect request. For example, redirect
+ // requests to error.html. Not required if one of the siblings is present. Can
+ // be present only if ReplaceKeyPrefixWith is not provided.
+ ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Redirect) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Redirect) GoString() string {
+ return s.String()
+}
+
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+ s.HostName = &v
+ return s
+}
+
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
+ s.HttpRedirectCode = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *Redirect) SetProtocol(v string) *Redirect {
+ s.Protocol = &v
+ return s
+}
+
+// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
+func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
+ s.ReplaceKeyPrefixWith = &v
+ return s
+}
+
+// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
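+//
+// The docs/-to-documents/ example from the Redirect field documentation
+// above, written out as a sketch (the values are illustrative):
+//
+//    rule := &s3.RoutingRule{
+//        Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
+//        Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("/documents")},
+//    }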
+func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { + s.ReplaceKeyWith = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests will be redirected. + // + // HostName is a required field + HostName *string `type:"string" required:"true"` + + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostName sets the HostName field's value. +func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { + s.HostName = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { + s.Protocol = &v + return s +} + +// Container for replication rules. You can add as many as 1,000 rules. Total +// replication configuration size can be up to 2 MB. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating + // the objects. + // + // Role is a required field + Role *string `type:"string" required:"true"` + + // Container for information about a particular replication rule. Replication + // configuration must have at least one rule and can contain up to 1,000 rules. + // + // Rules is a required field + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRole sets the Role field's value. +func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { + s.Role = &v + return s +} + +// SetRules sets the Rules field's value. 
+func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule +type ReplicationRule struct { + _ struct{} `type:"structure"` + + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Object keyname prefix identifying one or more objects to which the rule applies. + // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes + // are not supported. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The rule is ignored if status is not Enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayer sets the Payer field's value. 
+func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s RestoreObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectOutput) GoString() string {
+ return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest
+type RestoreRequest struct {
+ _ struct{} `type:"structure"`
+
+ // Lifetime of the active copy in days.
+ //
+ // Days is a required field
+ Days *int64 `type:"integer" required:"true"`
+
+ // Glacier-related parameters pertaining to this job.
+ GlacierJobParameters *GlacierJobParameters `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
+ if s.Days == nil {
+ invalidParams.Add(request.NewErrParamRequired("Days"))
+ }
+ if s.GlacierJobParameters != nil {
+ if err := s.GlacierJobParameters.Validate(); err != nil {
+ invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDays sets the Days field's value.
+func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
+ s.Days = &v
+ return s
+}
+
+// SetGlacierJobParameters sets the GlacierJobParameters field's value.
+func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
+ s.GlacierJobParameters = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule
+type RoutingRule struct {
+ _ struct{} `type:"structure"`
+
+ // A container for describing a condition that must be met for the specified
+ // redirect to apply. For example, 1. If request is for pages in the /docs folder,
+ // redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+ // redirect request to another host where you might process the error.
+ Condition *Condition `type:"structure"`
+
+ // Container for redirect information. You can redirect requests to another
+ // host, to another page, or with another protocol. In the event of an error,
+ // you can specify a different error code to return.
+ //
+ // Redirect is a required field
+ Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RoutingRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RoutingRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+ if s.Redirect == nil {
+ invalidParams.Add(request.NewErrParamRequired("Redirect"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCondition sets the Condition field's value.
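+//
+// The 4xx example from the RoutingRule documentation above, written out as a
+// sketch (the host name is hypothetical):
+//
+//    rule := &s3.RoutingRule{
+//        Condition: &s3.Condition{HttpErrorCodeReturnedEquals: aws.String("404")},
+//        Redirect:  &s3.Redirect{HostName: aws.String("errors.example.com")},
+//    }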
+func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v + return s +} + +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule +type Rule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA or GLACIER storage class. If your bucket is + // versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA + // or GLACIER storage class at a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. 
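+//
+// A sketch of a complete lifecycle rule built from the fields above,
+// transitioning objects under a hypothetical logs/ prefix to GLACIER after
+// 30 days:
+//
+//    rule := &s3.Rule{
+//        Prefix: aws.String("logs/"),
+//        Status: aws.String(s3.ExpirationStatusEnabled),
+//        Transition: &s3.Transition{
+//            Days:         aws.Int64(30),
+//            StorageClass: aws.String(s3.TransitionStorageClassGlacier),
+//        },
+//    }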
+func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis +type StorageClassAnalysis struct { + _ struct{} `type:"structure"` + + // A container used to describe how data related to the storage class analysis + // should be exported. + DataExport *StorageClassAnalysisDataExport `type:"structure"` +} + +// String returns the string representation +func (s StorageClassAnalysis) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysis) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} + if s.DataExport != nil { + if err := s.DataExport.Validate(); err != nil { + invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataExport sets the DataExport field's value. +func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { + s.DataExport = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport +type StorageClassAnalysisDataExport struct { + _ struct{} `type:"structure"` + + // The place to store the data for an analysis. + // + // Destination is a required field + Destination *AnalyticsExportDestination `type:"structure" required:"true"` + + // The version of the output schema to use when exporting data. Must be V_1. + // + // OutputSchemaVersion is a required field + OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` +} + +// String returns the string representation +func (s StorageClassAnalysisDataExport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysisDataExport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
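+//
+// A sketch of an export that passes this validation; the destination types
+// are defined elsewhere in this package, and the bucket ARN is hypothetical:
+//
+//    export := &s3.StorageClassAnalysisDataExport{
+//        OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
+//        Destination: &s3.AnalyticsExportDestination{
+//            S3BucketDestination: &s3.AnalyticsS3BucketDestination{
+//                Bucket: aws.String("arn:aws:s3:::example-analytics-bucket"),
+//                Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
+//            },
+//        },
+//    }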
+func (s *StorageClassAnalysisDataExport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.OutputSchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { + s.Destination = v + return s +} + +// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. +func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { + s.OutputSchemaVersion = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging +type Tagging struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. 
+func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant +type TargetGrant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Logging permissions assigned to the Grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *TargetGrant) SetPermission(v string) *TargetGrant { + s.Permission = &v + return s +} + +// Container for specifying the configuration when you want Amazon S3 to publish +// events to an Amazon Simple Notification Service (Amazon SNS) topic. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration +type TopicConfiguration struct { + _ struct{} `type:"structure"` + + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + // + // TopicArn is a required field + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. 
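+// A TopicConfiguration is normally assembled with these setters before being
+// attached to a bucket notification configuration; a hedged sketch (the SNS
+// topic ARN is a placeholder):
+//
+//    cfg := new(s3.TopicConfiguration).
+//        SetTopicArn("arn:aws:sns:us-east-1:123456789012:bucket-events").
+//        SetEvents([]*string{aws.String(s3.EventS3ObjectCreated)})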
+func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { + s.Id = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { + s.TopicArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { + s.Id = &v + return s +} + +// SetTopic sets the Topic field's value. +func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { + s.Topic = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. 
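+// A lifecycle transition is usually expressed either as an absolute date or
+// as a number of days together with a target storage class; a minimal sketch:
+//
+//    t := new(s3.Transition).
+//        SetDays(30).
+//        SetStorageClass(s3.TransitionStorageClassStandardIa)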
+func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest +type UploadPartCopyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first ten bytes of the source. You can copy a range only if the source object + // is greater than 5 GB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being copied. This is a positive integer between 1 and + // 10,000. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { + s.Bucket = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. 
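+// A hedged sketch of a minimal UploadPartCopyInput (bucket, key, and source
+// names are placeholders, uploadID comes from CreateMultipartUpload, and
+// CopySource must be URL-encoded):
+//
+//    input := new(s3.UploadPartCopyInput).
+//        SetBucket("destination-bucket").
+//        SetKey("destination-key").
+//        SetCopySource("source-bucket/source-key").
+//        SetUploadId(uploadID).
+//        SetPartNumber(1)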
+func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceRange sets the CopySourceRange field's value. +func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { + s.CopySourceRange = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. 
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +// SetCopyPartResult sets the CopyPartResult field's value. +func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { + s.CopySourceVersionId = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { + s.ServerSideEncryption = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest +type UploadPartInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // Name of the bucket to which the multipart upload was initiated. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being uploaded. This is a positive integer between 1 + // and 10,000. 
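+	// Note that every part except the last must also be at least 5 MB, so the
+	// part number alone does not determine the layout of the final object.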
+ // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { + s.Bucket = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { + s.ContentLength = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. 
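+// A hedged end-to-end sketch of uploading a single part (svc, uploadID, and
+// data are placeholders created elsewhere; any io.ReadSeeker works as Body):
+//
+//    out, err := svc.UploadPart(new(s3.UploadPartInput).
+//        SetBucket("my-bucket").
+//        SetKey("my-key").
+//        SetUploadId(uploadID).
+//        SetPartNumber(1).
+//        SetBody(bytes.NewReader(data)))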
+func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { + s.ETag = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
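+// The ETag in each UploadPartOutput is what CompleteMultipartUpload expects
+// back for that part, so callers typically record it as parts finish; a
+// sketch (out and completed are placeholders from the surrounding code):
+//
+//    completed = append(completed, &s3.CompletedPart{
+//        ETag:       out.ETag,
+//        PartNumber: aws.Int64(1),
+//    })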
+func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { + s.ServerSideEncryption = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WebsiteConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} + if s.ErrorDocument != nil { + if err := s.ErrorDocument.Validate(); err != nil { + invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) + } + } + if s.IndexDocument != nil { + if err := s.IndexDocument.Validate(); err != nil { + invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) + } + } + if s.RedirectAllRequestsTo != nil { + if err := s.RedirectAllRequestsTo.Validate(); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) + } + } + if s.RoutingRules != nil { + for i, v := range s.RoutingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorDocument sets the ErrorDocument field's value. 
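+// A minimal static-website sketch (document names are placeholders); the
+// nested types validate their own required fields when Validate is called:
+//
+//    cfg := new(s3.WebsiteConfiguration).
+//        SetIndexDocument(&s3.IndexDocument{Suffix: aws.String("index.html")}).
+//        SetErrorDocument(&s3.ErrorDocument{Key: aws.String("error.html")})
+//    err := cfg.Validate()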
+func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration { + s.RoutingRules = v + return s +} + +const ( + // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value + AnalyticsS3ExportFileFormatCsv = "CSV" +) + +const ( + // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value + BucketAccelerateStatusEnabled = "Enabled" + + // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value + BucketAccelerateStatusSuspended = "Suspended" +) + +const ( + // BucketCannedACLPrivate is a BucketCannedACL enum value + BucketCannedACLPrivate = "private" + + // BucketCannedACLPublicRead is a BucketCannedACL enum value + BucketCannedACLPublicRead = "public-read" + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value + BucketCannedACLPublicReadWrite = "public-read-write" + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +const ( + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest2 = "us-west-2" + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth1 = "ap-south-1" + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + + // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorth1 = "cn-north-1" + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuCentral1 = "eu-central-1" +) + +const ( + // BucketLogsPermissionFullControl is a BucketLogsPermission enum value + BucketLogsPermissionFullControl = "FULL_CONTROL" + + // BucketLogsPermissionRead is a BucketLogsPermission enum value + BucketLogsPermissionRead = "READ" + + // BucketLogsPermissionWrite is a BucketLogsPermission enum value + BucketLogsPermissionWrite = "WRITE" +) + +const ( + // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value + BucketVersioningStatusEnabled = "Enabled" + + // 
BucketVersioningStatusSuspended is a BucketVersioningStatus enum value + BucketVersioningStatusSuspended = "Suspended" +) + +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key may contain any Unicode character; +// however, XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // EncodingTypeUrl is a EncodingType enum value + EncodingTypeUrl = "url" +) + +// Bucket event for which to send notifications. +const ( + // EventS3ReducedRedundancyLostObject is a Event enum value + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + + // EventS3ObjectCreated is a Event enum value + EventS3ObjectCreated = "s3:ObjectCreated:*" + + // EventS3ObjectCreatedPut is a Event enum value + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + + // EventS3ObjectCreatedPost is a Event enum value + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + + // EventS3ObjectCreatedCopy is a Event enum value + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + + // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + + // EventS3ObjectRemoved is a Event enum value + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + + // EventS3ObjectRemovedDelete is a Event enum value + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + + // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" +) + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +const ( + // FilterRuleNamePrefix is a FilterRuleName enum value + FilterRuleNamePrefix = "prefix" + + // FilterRuleNameSuffix is a FilterRuleName enum value + FilterRuleNameSuffix = "suffix" +) + +const ( + // InventoryFormatCsv is a InventoryFormat enum value + InventoryFormatCsv = "CSV" +) + +const ( + // InventoryFrequencyDaily is a InventoryFrequency enum value + InventoryFrequencyDaily = "Daily" + + // InventoryFrequencyWeekly is a InventoryFrequency enum value + InventoryFrequencyWeekly = "Weekly" +) + +const ( + // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsAll = "All" + + // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsCurrent = "Current" +) + +const ( + // InventoryOptionalFieldSize is a InventoryOptionalField enum value + InventoryOptionalFieldSize = "Size" + + // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value + InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" + + // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value + InventoryOptionalFieldStorageClass = "StorageClass" + + // InventoryOptionalFieldEtag is a InventoryOptionalField enum value + InventoryOptionalFieldEtag = "ETag" + + // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value + InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" + + // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value + 
InventoryOptionalFieldReplicationStatus = "ReplicationStatus" +) + +const ( + // MFADeleteEnabled is a MFADelete enum value + MFADeleteEnabled = "Enabled" + + // MFADeleteDisabled is a MFADelete enum value + MFADeleteDisabled = "Disabled" +) + +const ( + // MFADeleteStatusEnabled is a MFADeleteStatus enum value + MFADeleteStatusEnabled = "Enabled" + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value + MFADeleteStatusDisabled = "Disabled" +) + +const ( + // MetadataDirectiveCopy is a MetadataDirective enum value + MetadataDirectiveCopy = "COPY" + + // MetadataDirectiveReplace is a MetadataDirective enum value + MetadataDirectiveReplace = "REPLACE" +) + +const ( + // ObjectCannedACLPrivate is a ObjectCannedACL enum value + ObjectCannedACLPrivate = "private" + + // ObjectCannedACLPublicRead is a ObjectCannedACL enum value + ObjectCannedACLPublicRead = "public-read" + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value + ObjectCannedACLPublicReadWrite = "public-read-write" + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value + ObjectCannedACLAuthenticatedRead = "authenticated-read" + + // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value + ObjectCannedACLAwsExecRead = "aws-exec-read" + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +const ( + // ObjectStorageClassStandard is a ObjectStorageClass enum value + ObjectStorageClassStandard = "STANDARD" + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value + ObjectStorageClassGlacier = "GLACIER" +) + +const ( + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value + ObjectVersionStorageClassStandard = "STANDARD" +) + +const ( + // PayerRequester is a Payer enum value + PayerRequester = "Requester" + + // PayerBucketOwner is a Payer enum value + PayerBucketOwner = "BucketOwner" +) + +const ( + // PermissionFullControl is a Permission enum value + PermissionFullControl = "FULL_CONTROL" + + // PermissionWrite is a Permission enum value + PermissionWrite = "WRITE" + + // PermissionWriteAcp is a Permission enum value + PermissionWriteAcp = "WRITE_ACP" + + // PermissionRead is a Permission enum value + PermissionRead = "READ" + + // PermissionReadAcp is a Permission enum value + PermissionReadAcp = "READ_ACP" +) + +const ( + // ProtocolHttp is a Protocol enum value + ProtocolHttp = "http" + + // ProtocolHttps is a Protocol enum value + ProtocolHttps = "https" +) + +const ( + // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusEnabled = "Enabled" + + // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusDisabled = "Disabled" +) + +const ( + // ReplicationStatusComplete is a ReplicationStatus enum value + ReplicationStatusComplete = "COMPLETE" + + // ReplicationStatusPending is a ReplicationStatus enum value + ReplicationStatusPending = "PENDING" + + // ReplicationStatusFailed is a ReplicationStatus enum value + ReplicationStatusFailed = "FAILED" + + // ReplicationStatusReplica is a ReplicationStatus enum value + ReplicationStatusReplica = "REPLICA" +) + +// If present, indicates that the requester was successfully charged for 
the +// request. +const ( + // RequestChargedRequester is a RequestCharged enum value + RequestChargedRequester = "requester" +) + +// Confirms that the requester knows that she or he will be charged for the +// request. Bucket owners need not specify this parameter in their requests. +// Documentation on downloading objects from requester pays buckets can be found +// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +const ( + // RequestPayerRequester is a RequestPayer enum value + RequestPayerRequester = "requester" +) + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value + ServerSideEncryptionAes256 = "AES256" + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value + ServerSideEncryptionAwsKms = "aws:kms" +) + +const ( + // StorageClassStandard is a StorageClass enum value + StorageClassStandard = "STANDARD" + + // StorageClassReducedRedundancy is a StorageClass enum value + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // StorageClassStandardIa is a StorageClass enum value + StorageClassStandardIa = "STANDARD_IA" +) + +const ( + // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value + StorageClassAnalysisSchemaVersionV1 = "V_1" +) + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +const ( + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" +) + +const ( + // TypeCanonicalUser is a Type enum value + TypeCanonicalUser = "CanonicalUser" + + // TypeAmazonCustomerByEmail is a Type enum value + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + + // TypeGroup is a Type enum value + TypeGroup = "Group" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 0000000000..bc68a46acf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,106 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. 
+// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = aws.String(loc) + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go new file mode 100644 index 0000000000..9fc5df94d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// contentMD5 computes and sets the HTTP Content-MD5 header for requests that +// require it. +func contentMD5(r *request.Request) { + h := md5.New() + + // hash the body. seek back to the first position after reading to reset + // the body for transmission. copy errors may be assumed to be from the + // body. + _, err := io.Copy(h, r.Body) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to read body", err) + return + } + _, err = r.Body.Seek(0, 0) + if err != nil { + r.Error = awserr.New("ContentMD5", "failed to seek body", err) + return + } + + // encode the md5 checksum in base64 and set the request header. 
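+	// Note the header value is the base64 encoding of the raw 16-byte digest,
+	// not of its hex form; S3 rejects a request whose digest does not match
+	// the body it received.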
+	sum := h.Sum(nil)
+	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+	base64.StdEncoding.Encode(sum64, sum)
+	r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100644
index 0000000000..8463347230
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -0,0 +1,46 @@
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+func init() {
+	initClient = defaultInitClientFn
+	initRequest = defaultInitRequestFn
+}
+
+func defaultInitClientFn(c *client.Client) {
+	// Support building custom endpoints based on config
+	c.Handlers.Build.PushFront(updateEndpointForS3Config)
+
+	// Require SSL when using SSE keys
+	c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+	c.Handlers.Build.PushBack(computeSSEKeys)
+
+	// S3 uses custom error unmarshaling logic
+	c.Handlers.UnmarshalError.Clear()
+	c.Handlers.UnmarshalError.PushBack(unmarshalError)
+}
+
+func defaultInitRequestFn(r *request.Request) {
+	// Add request handlers for specific platforms.
+	// e.g. 100-continue support for PUT requests using Go 1.6
+	platformRequestHandlers(r)
+
+	switch r.Operation.Name {
+	case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
+		opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
+		opPutBucketReplication:
+		// These S3 operations require Content-MD5 to be set
+		r.Handlers.Build.PushBack(contentMD5)
+	case opGetBucketLocation:
+		// GetBucketLocation has custom parsing logic
+		r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+	case opCreateBucket:
+		// Auto-populate LocationConstraint with current region
+		r.Handlers.Validate.PushFront(populateLocationConstraint)
+	case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+		r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
new file mode 100644
index 0000000000..f045fd0db9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
@@ -0,0 +1,78 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3 provides the client and types for making API
+// requests to Amazon Simple Storage Service.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// Using the Client
+//
+// To use the client for Amazon Simple Storage Service you will first need
+// to create a new instance of it.
+//
+// When creating a client for an AWS service you'll first need to have a Session
+// already created. The Session provides configuration that can be shared
+// between multiple service clients. Additional configuration can be applied to
+// the Session and service's client when they are constructed. The aws package's
+// Config type contains several fields such as Region for the AWS Region the
+// client should make API requests to. The optional Config value can be provided
+// as the variadic argument for Sessions and client creation.
+//
+// Once the service's client is created you can use it to make API requests to
+// the AWS service. These clients are safe to use concurrently.
+//
+//    // Create a session to share configuration, and load external configuration.
+//    sess := session.Must(session.NewSession())
+//
+//    // Create the service's client with the session.
+//    svc := s3.New(sess)
+//
+// See the SDK's documentation for more information on how to use service clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws package's Config type for more information on configuration options.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client S3 for more
+// information on creating the service's client.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+//
+// Once the client is created you can make an API request to the service.
+// Each API method takes an input parameter, and returns the service response
+// and an error.
+//
+// The API method will document which error codes can be returned by the service
+// for the operation if the service models the API operation's errors. These
+// errors will also be available as const strings prefixed with "ErrCode".
+//
+//    result, err := svc.AbortMultipartUpload(params)
+//    if err != nil {
+//        // Cast err to awserr.Error to handle specific error codes.
+//        aerr, ok := err.(awserr.Error)
+//        if ok && aerr.Code() == {
+//            // Specific error code handling
+//        }
+//        return err
+//    }
+//
+//    fmt.Println("AbortMultipartUpload result:")
+//    fmt.Println(result)
+//
+// Using the Client with Context
+//
+// The service's client also provides methods to make API requests with a Context
+// value. This allows you to control the timeout and cancellation of pending
+// requests. These methods also take request Option as a variadic parameter to apply
+// additional configuration to the API request.
+//
+//    ctx := context.Background()
+//
+//    result, err := svc.AbortMultipartUploadWithContext(ctx, params)
+//
+// See the request package documentation for more information on using Context pattern
+// with the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100644
index 0000000000..b794a63ba2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,109 @@
+// Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader also supports both
+// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker
+// for optimizations if the Body satisfies that type. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+//    // The session the S3 Uploader will use
+//    sess := session.Must(session.NewSession())
+//
+//    // Create an uploader with the session and default options
+//    uploader := s3manager.NewUploader(sess)
+//
+//    f, err := os.Open(filename)
+//    if err != nil {
+//        return fmt.Errorf("failed to open file %q, %v", filename, err)
+//    }
+//
+//    // Upload the file to S3.
+//    result, err := uploader.Upload(&s3manager.UploadInput{
+//        Bucket: aws.String(myBucket),
+//        Key:    aws.String(myString),
+//        Body:   f,
+//    })
+//    if err != nil {
+//        return fmt.Errorf("failed to upload file, %v", err)
+//    }
+//    fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of Objects
+// from S3. The Downloader will write S3 Object content with an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently
+// from multiple goroutines safely.
+//
+//    // The session the S3 Downloader will use
+//    sess := session.Must(session.NewSession())
+//
+//    // Create a downloader with the session and default options
+//    downloader := s3manager.NewDownloader(sess)
+//
+//    // Create a file to write the S3 Object contents to.
+//    f, err := os.Create(filename)
+//    if err != nil {
+//        return fmt.Errorf("failed to create file %q, %v", filename, err)
+//    }
+//
+//    // Write the contents of S3 Object to the file
+//    n, err := downloader.Download(f, &s3.GetObjectInput{
+//        Bucket: aws.String(myBucket),
+//        Key:    aws.String(myString),
+//    })
+//    if err != nil {
+//        return fmt.Errorf("failed to download file, %v", err)
+//    }
+//    fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+//    sess := session.Must(session.NewSession())
+//
+//    bucket := "my-bucket"
+//    region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+//            fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+//        }
+//        return err
+//    }
+//    fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more information
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+//    sess := session.Must(session.NewSession())
+//
+//    // Create the decryption client.
+//    svc := s3crypto.NewDecryptionClient(sess)
+//
+//    // The object will be downloaded from S3 and decrypted locally. Metadata
+//    // about the object's encryption will instruct the decryption client how
+//    // to decrypt the content of the object. By default KMS is used for keys.
+//    result, err := svc.GetObject(&s3.GetObjectInput{
+//        Bucket: aws.String(myBucket),
+//        Key:    aws.String(myKey),
+//    })
+//
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+//
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
new file mode 100644
index 0000000000..931cb17bb0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -0,0 +1,48 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+const (
+
+	// ErrCodeBucketAlreadyExists for service response error code
+	// "BucketAlreadyExists".
+	//
+	// The requested bucket name is not available. The bucket namespace is shared
+	// by all users of the system. Please select a different name and try again.
+	ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
Please select a different name and try again. + ErrCodeBucketAlreadyExists = "BucketAlreadyExists" + + // ErrCodeBucketAlreadyOwnedByYou for service response error code + // "BucketAlreadyOwnedByYou". + ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + + // ErrCodeNoSuchBucket for service response error code + // "NoSuchBucket". + // + // The specified bucket does not exist. + ErrCodeNoSuchBucket = "NoSuchBucket" + + // ErrCodeNoSuchKey for service response error code + // "NoSuchKey". + // + // The specified key does not exist. + ErrCodeNoSuchKey = "NoSuchKey" + + // ErrCodeNoSuchUpload for service response error code + // "NoSuchUpload". + // + // The specified multipart upload does not exist. + ErrCodeNoSuchUpload = "NoSuchUpload" + + // ErrCodeObjectAlreadyInActiveTierError for service response error code + // "ObjectAlreadyInActiveTierError". + // + // This operation is not allowed against this storage tier + ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" + + // ErrCodeObjectNotInActiveTierError for service response error code + // "ObjectNotInActiveTierError". + // + // The source object of the COPY operation is not in the active tier and is + // only stored in Amazon Glacier. + ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go new file mode 100644 index 0000000000..ec3ffe4484 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -0,0 +1,162 @@ +package s3 + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// an operationBlacklist is a list of operation names that should a +// request handler should not be executed with. +type operationBlacklist []string + +// Continue will return true of the Request's operation name is not +// in the blacklist. False otherwise. +func (b operationBlacklist) Continue(r *request.Request) bool { + for i := 0; i < len(b); i++ { + if b[i] == r.Operation.Name { + return false + } + } + return true +} + +var accelerateOpBlacklist = operationBlacklist{ + opListBuckets, opCreateBucket, opDeleteBucket, +} + +// Request handler to automatically add the bucket name to the endpoint domain +// if possible. This style of bucket is valid for all bucket names which are +// DNS compatible and do not contain "." +func updateEndpointForS3Config(r *request.Request) { + forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) + accelerate := aws.BoolValue(r.Config.S3UseAccelerate) + + if accelerate && accelerateOpBlacklist.Continue(r) { + if forceHostStyle { + if r.Config.Logger != nil { + r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") + } + } + updateEndpointForAccelerate(r) + } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { + updateEndpointForHostStyle(r) + } +} + +func updateEndpointForHostStyle(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucketname was not provided + // if this is an input validation error the validation handler + // will report it. 
+		return
+	}
+
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+		// bucket name must be valid to put into the host
+		return
+	}
+
+	moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+var (
+	accelElem = []byte("s3-accelerate.dualstack.")
+)
+
+func updateEndpointForAccelerate(r *request.Request) {
+	bucket, ok := bucketNameFromReqParams(r.Params)
+	if !ok {
+		// Ignore the request if the bucket name was not provided; if this is
+		// an input validation error the validation handler will report it.
+		return
+	}
+
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
+			nil)
+		return
+	}
+
+	parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+	if len(parts) < 3 {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+				r.HTTPRequest.URL.Host), nil)
+		return
+	}
+
+	if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+		parts[0] = "s3-accelerate"
+	}
+	for i := 1; i+1 < len(parts); i++ {
+		if parts[i] == aws.StringValue(r.Config.Region) {
+			parts = append(parts[:i], parts[i+1:]...)
+			break
+		}
+	}
+
+	r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
+	moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty, false will be returned.
+func bucketNameFromReqParams(params interface{}) (string, bool) {
+	b, _ := awsutil.ValuesAtPath(params, "Bucket")
+	if len(b) == 0 {
+		return "", false
+	}
+
+	if bucket, ok := b[0].(*string); ok {
+		if bucketStr := aws.StringValue(bucket); bucketStr != "" {
+			return bucketStr, true
+		}
+	}
+
+	return "", false
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if S3ForcePathStyle is
+// explicitly set or if the bucket is not DNS compatible.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+	// Bucket might be DNS compatible but dots in the hostname will fail
+	// certificate validation, so do not use host-style.
+	if u.Scheme == "https" && strings.Contains(bucket, ".") {
+		return false
+	}
+
+	// Use host-style if the bucket is DNS compatible.
+	return dnsCompatibleBucketName(bucket)
+}
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+	return reDomain.MatchString(bucket) &&
+		!reIPAddress.MatchString(bucket) &&
+		!strings.Contains(bucket, "..")
+}
+
+// moveBucketToHost moves the bucket name from the URI path to URL host.
+func moveBucketToHost(u *url.URL, bucket string) {
+	u.Host = bucket + "." + u.Host
+	u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+	if u.Path == "" {
+		u.Path = "/"
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
new file mode 100644
index 0000000000..8e6f3307d4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
@@ -0,0 +1,8 @@
+// +build !go1.6
+
+package s3
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func platformRequestHandlers(r *request.Request) {
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
new file mode 100644
index 0000000000..14d05f7b75
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
@@ -0,0 +1,28 @@
+// +build go1.6
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+func platformRequestHandlers(r *request.Request) {
+	if r.Operation.HTTPMethod == "PUT" {
+		// 100-Continue should only be used on put requests.
+		r.Handlers.Sign.PushBack(add100Continue)
+	}
+}
+
+func add100Continue(r *request.Request) {
+	if aws.BoolValue(r.Config.S3Disable100Continue) {
+		return
+	}
+	if r.HTTPRequest.ContentLength < 1024*1024*2 {
+		// Ignore requests smaller than 2MB. This helps prevent delaying
+		// requests unnecessarily.
+		return
+	}
+
+	r.HTTPRequest.Header.Set("Expect", "100-Continue")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 0000000000..614e477d3b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,93 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type S3 struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "s3"        // Service endpoint prefix for API calls.
+	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an S3 client from just a session.
+//     svc := s3.New(mySession)
+//
+//     // Create an S3 client with additional configuration
+//     svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. +func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go new file mode 100644 index 0000000000..268ea2fb45 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -0,0 +1,44 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme != "https" { + p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") + if len(p) > 0 { + r.Error = errSSERequiresSSL + } + } +} + +func computeSSEKeys(r *request.Request) { + headers := []string{ + "x-amz-server-side-encryption-customer-key", + "x-amz-copy-source-server-side-encryption-customer-key", + } + + for _, h := range headers { + md5h := h + "-md5" + if key := r.HTTPRequest.Header.Get(h); key != "" { + // Base64-encode the value + b64v := base64.StdEncoding.EncodeToString([]byte(key)) + r.HTTPRequest.Header.Set(h, b64v) + + // Add MD5 if it wasn't computed + if r.HTTPRequest.Header.Get(md5h) == "" { + sum := md5.Sum([]byte(key)) + b64sum := base64.StdEncoding.EncodeToString(sum[:]) + r.HTTPRequest.Header.Set(md5h, b64sum) + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 0000000000..5a78fd3370 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,35 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to read response body", err) + return + } + body := bytes.NewReader(b) + r.HTTPResponse.Body = ioutil.NopCloser(body) + defer body.Seek(0, 0) + + if body.Len() == 0 { + // If there is no body don't attempt to parse the body. 
+		return
+	}
+
+	unmarshalError(r)
+	if err, ok := r.Error.(awserr.Error); ok && err != nil {
+		if err.Code() == "SerializationError" {
+			r.Error = nil
+			return
+		}
+		r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
new file mode 100644
index 0000000000..bcca8627af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -0,0 +1,103 @@
+package s3
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+	XMLName xml.Name `xml:"Error"`
+	Code    string   `xml:"Code"`
+	Message string   `xml:"Message"`
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+
+	hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
+
+	// Bucket exists in a different region, and the request needs
+	// to be made to the correct region.
+	if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+		r.Error = requestFailure{
+			RequestFailure: awserr.NewRequestFailure(
+				awserr.New("BucketRegionError",
+					fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
+						aws.StringValue(r.Config.Region)),
+					nil),
+				r.HTTPResponse.StatusCode,
+				r.RequestID,
+			),
+			hostID: hostID,
+		}
+		return
+	}
+
+	var errCode, errMsg string
+
+	// Attempt to parse error from body if it is known
+	resp := &xmlErrorResponse{}
+	err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
+	if err != nil && err != io.EOF {
+		errCode = "SerializationError"
+		errMsg = "failed to decode S3 XML error response"
+	} else {
+		errCode = resp.Code
+		errMsg = resp.Message
+		err = nil
+	}
+
+	// Fall back to the status code converted to a message if there is
+	// still no error code.
+	if len(errCode) == 0 {
+		statusText := http.StatusText(r.HTTPResponse.StatusCode)
+		errCode = strings.Replace(statusText, " ", "", -1)
+		errMsg = statusText
+	}
+
+	r.Error = requestFailure{
+		RequestFailure: awserr.NewRequestFailure(
+			awserr.New(errCode, errMsg, err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		),
+		hostID: hostID,
+	}
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+	awserr.RequestFailure
+
+	// HostID is the S3 Host ID needed for debugging and contacting support.
+	HostID() string
+}
+
+type requestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+func (r requestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r requestFailure) String() string {
+	return r.Error()
+}
+func (r requestFailure) HostID() string {
+	return r.hostID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 0000000000..cccfa8c2b3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+	return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+	w := request.Waiter{
+		Name:        "WaitUntilBucketExists",
+		MaxAttempts: 20,
+		Delay:       request.ConstantWaiterDelay(5 * time.Second),
+		Acceptors: []request.WaiterAcceptor{
+			{
+				State:    request.SuccessWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 200,
+			},
+			{
+				State:    request.SuccessWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 301,
+			},
+			{
+				State:    request.SuccessWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 403,
+			},
+			{
+				State:    request.RetryWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 404,
+			},
+		},
+		Logger: c.Config.Logger,
+		NewRequest: func(opts []request.Option) (*request.Request, error) {
+			var inCpy *HeadBucketInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.HeadBucketRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+	w.ApplyOptions(opts...)
+
+	return w.WaitWithContext(ctx)
+}
+
+// WaitUntilBucketNotExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
+	return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
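+//
+// Illustrative sketch only (not part of the generated docs; svc is an assumed
+// *s3.S3 client and the bucket name is a placeholder): blocking until a
+// deleted bucket is no longer visible.
+//
+//     err := svc.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(),
+//         &s3.HeadBucketInput{
+//             Bucket: aws.String("my-bucket"),
+//         })
+//     if err != nil {
+//         // The bucket was still visible after all attempts.
+//     }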
+func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+	w := request.Waiter{
+		Name:        "WaitUntilBucketNotExists",
+		MaxAttempts: 20,
+		Delay:       request.ConstantWaiterDelay(5 * time.Second),
+		Acceptors: []request.WaiterAcceptor{
+			{
+				State:    request.SuccessWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 404,
+			},
+		},
+		Logger: c.Config.Logger,
+		NewRequest: func(opts []request.Option) (*request.Request, error) {
+			var inCpy *HeadBucketInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.HeadBucketRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+	w.ApplyOptions(opts...)
+
+	return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
+	return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+	w := request.Waiter{
+		Name:        "WaitUntilObjectExists",
+		MaxAttempts: 20,
+		Delay:       request.ConstantWaiterDelay(5 * time.Second),
+		Acceptors: []request.WaiterAcceptor{
+			{
+				State:    request.SuccessWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 200,
+			},
+			{
+				State:    request.RetryWaiterState,
+				Matcher:  request.StatusWaiterMatch,
+				Expected: 404,
+			},
+		},
+		Logger: c.Config.Logger,
+		NewRequest: func(opts []request.Option) (*request.Request, error) {
+			var inCpy *HeadObjectInput
+			if input != nil {
+				tmp := *input
+				inCpy = &tmp
+			}
+			req, _ := c.HeadObjectRequest(inCpy)
+			req.SetContext(ctx)
+			req.ApplyOptions(opts...)
+			return req, nil
+		},
+	}
+	w.ApplyOptions(opts...)
+
+	return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectNotExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
+	return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
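+//
+// Illustrative sketch only (svc, myBucket, and myKey are assumed to exist):
+// confirming a deleted object is gone before proceeding.
+//
+//     svc.DeleteObject(&s3.DeleteObjectInput{
+//         Bucket: aws.String(myBucket),
+//         Key:    aws.String(myKey),
+//     })
+//     err := svc.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(),
+//         &s3.HeadObjectInput{
+//             Bucket: aws.String(myBucket),
+//             Key:    aws.String(myKey),
+//         })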
+func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 0000000000..e5c105fed8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,2365 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRole for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) that you can use to access +// AWS resources that you might not normally have access to. Typically, you +// use AssumeRole for cross-account access or federation. For a comparison of +// AssumeRole with the other APIs that produce temporary credentials, see Requesting +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. 
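+//
+// As an illustrative sketch only (the role ARN and session name below are
+// placeholders, and svc is an assumed *sts.STS client), a basic AssumeRole
+// call looks like:
+//
+//     out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//         RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"),
+//         RoleSessionName: aws.String("example-session"),
+//     })
+//     // When err is nil, out.Credentials holds the temporary AccessKeyId,
+//     // SecretAccessKey, and SessionToken.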
+// +// Important: You cannot call AssumeRole by using AWS root account credentials; +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the IAM User Guide. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// in the IAM User Guide. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a +// maximum of 3600 seconds (1 hour). The default is 1 hour. +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: you cannot call +// the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// To assume a role, your AWS account must be trusted by the role. The trust +// relationship is defined in the role's trust policy when the role is created. +// That trust policy states which accounts are allowed to delegate access to +// this account's role. 
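+//
+// For illustration only (the account ID is a placeholder), a minimal trust
+// policy that lets account 111122223333 assume the role might look like:
+//
+//     {
+//         "Version": "2012-10-17",
+//         "Statement": [{
+//             "Effect": "Allow",
+//             "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
+//             "Action": "sts:AssumeRole"
+//         }]
+//     }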
+//
+// The user who wants to access the role must also have permissions delegated
+// from the role's administrator. If the user is in a different account than
+// the role, then the user's administrator must attach a policy that allows
+// the user to call AssumeRole on the ARN of the role in the other account.
+// If the user is in the same account as the role, then you can either attach
+// a policy to the user (identical to the previous different account user),
+// or you can add the user as a principal directly in the role's trust policy.
+//
+// Using MFA with AssumeRole
+//
+// You can optionally include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios in which
+// you want to make sure that the user who is assuming the role has been authenticated
+// using an AWS MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+//     "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+//   The request was rejected because the policy document was too large. The error
+//   message describes how big the policy document is, in packed form, as a percentage
+//   of what the API allows.
+//
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation.
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRoleWithSAML for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithSAML method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + output = &AssumeRoleWithSAMLOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. The duration +// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). +// The default is 1 hour. 
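+//
+// As an illustrative sketch (the ARNs and assertion variable are placeholders,
+// and svc is an assumed *sts.STS client), a session duration can be requested
+// explicitly with DurationSeconds:
+//
+//     out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//         PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/demo"),
+//         RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"),
+//         SAMLAssertion:   aws.String(base64Assertion),
+//         DurationSeconds: aws.Int64(3600),
+//     })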
+// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by the intersection of both the access policy +// of the role that is being assumed, and the policy that you pass. This means +// that both policies must grant the permission for the action to be allowed. +// This gives you a way to further restrict the permissions for the resulting +// temporary security credentials. You cannot use the passed policy to grant +// permissions that are in excess of those allowed by the access policy of the +// role that is being assumed. For more information, see Permissions for AssumeRole, +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. 
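+//
+// A sketch of inspecting a returned error against the codes listed below
+// (err is assumed to come from an AssumeRoleWithSAML call):
+//
+//     if aerr, ok := err.(awserr.Error); ok {
+//         switch aerr.Code() {
+//         case sts.ErrCodeIDPRejectedClaimException:
+//             // Re-authenticate with the IdP and retry.
+//         case sts.ErrCodeExpiredTokenException:
+//             // Fetch a fresh SAML assertion and retry.
+//         }
+//     }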
+// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRoleWithWebIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the AssumeRoleWithWebIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + output = &AssumeRoleWithWebIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider, such as Amazon +// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible +// identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely +// identify a user and supply the user with a consistent identity throughout +// the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview +// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application, and without deploying server-based +// proxy services that use long-term AWS credentials. Instead, the identity +// of the caller is validated by using a token from the web identity provider. +// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce +// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service APIs. +// +// The credentials are valid for the duration that you specified when calling +// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to +// a maximum of 3600 seconds (1 hour). The default is 1 hour. 
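+//
+// As an illustrative sketch (the ARN and token variable are placeholders, and
+// svc is an assumed *sts.STS client), exchanging a web identity token for
+// temporary credentials looks like:
+//
+//     out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//         RoleArn:          aws.String("arn:aws:iam::123456789012:role/demo"),
+//         RoleSessionName:  aws.String("app-session"),
+//         WebIdentityToken: aws.String(idToken),
+//     })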
+// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: +// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security +// credentials, and then using those credentials to make a request to AWS. +// +// +// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample +// apps that show how to invoke the identity providers, and then how to use +// the information from these providers to get and use temporary security +// credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). 
+// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the non-AWS identity provider +// (IDP) that was asked to verify the incoming identity token could not be reached. +// This is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the non-AWS identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DecodeAuthorizationMessage for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DecodeAuthorizationMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + output = &DecodeAuthorizationMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. +// +// Only certain AWS actions return an encoded authorization message. The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status can +// constitute privileged information that the user who requested the action +// should not see. To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. +// +// The decoded message includes the following type of information: +// +// * Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. 
For more information, see Determining Whether +// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested action. +// +// * The requested resource. +// +// * The values of condition keys in the context of the user's request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation DecodeAuthorizationMessage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetCallerIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCallerIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCallerIdentityRequest method. 
+// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + output = &GetCallerIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCallerIdentity API operation for AWS Security Token Service. +// +// Returns details about the IAM identity whose credentials are used to call +// the API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetCallerIdentity for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + return out, req.Send() +} + +// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCallerIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetFederationToken for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetFederationToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetFederationTokenRequest method. 
+// req, resp := client.GetFederationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetFederationToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetFederationTokenInput{}
+	}
+
+	output = &GetFederationTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot use these credentials to call any IAM APIs.
+//
+// * You cannot call any STS APIs except GetCallerIdentity.
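+//
+// A minimal sketch of a typical call, assuming an existing session; the user
+// name, duration, and policy variable below are placeholders, not values
+// prescribed by this API:
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:            aws.String("app-user"),   // hypothetical federated user name
+//        DurationSeconds: aws.Int64(3600),          // 1 hour
+//        Policy:          aws.String(scopedPolicy), // hypothetical scoped-down JSON policy
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials)
+//    }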
+// +// Permissions +// +// The permissions for the temporary security credentials returned by GetFederationToken +// are determined by a combination of the following: +// +// * The policy or policies that are attached to the IAM user whose credentials +// are used to call GetFederationToken. +// +// * The policy that is passed as a parameter in the call. +// +// The passed policy is attached to the temporary security credentials that +// result from the GetFederationToken API call--that is, to the federated user. +// When the federated user makes an AWS request, AWS evaluates the policy attached +// to the federated user in combination with the policy or policies attached +// to the IAM user whose credentials were used to call GetFederationToken. AWS +// allows the federated user's request only when both the federated user and +// the IAM user are explicitly allowed to perform the requested action. The +// passed policy cannot grant more permissions than those that are defined in +// the IAM user policy. +// +// A typical use case is that the permissions of the IAM user whose credentials +// are used to call GetFederationToken are designed to allow access to all the +// actions and resources that any federated user will need. Then, for individual +// users, you pass a policy to the operation that scopes down the permissions +// to a level that's appropriate to that individual user, using a policy that +// allows only a subset of permissions that are granted to the IAM user. +// +// If you do not pass a policy, the resulting temporary security credentials +// have no effective permissions. The only exception is when the temporary security +// credentials are used to access a resource that has a resource-based policy +// that specifically allows the federated user to access the resource. +// +// For more information about how permissions work, see Permissions for GetFederationToken +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). +// For information about using GetFederationToken to create temporary security +// credentials, see GetFederationToken—Federation Through a Custom Identity +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. 
For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetSessionToken for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSessionToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + output = &GetSessionTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSessionToken API operation for AWS Security Token Service. +// +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled +// IAM users would need to call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that are returned from the call, IAM users can then make programmatic calls +// to APIs that require MFA authentication. 
If you do not supply a correct MFA
+// code, then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot call any IAM APIs unless MFA authentication information is
+// included in the request.
+//
+// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with the account
+// or IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
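+//
+// A minimal sketch of an MFA-protected call, assuming an existing session;
+// the serial number and token code are placeholder values:
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
+//        DurationSeconds: aws.Int64(3600),                                  // 1 hour
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // placeholder MFA device ARN
+//        TokenCode:       aws.String("123456"),                             // placeholder MFA code
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials)
+//    }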
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
+type AssumeRoleInput struct {
+	_ struct{} `type:"structure"`
+
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+	// to 3600 seconds.
+	//
+	// This is separate from the duration of a console session that you might request
+	// using the returned credentials. The request to the federation endpoint for
+	// a console sign-in token takes a SessionDuration parameter that specifies
+	// the maximum length of the console session, separately from the DurationSeconds
+	// parameter on this API. For more information, see Creating a URL that Enables
+	// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// in the IAM User Guide.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// A unique identifier that is used by third parties when assuming roles in
+	// their customers' accounts. For each role that the third party can assume,
+	// they should instruct their customers to ensure the role's trust policy checks
+	// for the external ID that the third party generated. Each time the third party
+	// assumes the role, they should pass the customer's external ID. The external
+	// ID is useful in order to help third parties bind a role to the customer who
+	// created it. For more information about the external ID, see How to Use an
+	// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+	// in the IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@:/-
+	ExternalId *string `min:"2" type:"string"`
+
+	// An IAM policy in JSON format.
+	//
+	// This parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both (the intersection of) the access policy of the role that
+	// is being assumed, and the policy that you pass. This gives you a way to further
+	// restrict the permissions for the resulting temporary security credentials.
+ // You cannot use the passed policy to grant permissions that are in excess + // of those allowed by the access policy of the role that is being assumed. + // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, + // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests using the temporary security credentials will expose the role session + // name to the external account in their CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. 
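+	// For example, "123456" would satisfy this format (a placeholder, not a real
+	// code).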
+ TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. 
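+	// For illustration, an assumed-role ARN has the form
+	// arn:aws:sts::123456789012:assumed-role/role-name/role-session-name (the
+	// account, role, and session values here are placeholders).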
+ AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. An expiration can also be specified in the SAML authentication + // response's SessionNotOnOrAfter value. The actual expiration time is whichever + // value is shorter. + // + // This is separate from the duration of a console session that you might request + // using the returned credentials. The request to the federation endpoint for + // a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Enabling SAML 2.0 Federated + // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. 
For more information, see
+	// Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the IAM User Guide.
+	//
+	// The format for this parameter, as described by its regex pattern, is a string
+	// of characters up to 2048 characters in length. The characters can be any
+	// ASCII character from the space character to the end of the valid character
+	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// and carriage return (\u000D) characters.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+	// the IdP.
+	//
+	// PrincipalArn is a required field
+	PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	//
+	// RoleArn is a required field
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// The base-64 encoded SAML authentication response provided by the IdP.
+	//
+	// For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+	// in the Using IAM guide.
+	//
+	// SAMLAssertion is a required field
+	SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+	}
+	if s.Policy != nil && len(*s.Policy) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+	}
+	if s.PrincipalArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
+	}
+	if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+	if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+	}
+	if s.SAMLAssertion == nil {
+		invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
+	}
+	if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+	s.DurationSeconds = &v
+	return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. 
+ SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + // + // This is separate from the duration of a console session that you might request + // using the returned credentials. The request to the federation endpoint for + // a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Creating a URL that Enables + // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. 
For more information, + // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. 
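+	// For example (hypothetical value), an OAuth client ID such as
+	// "example-oauth-client-id".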
+ Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. 
For more information about ARNs and how to use them in + // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. 
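+	// As a sketch (encodedMsg is a placeholder variable), the encoded portion is
+	// typically extracted from the message text of the Client.UnauthorizedOperation
+	// error and passed through unchanged:
+	//
+	//    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+	//        EncodedMessage: aws.String(encodedMsg),
+	//    })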
+ // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. 
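As a usage note on the generated types above: the DecodeAuthorizationMessageInput/Output pair maps onto a single client call. A minimal sketch, assuming a default session as in the SDK's usual pattern; the encoded message value is a placeholder you would normally take from an "access denied" error response:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Share configuration through a session, then build the STS client.
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// EncodedMessage is a placeholder; real values arrive with
	// "access denied" responses from other AWS services.
	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String("encoded-message-from-error-response"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DecodedMessage)) // decoded XML document
}
```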
+func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity making the call. The values returned are those listed in the + // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds + // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained + // using AWS account (root) credentials are restricted to a maximum of 3600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using AWS account (root) credentials defaults to one + // hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that is passed with the GetFederationToken call + // and evaluated along with the policy or policies that are attached to the + // IAM user whose credentials are used to call GetFederationToken. The passed + // policy is used to scope down the permissions that are available to the IAM + // user, by allowing only a subset of the permissions that are granted to the + // IAM user. The passed policy cannot grant more permissions than those granted + // to the IAM user. The final permissions for the federated user are the most + // restrictive set based on the intersection of the passed policy and the IAM + // user policy. + // + // If you do not pass a policy, the resulting temporary security credentials + // have no effective permissions. The only exception is when the temporary security + // credentials are used to access a resource that has a resource-based policy + // that specifically allows the federated user to access the resource. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + // + // For more information about how permissions work, see Permissions for GetFederationToken + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. 
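The scope-down behavior described in the Policy documentation above is easiest to see in a call. A hedged sketch, assuming an `svc := sts.New(sess)` client as in the previous example; the federated user name, bucket ARN, and policy body are placeholders:

```go
// A scope-down policy: the federated user gets at most read access to one
// bucket, regardless of the calling IAM user's broader permissions.
policy := `{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Action": "s3:GetObject",
    "Resource": "arn:aws:s3:::example-bucket/*"
  }]
}`

out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
	Name:            aws.String("Bob"),
	Policy:          aws.String(policy),
	DurationSeconds: aws.Int64(3600), // one hour
})
if err != nil {
	log.Fatal(err)
}
// PackedPolicySize reports how close the packed policy is to the size limit.
fmt.Println(aws.Int64Value(out.PackedPolicySize))
```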
+func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 + // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). + // If the duration is longer than one hour, the session for AWS account owners + // defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. 
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@:/-
+	SerialNumber *string `min:"9" type:"string"`
+
+	// The value provided by the MFA device, if MFA is required. If any policy requires
+	// the IAM user to submit an MFA code, specify this value. If MFA authentication
+	// is required, and the user does not provide a code when requesting a set of
+	// temporary security credentials, the user will receive an "access denied"
+	// response when requesting resources that require MFA authentication.
+	//
+	// The format for this parameter, as described by its regex pattern, is a sequence
+	// of six numeric digits.
+	TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSessionTokenInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
+	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+	}
+	if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+		invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+	}
+	if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+		invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
+	s.DurationSeconds = &v
+	return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
+	s.SerialNumber = &v
+	return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
+	s.TokenCode = &v
+	return s
+}
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse
+type GetSessionTokenOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	//
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
+	Credentials *Credentials `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+	return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
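The MFA-related fields documented above combine into a single call. A minimal sketch of requesting an MFA-protected session, again assuming an existing `svc` client; the serial number and token code are the placeholder formats from the field documentation:

```go
out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
	DurationSeconds: aws.Int64(900), // the documented minimum
	SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
	TokenCode:       aws.String("123456"),
})
if err != nil {
	log.Fatal(err)
}
// The returned Credentials carry the session token and its expiry.
fmt.Println(aws.StringValue(out.Credentials.AccessKeyId),
	aws.TimeValue(out.Credentials.Expiration))
```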
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 0000000000..4010cc7fa1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,12 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 0000000000..d2af518cfa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,124 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com +// that maps to the US East (N. Virginia) region. Additional regions are available +// and are activated by default. For more information, see Activating and Deactivating +// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
+//
+// See sts package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
+//
+// Using the Client
+//
+// To use the client for AWS Security Token Service you will first need
+// to create a new instance of it.
+//
+// When creating a client for an AWS service you'll first need to have a Session
+// already created. The Session provides configuration that can be shared
+// between multiple service clients. Additional configuration can be applied to
+// the Session and service's client when they are constructed. The aws package's
+// Config type contains several fields such as Region for the AWS Region the
+// client should make API requests to. The optional Config value can be provided
+// as the variadic argument for Sessions and client creation.
+//
+// Once the service's client is created you can use it to make API requests to
+// the AWS service. These clients are safe to use concurrently.
+//
+//    // Create a session to share configuration, and load external configuration.
+//    sess := session.Must(session.NewSession())
+//
+//    // Create the service's client with the session.
+//    svc := sts.New(sess)
+//
+// See the SDK's documentation for more information on how to use service clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws package's Config type for more information on configuration options.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating the service's client.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
+//
+// Once the client is created you can make an API request to the service.
+// Each API method takes an input parameter, and returns the service response
+// and an error.
+//
+// The API method will document which error codes the service can return for
+// the operation, if the service models the API operation's errors. These
+// errors will also be available as const strings prefixed with "ErrCode".
+//
+//    result, err := svc.AssumeRole(params)
+//    if err != nil {
+//        // Cast err to awserr.Error to handle specific error codes.
+//        aerr, ok := err.(awserr.Error)
+//        if ok && aerr.Code() == <error code to check for> {
+//            // Specific error code handling
+//        }
+//        return err
+//    }
+//
+//    fmt.Println("AssumeRole result:")
+//    fmt.Println(result)
+//
+// Using the Client with Context
+//
+// The service's client also provides methods to make API requests with a Context
+// value. This allows you to control the timeout and cancellation of pending
+// requests. These methods also take request Option as a variadic parameter to
+// apply additional configuration to the API request.
+// +// ctx := context.Background() +// +// result, err := svc.AssumeRoleWithContext(ctx, params) +// +// See the request package documentation for more information on using Context pattern +// with the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/ +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 0000000000..e24884ef37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,73 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the non-AWS identity provider + // (IDP) that was asked to verify the incoming identity token could not be reached. + // This is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the non-AWS identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the policy document was too large. The error + // message describes how big the policy document is, in packed form, as a percentage + // of what the API allows. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". 
+	//
+	// STS is not activated in the requested region for the account that is being
+	// asked to generate credentials. The account administrator must use the IAM
+	// console to activate STS in that region. For more information, see Activating
+	// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+	// in the IAM User Guide.
+	ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 0000000000..1ee5839e04
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,93 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "sts"       // Service endpoint prefix for API calls made to the service.
+	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create an STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningName:   signingName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2011-06-15",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an STS operation and runs any
+// custom request initialization.
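The generated error codes above pair naturally with `awserr.Error` type assertions. A hedged sketch of dispatching on them; the helper name and the handling comments are illustrative, not part of the SDK:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/sts"
)

// classifySTSError is a hypothetical helper showing how the ErrCode
// constants above are matched against a returned error.
func classifySTSError(err error) {
	aerr, ok := err.(awserr.Error)
	if !ok {
		log.Println("not an AWS error:", err)
		return
	}
	switch aerr.Code() {
	case sts.ErrCodeMalformedPolicyDocumentException:
		log.Println("policy JSON is malformed:", aerr.Message())
	case sts.ErrCodePackedPolicyTooLargeException:
		log.Println("policy too large in packed form:", aerr.Message())
	case sts.ErrCodeRegionDisabledException:
		log.Println("STS is not activated in this region:", aerr.Message())
	default:
		log.Println(aerr.Code(), aerr.Message())
	}
}

func main() {
	// Fabricated error purely to exercise the switch above.
	classifySTSError(awserr.New(sts.ErrCodeRegionDisabledException,
		"STS is not activated in this region", nil))
}
```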
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/bgentry/go-netrc/LICENSE b/vendor/github.com/bgentry/go-netrc/LICENSE new file mode 100644 index 0000000000..aade9a58b1 --- /dev/null +++ b/vendor/github.com/bgentry/go-netrc/LICENSE @@ -0,0 +1,20 @@ +Original version Copyright © 2010 Fazlul Shahriar . Newer +portions Copyright © 2014 Blake Gentry . + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/bgentry/go-netrc/netrc/netrc.go b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go new file mode 100644 index 0000000000..ea49987c08 --- /dev/null +++ b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go @@ -0,0 +1,510 @@ +package netrc + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type tkType int + +const ( + tkMachine tkType = iota + tkDefault + tkLogin + tkPassword + tkAccount + tkMacdef + tkComment + tkWhitespace +) + +var keywords = map[string]tkType{ + "machine": tkMachine, + "default": tkDefault, + "login": tkLogin, + "password": tkPassword, + "account": tkAccount, + "macdef": tkMacdef, + "#": tkComment, +} + +type Netrc struct { + tokens []*token + machines []*Machine + macros Macros + updateLock sync.Mutex +} + +// FindMachine returns the Machine in n named by name. If a machine named by +// name exists, it is returned. If no Machine with name name is found and there +// is a ``default'' machine, the ``default'' machine is returned. Otherwise, nil +// is returned. +func (n *Netrc) FindMachine(name string) (m *Machine) { + // TODO(bgentry): not safe for concurrency + var def *Machine + for _, m = range n.machines { + if m.Name == name { + return m + } + if m.IsDefault() { + def = m + } + } + if def == nil { + return nil + } + return def +} + +// MarshalText implements the encoding.TextMarshaler interface to encode a +// Netrc into text format. +func (n *Netrc) MarshalText() (text []byte, err error) { + // TODO(bgentry): not safe for concurrency + for i := range n.tokens { + switch n.tokens[i].kind { + case tkComment, tkDefault, tkWhitespace: // always append these types + text = append(text, n.tokens[i].rawkind...) + default: + if n.tokens[i].value != "" { // skip empty-value tokens + text = append(text, n.tokens[i].rawkind...) 
+			}
+		}
+		if n.tokens[i].kind == tkMacdef {
+			text = append(text, ' ')
+			text = append(text, n.tokens[i].macroName...)
+		}
+		text = append(text, n.tokens[i].rawvalue...)
+	}
+	return
+}
+
+func (n *Netrc) NewMachine(name, login, password, account string) *Machine {
+	n.updateLock.Lock()
+	defer n.updateLock.Unlock()
+
+	prefix := "\n"
+	if len(n.tokens) == 0 {
+		prefix = ""
+	}
+	m := &Machine{
+		Name:     name,
+		Login:    login,
+		Password: password,
+		Account:  account,
+
+		nametoken: &token{
+			kind:     tkMachine,
+			rawkind:  []byte(prefix + "machine"),
+			value:    name,
+			rawvalue: []byte(" " + name),
+		},
+		logintoken: &token{
+			kind:     tkLogin,
+			rawkind:  []byte("\n\tlogin"),
+			value:    login,
+			rawvalue: []byte(" " + login),
+		},
+		passtoken: &token{
+			kind:     tkPassword,
+			rawkind:  []byte("\n\tpassword"),
+			value:    password,
+			rawvalue: []byte(" " + password),
+		},
+		accounttoken: &token{
+			kind:     tkAccount,
+			rawkind:  []byte("\n\taccount"),
+			value:    account,
+			rawvalue: []byte(" " + account),
+		},
+	}
+	n.insertMachineTokensBeforeDefault(m)
+	for i := range n.machines {
+		if n.machines[i].IsDefault() {
+			n.machines = append(append(n.machines[:i], m), n.machines[i:]...)
+			return m
+		}
+	}
+	n.machines = append(n.machines, m)
+	return m
+}
+
+func (n *Netrc) insertMachineTokensBeforeDefault(m *Machine) {
+	newtokens := []*token{m.nametoken}
+	if m.logintoken.value != "" {
+		newtokens = append(newtokens, m.logintoken)
+	}
+	if m.passtoken.value != "" {
+		newtokens = append(newtokens, m.passtoken)
+	}
+	if m.accounttoken.value != "" {
+		newtokens = append(newtokens, m.accounttoken)
+	}
+	for i := range n.tokens {
+		if n.tokens[i].kind == tkDefault {
+			// found the default, now insert tokens before it
+			n.tokens = append(n.tokens[:i], append(newtokens, n.tokens[i:]...)...)
+			return
+		}
+	}
+	// didn't find a default, just add the newtokens to the end
+	n.tokens = append(n.tokens, newtokens...)
+	return
+}
+
+func (n *Netrc) RemoveMachine(name string) {
+	n.updateLock.Lock()
+	defer n.updateLock.Unlock()
+
+	for i := range n.machines {
+		if n.machines[i] != nil && n.machines[i].Name == name {
+			m := n.machines[i]
+			for _, t := range []*token{
+				m.nametoken, m.logintoken, m.passtoken, m.accounttoken,
+			} {
+				n.removeToken(t)
+			}
+			n.machines = append(n.machines[:i], n.machines[i+1:]...)
+			return
+		}
+	}
+}
+
+func (n *Netrc) removeToken(t *token) {
+	if t != nil {
+		for i := range n.tokens {
+			if n.tokens[i] == t {
+				n.tokens = append(n.tokens[:i], n.tokens[i+1:]...)
+				return
+			}
+		}
+	}
+}
+
+// Machine contains information about a remote machine.
+type Machine struct {
+	Name     string
+	Login    string
+	Password string
+	Account  string
+
+	nametoken    *token
+	logintoken   *token
+	passtoken    *token
+	accounttoken *token
+}
+
+// IsDefault returns true if the machine is a "default" token, denoted by an
+// empty name.
+func (m *Machine) IsDefault() bool {
+	return m.Name == ""
+}
+
+// UpdatePassword sets the password for the Machine m.
+func (m *Machine) UpdatePassword(newpass string) {
+	m.Password = newpass
+	updateTokenValue(m.passtoken, newpass)
+}
+
+// UpdateLogin sets the login for the Machine m.
+func (m *Machine) UpdateLogin(newlogin string) {
+	m.Login = newlogin
+	updateTokenValue(m.logintoken, newlogin)
+}
+
+// UpdateAccount sets the account for the Machine m.
+func (m *Machine) UpdateAccount(newaccount string) {
+	m.Account = newaccount
+	updateTokenValue(m.accounttoken, newaccount)
+}
+
+func updateTokenValue(t *token, value string) {
+	oldvalue := t.value
+	t.value = value
+	newraw := make([]byte, len(t.rawvalue))
+	copy(newraw, t.rawvalue)
+	t.rawvalue = append(
+		bytes.TrimSuffix(newraw, []byte(oldvalue)),
+		[]byte(value)...,
+	)
+}
+
+// Macros contains all the macro definitions in a netrc file.
+type Macros map[string]string
+
+type token struct {
+	kind      tkType
+	macroName string
+	value     string
+	rawkind   []byte
+	rawvalue  []byte
+}
+
+// Error represents a netrc file parse error.
+type Error struct {
+	LineNum int    // Line number
+	Msg     string // Error message
+}
+
+// Error returns a string representation of error e.
+func (e *Error) Error() string {
+	return fmt.Sprintf("line %d: %s", e.LineNum, e.Msg)
+}
+
+func (e *Error) BadDefaultOrder() bool {
+	return e.Msg == errBadDefaultOrder
+}
+
+const errBadDefaultOrder = "default token must appear after all machine tokens"
+
+// scanLinesKeepPrefix is a split function for a Scanner that returns each line
+// of text. The returned token may include newlines if they are before the
+// first non-space character. The returned line may be empty. The end-of-line
+// marker is one optional carriage return followed by one mandatory newline. In
+// regular expression notation, it is `\r?\n`. The last non-empty line of
+// input will be returned even if it has no newline.
+func scanLinesKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+	// Skip leading spaces.
+	start := 0
+	for width := 0; start < len(data); start += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[start:])
+		if !unicode.IsSpace(r) {
+			break
+		}
+	}
+	if i := bytes.IndexByte(data[start:], '\n'); i >= 0 {
+		// We have a full newline-terminated line.
+		return start + i, data[0 : start+i], nil
+	}
+	// If we're at EOF, we have a final, non-terminated line. Return it.
+	if atEOF {
+		return len(data), data, nil
+	}
+	// Request more data.
+	return 0, nil, nil
+}
+
+// scanTokensKeepPrefix is a split function for a Scanner that returns each
+// space-separated word of text, with prefixing spaces included. It will never
+// return an empty string. The definition of space is set by unicode.IsSpace.
+//
+// Adapted from bufio.ScanWords().
+func scanTokensKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	// Skip leading spaces.
+	start := 0
+	for width := 0; start < len(data); start += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[start:])
+		if !unicode.IsSpace(r) {
+			break
+		}
+	}
+	if atEOF && len(data) == 0 || start == len(data) {
+		return len(data), data, nil
+	}
+	if len(data) > start && data[start] == '#' {
+		return scanLinesKeepPrefix(data, atEOF)
+	}
+	// Scan until space, marking end of word.
+	for width, i := 0, start; i < len(data); i += width {
+		var r rune
+		r, width = utf8.DecodeRune(data[i:])
+		if unicode.IsSpace(r) {
+			return i, data[:i], nil
+		}
+	}
+	// If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
+	if atEOF && len(data) > start {
+		return len(data), data, nil
+	}
+	// Request more data.
+ return 0, nil, nil +} + +func newToken(rawb []byte) (*token, error) { + _, tkind, err := bufio.ScanWords(rawb, true) + if err != nil { + return nil, err + } + var ok bool + t := token{rawkind: rawb} + t.kind, ok = keywords[string(tkind)] + if !ok { + trimmed := strings.TrimSpace(string(tkind)) + if trimmed == "" { + t.kind = tkWhitespace // whitespace-only, should happen only at EOF + return &t, nil + } + if strings.HasPrefix(trimmed, "#") { + t.kind = tkComment // this is a comment + return &t, nil + } + return &t, fmt.Errorf("keyword expected; got " + string(tkind)) + } + return &t, nil +} + +func scanValue(scanner *bufio.Scanner, pos int) ([]byte, string, int, error) { + if scanner.Scan() { + raw := scanner.Bytes() + pos += bytes.Count(raw, []byte{'\n'}) + return raw, strings.TrimSpace(string(raw)), pos, nil + } + if err := scanner.Err(); err != nil { + return nil, "", pos, &Error{pos, err.Error()} + } + return nil, "", pos, nil +} + +func parse(r io.Reader, pos int) (*Netrc, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + nrc := Netrc{machines: make([]*Machine, 0, 20), macros: make(Macros, 10)} + + defaultSeen := false + var currentMacro *token + var m *Machine + var t *token + scanner := bufio.NewScanner(bytes.NewReader(b)) + scanner.Split(scanTokensKeepPrefix) + + for scanner.Scan() { + rawb := scanner.Bytes() + if len(rawb) == 0 { + break + } + pos += bytes.Count(rawb, []byte{'\n'}) + t, err = newToken(rawb) + if err != nil { + if currentMacro == nil { + return nil, &Error{pos, err.Error()} + } + currentMacro.rawvalue = append(currentMacro.rawvalue, rawb...) + continue + } + + if currentMacro != nil && bytes.Contains(rawb, []byte{'\n', '\n'}) { + // if macro rawvalue + rawb would contain \n\n, then macro def is over + currentMacro.value = strings.TrimLeft(string(currentMacro.rawvalue), "\r\n") + nrc.macros[currentMacro.macroName] = currentMacro.value + currentMacro = nil + } + + switch t.kind { + case tkMacdef: + if _, t.macroName, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + currentMacro = t + case tkDefault: + if defaultSeen { + return nil, &Error{pos, "multiple default token"} + } + if m != nil { + nrc.machines, m = append(nrc.machines, m), nil + } + m = new(Machine) + m.Name = "" + defaultSeen = true + case tkMachine: + if defaultSeen { + return nil, &Error{pos, errBadDefaultOrder} + } + if m != nil { + nrc.machines, m = append(nrc.machines, m), nil + } + m = new(Machine) + if t.rawvalue, m.Name, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Name + m.nametoken = t + case tkLogin: + if m == nil || m.Login != "" { + return nil, &Error{pos, "unexpected token login "} + } + if t.rawvalue, m.Login, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Login + m.logintoken = t + case tkPassword: + if m == nil || m.Password != "" { + return nil, &Error{pos, "unexpected token password"} + } + if t.rawvalue, m.Password, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Password + m.passtoken = t + case tkAccount: + if m == nil || m.Account != "" { + return nil, &Error{pos, "unexpected token account"} + } + if t.rawvalue, m.Account, pos, err = scanValue(scanner, pos); err != nil { + return nil, &Error{pos, err.Error()} + } + t.value = m.Account + m.accounttoken = t + } + + nrc.tokens = append(nrc.tokens, t) + } + + if err := scanner.Err(); 
err != nil {
+		return nil, err
+	}
+
+	if m != nil {
+		nrc.machines, m = append(nrc.machines, m), nil
+	}
+	return &nrc, nil
+}
+
+// ParseFile opens the file at filename and then passes its io.Reader to
+// Parse().
+func ParseFile(filename string) (*Netrc, error) {
+	fd, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer fd.Close()
+	return Parse(fd)
+}
+
+// Parse parses from the Reader r as a netrc file and returns the set of
+// machine information and macros defined in it. The ``default'' machine,
+// which is intended to be used when no machine name matches, is identified
+// by an empty machine name. There can be only one ``default'' machine.
+//
+// If there is a parsing error, an Error is returned.
+func Parse(r io.Reader) (*Netrc, error) {
+	return parse(r, 1)
+}
+
+// FindMachine parses the netrc file identified by filename and returns the
+// Machine named by name. If a problem occurs parsing the file at filename, an
+// error is returned. If a machine named by name exists, it is returned. If no
+// Machine with name name is found and there is a ``default'' machine, the
+// ``default'' machine is returned. Otherwise, nil is returned.
+func FindMachine(filename, name string) (m *Machine, err error) {
+	n, err := ParseFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return n.FindMachine(name), nil
+}
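Before the vendored go-spew package begins, a short usage sketch for the go-netrc API above (ParseFile, FindMachine, and the Machine update helpers); the path, host, and credentials are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/bgentry/go-netrc/netrc"
)

func main() {
	// Parse the whole file so it can be edited and re-serialized.
	n, err := netrc.ParseFile("/home/user/.netrc") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// FindMachine falls back to the "default" entry when no name matches.
	m := n.FindMachine("example.com")
	if m == nil {
		log.Fatal("no matching machine and no default entry")
	}
	fmt.Println("login:", m.Login)

	// Updates keep the original token layout so MarshalText round-trips.
	m.UpdatePassword("new-secret")
	text, err := n.MarshalText()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(text))
}
```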
+ +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. + // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. + flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. + // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field. When it does, the offsets are updated accordingly. + vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. + upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. 
When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. + if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. + switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000000..1fe3cf3d5d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. 
+func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 0000000000..7c519ff47a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. +var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. 
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted.
It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
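In practice, the surrogate-key machinery above is what makes SortKeys and SpewKeys give stable map output. A small sketch of the consumer-visible effect; the map literal and config values are illustrative only:

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// Default config: keys print in Go's randomized map order, so two
	// runs can disagree.
	spew.Dump(m)

	// With SortKeys, keys are routed through sortValues; SpewKeys adds
	// the last-resort "spew the key to a string" fallback for key types
	// that are neither primitives nor Stringer/error implementations.
	cs := spew.ConfigState{Indent: " ", SortKeys: true, SpewKeys: true}
	cs.Dump(m)
}
```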
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000000..2e3d22f312
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level. The
+	// global config instance that all top-level functions use set this to a
+	// single space by default. If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures. The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods. As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked. The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output. Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings. This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. 
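As a rough illustration of these per-instance wrappers, the sketch below prints one value through the four supported verb forms; the outputs in the comments are indicative rather than exact, since pointer addresses vary run to run.

```go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

type pair struct{ X, Y int }

func main() {
	cs := spew.NewDefaultConfig() // Indent " ", all other options off
	p := &pair{X: 1, Y: 2}

	cs.Printf("%v\n", p)   // <*>{1 2}
	cs.Printf("%+v\n", p)  // <*>(0x...){X:1 Y:2}
	cs.Printf("%#v\n", p)  // (*main.pair){X:(int)1 Y:(int)2}
	cs.Printf("%#+v\n", p) // (*main.pair)(0x...){X:(int)1 Y:(int)2}

	// The same formatter works with any of the Fprint-style wrappers.
	cs.Fprintln(os.Stderr, p)
}
```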
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+//	Indent: " "
+//	MaxDepth: 0
+//	DisableMethods: false
+//	DisablePointerMethods: false
+//	ContinueOnMethod: false
+//	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000000..aacaac6f1e
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc. with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default. A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output. Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported, with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability. Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings. This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000000..df1d582a72
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8. It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char. It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
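The hexdump handling described above applies to any byte array or slice; a minimal sketch of what a consumer sees, with arbitrary input bytes:

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// Byte slices render hexdump -C style: offset, hex byte values, and
	// an ASCII column, indented to the current dump depth.
	spew.Dump([]byte("spew me up, Scotty"))
}
```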
+func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. 
We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000000..c49875bacb --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
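The three package-level entry points defined above differ only in where the output goes; a small sketch using nothing beyond the exported API:

```go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := map[string][]int{"fib": {1, 1, 2, 3, 5}}

	spew.Dump(v)             // writes to os.Stdout
	spew.Fdump(os.Stderr, v) // identical output to any io.Writer
	s := spew.Sdump(v)       // identical output captured as a string
	os.Stdout.WriteString(s)
}
```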
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000000..32c0e33882
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.
+// See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 0000000000..37ec93a14f --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. 
+ +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile
new file mode 100644
index 0000000000..ac034e5258
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/Makefile
@@ -0,0 +1,12 @@
+.PHONY: build test bench vet
+
+build: vet bench
+
+test:
+	go test -v -cover -race
+
+bench:
+	go test -v -cover -race -test.bench=. -test.benchmem
+
+vet:
+	go vet
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100644
index 0000000000..e67d51f320
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -0,0 +1,746 @@
+INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Features
+
+- Load multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+    go get gopkg.in/ini.v1
+
+To use with latest changes:
+
+    go get github.com/go-ini/ini
+
+Please add the `-u` flag to update in the future.
+
+### Testing
+
+If you want to test on your machine, please apply the `-t` flag:
+
+    go get -t gopkg.in/ini.v1
+
+Please add the `-u` flag to update in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **data source** is raw data of type `[]byte`, a file name of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing any other type simply returns an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide how many data sources to load at the beginning, you can still **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If you have a list of files and some of them may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is, whenever such a file becomes available before a later call to the `Reload` method, it will be loaded as usual.
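+For example, a minimal sketch of this flow (file names are hypothetical):
+
+```go
+// override.ini may not exist yet; LooseLoad does not treat that as an error.
+cfg, err := ini.LooseLoad("app.ini", "override.ini")
+if err != nil {
+	// Handle errors other than missing files.
+}
+
+// ... later, once override.ini has been created on disk:
+if err = cfg.Reload(); err == nil {
+	// Values from override.ini now overwrite those from app.ini.
+}
+```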
+#### Ignore case in section and key names
+
+When you do not care about the case of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 point to exactly the same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 point to exactly the same key object
+key1, err := sec1.GetKey("Key")
+key2, err := sec2.GetKey("KeY")
+```
+
+#### MySQL-like boolean key
+
+MySQL's configuration allows a key without a value as follows:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default, this is considered a missing value. But if you know you are going to deal with such cases, you can assign advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of such keys is always `true`, and when you save to a file, they will be kept in the same format as you read them.
+
+To generate such keys in your program, you could use `NewBooleanKey`:
+
+```go
+key, err := sec.NewBooleanKey("skip-host-cache")
+```
+
+#### Comment
+
+Take care that the following content will be treated as comments:
+
+1. A line that begins with `#` or `;`
+2. Anything after `#` or `;` on a line
+3. Anything after a section name (i.e. anything after `[some section name]`)
+
+If you want to save a value containing `#` or `;`, please quote it with ``` ` ``` or ``` """ ```.
+
+Alternatively, you can use the following `LoadOptions` to completely ignore inline comments:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")
+```
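+For example, a small hypothetical sketch of such quoting:
+
+```ini
+; Without quotes, everything after "#" or ";" would be dropped as a comment.
+connection = `server=127.0.0.1;uid=root`
+motto = """live # laugh # love"""
+```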
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just give an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code could make your life easier:
+
+```go
+section := cfg.Section("section name")
+```
+
+What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+The same shortcut rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a cloned map of keys and corresponding values:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate a key's value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+	if len(in) == 0 {
+		return "default"
+	}
+	return in
+})
+```
+
+If you do not want any auto-transformation (such as recursive reads) of the values, you can get the raw value directly (this also gives much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if a raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get values of other types:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must also accept one argument as a default value,
+// used when the key is not found or the value cannot be parsed to the given type.
+// The exception is MustString, which always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value spans multiple lines?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+That's cool, how about continuation lines?
+
+```ini
+[advance]
+two_lines = how about \
+	continuation lines?
+lots_of_lines = 1 \
+	2 \
+	3 \
+	4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Well, I hate continuation lines, how do I disable them?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+	IgnoreContinuation: true,
+}, "filename")
+```
+
+Holy crap!
+
+Note that quotes (single or double) around values will be stripped:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+That's all? Hmm, no.
+
+#### Helper methods of working with values
+
+To get a value restricted to a list of candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+The default value is returned if the key's value is not among the candidates you give; the default value does not need to be one of the candidates.
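+For instance, a minimal sketch of this fallback (the key name and values are made up):
+
+```go
+// Suppose the data source contains: LEVEL = warn
+// "warn" is not among the candidates, so the default "info" is returned.
+level := cfg.Section("").Key("LEVEL").In("info", []string{"debug", "info", "error"})
+// level == "info"
+```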
+
+To validate a value within a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To use the zero value of the type for invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid values from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Or to return nothing but an error when there are invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save configuration is writing it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way is to write to an `io.Writer`:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+By default, spaces are used to align the "=" sign between keys and values; to disable that:
+
+```go
+ini.PrettyFormat = false
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section, and `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax up to 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library tries its parent section, and so on, until there is no parent section.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### Retrieve parent keys available to a child section
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### Unparseable Sections
+
+Sometimes you have sections that do not contain key-value pairs but raw content. To handle such cases, you can use `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, []byte(`[COMMENTS]
+<1> This slide has the fuel listed in the wrong units `))
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1> This slide has the fuel listed in the wrong units
+------ end --- */
+```
+
+### Auto-increment Key Names
+
+If a key name is `-` in the data source, it is treated as special syntax for auto-increment key names, starting from 1; each section keeps its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+	Content string
+	Cities  []string
+}
+
+type Person struct {
+	Name string
+	Age  int `ini:"age"`
+	Male bool
+	Born time.Time
+	Note
+	Created time.Time `ini:"-"`
+}
+
+func main() {
+	cfg, err := ini.Load("path/to/ini")
+	// ...
+	p := new(Person)
+	err = cfg.MapTo(p)
+	// ...
+
+	// Things can be simpler.
+	err = ini.MapTo(p, "path/to/ini")
+	// ...
+
+	// Just map a section? Fine.
+	n := new(Note)
+	err = cfg.Section("Note").MapTo(n)
+	// ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map to the struct. The value is kept as-is if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+	Name: "Joe",
+}
+// ...
+```
+
+It's really cool, but what's the point if you can't get your file back from the struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+	Dates  []time.Time `delim:"|"`
+	Places []string    `ini:"places,omitempty"`
+	None   []int       `ini:",omitempty"`
+}
+
+type Author struct {
+	Name      string `ini:"NAME"`
+	Male      bool
+	Age       int
+	GPA       float64
+	NeverMind string `ini:"-"`
+	*Embeded
+}
+
+func main() {
+	a := &Author{"Unknwon", true, 21, 2.8, "",
+		&Embeded{
+			[]time.Time{time.Now(), time.Now()},
+			[]string{"HangZhou", "Boston"},
+			[]int{},
+		}}
+	cfg := ini.Empty()
+	err := ini.ReflectFrom(cfg, a)
+	// ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
+
+There are two built-in name mappers:
+
+- `AllCapsUnderscore`: converts names to the `ALL_CAPS_UNDERSCORE` format before matching sections or keys.
+- `TitleUnderscore`: converts names to the `title_underscore` format before matching sections or keys.
+
+To use them:
+
+```go
+type Info struct {
+	PackageName string
+}
+
+func main() {
+	err := ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
+
+#### Value Mapper
+
+To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
+
+```go
+type Env struct {
+	Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+	cfg.ValueMapper = os.ExpandEnv
+	// ...
+	env := &Env{}
+	err = cfg.Section("env").MapTo(env)
+}
+```
+
+This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as its own section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does `BlockMode` field do?
+
+By default, the library lets you both read and write values, so it needs a lock to keep your data safe. If you are certain you will only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
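+A minimal sketch of that read-only usage (the file and key names are hypothetical):
+
+```go
+cfg, err := ini.Load("app.ini")
+if err != nil {
+	// handle error
+}
+// Safe only as long as no goroutine writes to cfg from here on.
+cfg.BlockMode = false
+fmt.Println(cfg.Section("").Key("key name").String())
+```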
+
+### Why another INI library?
+
+Many people use my other INI library, [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this library is about **10-30%** faster.
+
+Making those changes meant breaking the API, so it was safer to start over in a new place and use `gopkg.in` to version the package from the beginning. (PS: the import path is shorter, too.)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 0000000000..0cf4194492 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,733 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + +使用一个特定版本: + + go get gopkg.in/ini.v1 + +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +#### 忽略键名的大小写 + +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 和 sec2 指向同一个分区对象 +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 和 key2 指向同一个键对象 +key1, err := sec1.GetKey("Key") +key2, err := sec2.GetKey("KeY") +``` + +#### 类似 MySQL 配置中的布尔值键 + +MySQL 的配置文件中会出现没有具体值的布尔类型的键: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 + +如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### 关于注释 + +下述几种情况的内容将被视为注释: + +1. 所有以 `#` 或 `;` 开头的行 +2. 所有在 `#` 或 `;` 之后的内容 +3. 
分区标签后的文字 (即 `[分区名]` 之后的内容) + +如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 + +除此之外,您还可以通过 `LoadOptions` 完全忽略行内注释: + +```go +cfg, err := LoadSources(LoadOptions{IgnoreInlineComment: true}, "app.ini")) +``` + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("section name") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +判断某个键是否存在: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.Section("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判断某个原值是否存在: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? 
+ +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇靠给力啊! + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: + +```go +ini.PrettyFormat = false +``` + +## 高级用法 + +### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 获取上级父分区下的所有键名 + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### 无法解析的分区 + +如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... 
+} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 值映射器(Value Mapper) + +值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? + +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 0000000000..80afe74315 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,32 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "fmt" +) + +type ErrDelimiterNotFound struct { + Line string +} + +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 0000000000..0bd6c505b6 --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,561 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. + DEFAULT_SECTION = "DEFAULT" + + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + _VERSION = "1.28.0" +) + +// Version returns current package version literal. +func Version() string { + return _VERSION +} + +var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false + + // Indicate whether to put a line between sections + PrettySection = true +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. 
+type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + options LoadOptions + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + options: opts, + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. + IgnoreInlineComment bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string +} + +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. 
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as the Load function,
+// except it allows having shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore the error here; we are sure our data is good.
+	f, _ := Load([]byte(""))
+	return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new section: empty section name")
+	} else if f.options.Insensitive && name != DEFAULT_SECTION {
+		name = strings.ToLower(name)
+	}
+
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if inSlice(name, f.sectionList) {
+		return f.sections[name], nil
+	}
+
+	f.sectionList = append(f.sectionList, name)
+	f.sections[name] = newSection(f, name)
+	return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+	section, err := f.NewSection(name)
+	if err != nil {
+		return nil, err
+	}
+
+	section.isRawSection = true
+	section.rawBody = body
+	return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+	for _, name := range names {
+		if _, err = f.NewSection(name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetSection returns a section by the given name.
+func (f *File) GetSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	} else if f.options.Insensitive {
+		name = strings.ToLower(name)
+	}
+
+	if f.BlockMode {
+		f.lock.RLock()
+		defer f.lock.RUnlock()
+	}
+
+	sec := f.sections[name]
+	if sec == nil {
+		return nil, fmt.Errorf("section '%s' does not exist", name)
+	}
+	return sec, nil
+}
+
+// Section assumes the named section exists and creates a new one when it does not.
+func (f *File) Section(name string) *Section {
+	sec, err := f.GetSection(name)
+	if err != nil {
+		// Note: this is fine because the only possible error from NewSection
+		// is an empty section name, and in that case this code path is never
+		// executed.
+		sec, _ = f.NewSection(name)
+		return sec
+	}
+	return sec
+}
+
+// Sections returns a list of all sections.
+func (f *File) Sections() []*Section {
+	sections := make([]*Section, len(f.sectionList))
+	for i := range f.sectionList {
+		sections[i] = f.Section(f.sectionList[i])
+	}
+	return sections
+}
+
+// ChildSections returns a list of child sections of the given section name.
+func (f *File) ChildSections(name string) []*Section {
+	return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns a list of section names.
+func (f *File) SectionStrings() []string {
+	list := make([]string, len(f.sectionList))
+	copy(list, f.sectionList)
+	return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	}
+
+	for i, s := range f.sectionList {
+		if s == name {
+			f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
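+			// Also remove the parsed section data itself, not just its name
+			// from the ordered section list above.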
+			delete(f.sections, name)
+			return
+		}
+	}
+}
+
+func (f *File) reload(s dataSource) error {
+	r, err := s.ReadCloser()
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+	for _, s := range f.dataSources {
+		if err = f.reload(s); err != nil {
+			// In loose mode, we create an empty default section for nonexistent files.
+			if os.IsNotExist(err) && f.options.Loose {
+				f.parse(bytes.NewBuffer(nil))
+				continue
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+	ds, err := parseDataSource(source)
+	if err != nil {
+		return err
+	}
+	f.dataSources = append(f.dataSources, ds)
+	for _, s := range others {
+		ds, err = parseDataSource(s)
+		if err != nil {
+			return err
+		}
+		f.dataSources = append(f.dataSources, ds)
+	}
+	return f.Reload()
+}
+
+// WriteToIndent writes content into io.Writer with the given indentation.
+// If PrettyFormat has been set to true,
+// it will align "=" signs with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+	equalSign := "="
+	if PrettyFormat {
+		equalSign = " = "
+	}
+
+	// Use a buffer to make sure the target is safe until encoding finishes.
+	buf := bytes.NewBuffer(nil)
+	for i, sname := range f.sectionList {
+		sec := f.Section(sname)
+		if len(sec.Comment) > 0 {
+			if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+				sec.Comment = "; " + sec.Comment
+			}
+			if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+				return 0, err
+			}
+		}
+
+		if i > 0 || DefaultHeader {
+			if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+				return 0, err
+			}
+		} else {
+			// Write nothing if default section is empty
+			if len(sec.keyList) == 0 {
+				continue
+			}
+		}
+
+		if sec.isRawSection {
+			if _, err = buf.WriteString(sec.rawBody); err != nil {
+				return 0, err
+			}
+			continue
+		}
+
+		// Count and generate alignment length and buffer spaces using the
+		// longest key. Keys may be modified if they contain certain characters so
+		// we need to take that into account in our calculation.
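+		// For example (illustrative): a key named a=b is written as `a=b`,
+		// adding two characters to its width, while a key containing a
+		// backtick is wrapped in """ on both sides, adding six.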
+		alignLength := 0
+		if PrettyFormat {
+			for _, kname := range sec.keyList {
+				keyLength := len(kname)
+				// The first case will surround the key with ` and the second with """.
+				if strings.ContainsAny(kname, "\"=:") {
+					keyLength += 2
+				} else if strings.Contains(kname, "`") {
+					keyLength += 6
+				}
+
+				if keyLength > alignLength {
+					alignLength = keyLength
+				}
+			}
+		}
+		alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+	KEY_LIST:
+		for _, kname := range sec.keyList {
+			key := sec.Key(kname)
+			if len(key.Comment) > 0 {
+				if len(indent) > 0 && sname != DEFAULT_SECTION {
+					buf.WriteString(indent)
+				}
+				if key.Comment[0] != '#' && key.Comment[0] != ';' {
+					key.Comment = "; " + key.Comment
+				}
+				if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+					return 0, err
+				}
+			}
+
+			if len(indent) > 0 && sname != DEFAULT_SECTION {
+				buf.WriteString(indent)
+			}
+
+			switch {
+			case key.isAutoIncrement:
+				kname = "-"
+			case strings.ContainsAny(kname, "\"=:"):
+				kname = "`" + kname + "`"
+			case strings.Contains(kname, "`"):
+				kname = `"""` + kname + `"""`
+			}
+
+			for _, val := range key.ValueWithShadows() {
+				if _, err = buf.WriteString(kname); err != nil {
+					return 0, err
+				}
+
+				if key.isBooleanType {
+					if kname != sec.keyList[len(sec.keyList)-1] {
+						buf.WriteString(LineBreak)
+					}
+					continue KEY_LIST
+				}
+
+				// Write out alignment spaces before "=" sign
+				if PrettyFormat {
+					buf.Write(alignSpaces[:alignLength-len(kname)])
+				}
+
+				// In case key value contains "\n", "`", "\"", "#" or ";"
+				if strings.ContainsAny(val, "\n`") {
+					val = `"""` + val + `"""`
+				} else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
+					val = "`" + val + "`"
+				}
+				if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		if PrettySection {
+			// Put a line between sections
+			if _, err = buf.WriteString(LineBreak); err != nil {
+				return 0, err
+			}
+		}
+	}
+
+	return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+	return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to the file system with the given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because os.Create truncates the target file, it's safer to save
+	// to a temporary location and rename after we're done.
+	tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+	defer os.Remove(tmpPath)
+
+	fw, err := os.Create(tmpPath)
+	if err != nil {
+		return err
+	}
+
+	if _, err = f.WriteToIndent(fw, indent); err != nil {
+		fw.Close()
+		return err
+	}
+	fw.Close()
+
+	// Remove old file and rename the new one.
+	os.Remove(filename)
+	return os.Rename(tmpPath, filename)
+}
+
+// SaveTo writes content to the file system.
+func (f *File) SaveTo(filename string) error {
+	return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100644
index 0000000000..838356af01
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -0,0 +1,699 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+	s               *Section
+	name            string
+	value           string
+	isAutoIncrement bool
+	isBooleanType   bool
+
+	isShadow bool
+	shadows  []*Key
+
+	Comment string
+}
+
+// newKey simply returns a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+	return &Key{
+		s:     s,
+		name:  name,
+		value: val,
+	}
+}
+
+func (k *Key) addShadow(val string) error {
+	if k.isShadow {
+		return errors.New("cannot add shadow to another shadow key")
+	} else if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add shadow to auto-increment or boolean key")
+	}
+
+	shadow := newKey(k.s, k.name, val)
+	shadow.isShadow = true
+	k.shadows = append(k.shadows, shadow)
+	return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+	if !k.s.f.options.AllowShadows {
+		return errors.New("shadow key is not allowed")
+	}
+	return k.addShadow(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purposes.
+func (k *Key) Value() string {
+	return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any.
+func (k *Key) ValueWithShadows() []string {
+	if len(k.shadows) == 0 {
+		return []string{k.value}
+	}
+	vals := make([]string, len(k.shadows)+1)
+	vals[0] = k.value
+	for i := range k.shadows {
+		vals[i+1] = k.shadows[i].value
+	}
+	return vals
+}
+
+// transformValue takes a raw value and transforms it into its final string.
+func (k *Key) transformValue(val string) string {
+	if k.s.f.ValueMapper != nil {
+		val = k.s.f.ValueMapper(val)
+	}
+
+	// Fail fast if no indicator char is found for a recursive value
+	if !strings.Contains(val, "%") {
+		return val
+	}
+	for i := 0; i < _DEPTH_VALUES; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := strings.TrimLeft(vr, "%(")
+		noption = strings.TrimRight(noption, ")s")
+
+		// Search in the same section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil {
+			// Search again in default section.
+			nk, _ = k.s.f.Section("").GetKey(noption)
+		}
+
+		// Substitute the variable reference with the new value.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+	return k.transformValue(k.value)
+}
+
+// Validate accepts a validate function which can
+// return a modified result as key value.
+func (k *Key) Validate(fn func(string) string) string {
+	return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
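+//
+// For example, parseBool("on") returns true and parseBool("2") returns an error.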
+func parseBool(str string) (value bool, err error) {
+	switch str {
+	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+		return true, nil
+	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+		return false, nil
+	}
+	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+	return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+	return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+	return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+	return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+	u, e := strconv.ParseUint(k.String(), 10, 64)
+	return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+	return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+	return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+	return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+	return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+	val := k.String()
+	if len(val) == 0 {
+		k.value = defaultVal
+		return defaultVal
+	}
+	return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+	val, err := k.Bool()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatBool(defaultVal[0])
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+	val, err := k.Float64()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+	val, err := k.Int()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+	val, err := k.Int64()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatInt(defaultVal[0], 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+	val, err := k.Uint()
+	if len(defaultVal) > 0 && err != nil {
+		k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+		return defaultVal[0]
+	}
+	return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
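+// Note that, like the other Must* helpers, a supplied default also rewrites
+// the key's stored value when parsing fails.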
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. 
Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. 
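+//
+// For a value of "1,2,x" (illustrative), StrictInt64s(",") returns an error,
+// while ValidInt64s(",") returns [1 2] and Int64s(",") returns [1 2 0].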
+func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseUints transforms strings to uints. +func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// parseUint64s transforms strings to uint64s. +func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseTimesFormat transforms strings to times in given format. 
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 0000000000..69d5476273 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,361 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
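+	// e.g. (illustrative): `a=b` = value   or   """name""" = value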
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + + // Check continuation lines when desired + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !ignoreInlineComment { + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. 
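+// It reads the input line by line, tracking the current section, any
+// accumulated comments, and whether the section was marked unparseable.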
+func (f *File) parse(reader io.Reader) (err error) {
+	p := newParser(reader)
+	if err = p.BOM(); err != nil {
+		return fmt.Errorf("BOM: %v", err)
+	}
+
+	// Ignore error because default section name is never empty string.
+	section, _ := f.NewSection(DEFAULT_SECTION)
+
+	var line []byte
+	var inUnparseableSection bool
+	for !p.isEOF {
+		line, err = p.readUntil('\n')
+		if err != nil {
+			return err
+		}
+
+		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+		if len(line) == 0 {
+			continue
+		}
+
+		// Comments
+		if line[0] == '#' || line[0] == ';' {
+			// Note: we keep the trailing line break on purpose; it is needed
+			// when a comment spans multiple lines, and it is cleaned once at
+			// the end when the value is set.
+			p.comment.Write(line)
+			continue
+		}
+
+		// Section
+		if line[0] == '[' {
+			// Read to the next ']' (TODO: support quoted strings)
+			// TODO(unknwon): use LastIndexByte when stop supporting Go1.4
+			closeIdx := bytes.LastIndex(line, []byte("]"))
+			if closeIdx == -1 {
+				return fmt.Errorf("unclosed section: %s", line)
+			}
+
+			name := string(line[1:closeIdx])
+			section, err = f.NewSection(name)
+			if err != nil {
+				return err
+			}
+
+			comment, has := cleanComment(line[closeIdx+1:])
+			if has {
+				p.comment.Write(comment)
+			}
+
+			section.Comment = strings.TrimSpace(p.comment.String())
+
+			// Reset auto-counter and comments
+			p.comment.Reset()
+			p.count = 1
+
+			inUnparseableSection = false
+			for i := range f.options.UnparseableSections {
+				if f.options.UnparseableSections[i] == name ||
+					(f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
+					inUnparseableSection = true
+					continue
+				}
+			}
+			continue
+		}
+
+		if inUnparseableSection {
+			section.isRawSection = true
+			section.rawBody += string(line)
+			continue
+		}
+
+		kname, offset, err := readKeyName(line)
+		if err != nil {
+			// Treat as boolean key when desired, and whole line is key name.
+			if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
+				kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
+				if err != nil {
+					return err
+				}
+				key, err := section.NewBooleanKey(kname)
+				if err != nil {
+					return err
+				}
+				key.Comment = strings.TrimSpace(p.comment.String())
+				p.comment.Reset()
+				continue
+			}
+			return err
+		}
+
+		// Auto increment.
+		isAutoIncr := false
+		if kname == "-" {
+			isAutoIncr = true
+			kname = "#" + strconv.Itoa(p.count)
+			p.count++
+		}
+
+		value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
+		if err != nil {
+			return err
+		}
+
+		key, err := section.NewKey(kname, value)
+		if err != nil {
+			return err
+		}
+		key.isAutoIncrement = isAutoIncr
+		key.Comment = strings.TrimSpace(p.comment.String())
+		p.comment.Reset()
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
new file mode 100644
index 0000000000..94f7375ed4
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/section.go
@@ -0,0 +1,248 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// Section represents a config section.
+type Section struct {
+	f        *File
+	Comment  string
+	name     string
+	keys     map[string]*Key
+	keyList  []string
+	keysHash map[string]string
+
+	isRawSection bool
+	rawBody      string
+}
+
+func newSection(f *File, name string) *Section {
+	return &Section{
+		f:        f,
+		name:     name,
+		keys:     make(map[string]*Key),
+		keyList:  make([]string, 0, 10),
+		keysHash: make(map[string]string),
+	}
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+	return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+func (s *Section) Body() string {
+	return strings.TrimSpace(s.rawBody)
+}
+
+// NewKey creates a new key in the given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new key: empty key name")
+	} else if s.f.options.Insensitive {
+		name = strings.ToLower(name)
+	}
+
+	if s.f.BlockMode {
+		s.f.lock.Lock()
+		defer s.f.lock.Unlock()
+	}
+
+	if inSlice(name, s.keyList) {
+		if s.f.options.AllowShadows {
+			if err := s.keys[name].addShadow(val); err != nil {
+				return nil, err
+			}
+		} else {
+			s.keys[name].value = val
+		}
+		return s.keys[name], nil
+	}
+
+	s.keyList = append(s.keyList, name)
+	s.keys[name] = newKey(s, name, val)
+	s.keysHash[name] = val
+	return s.keys[name], nil
+}
+
+// NewBooleanKey creates a new boolean type key in the given section.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+	key, err := s.NewKey(name, "true")
+	if err != nil {
+		return nil, err
+	}
+
+	key.isBooleanType = true
+	return key, nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+	// FIXME: change to section level lock?
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+	}
+	if s.f.options.Insensitive {
+		name = strings.ToLower(name)
+	}
+	key := s.keys[name]
+	if s.f.BlockMode {
+		s.f.lock.RUnlock()
+	}
+
+	if key == nil {
+		// Check if it is a child-section.
+		sname := s.name
+		for {
+			if i := strings.LastIndex(sname, "."); i > -1 {
+				sname = sname[:i]
+				sec, err := s.f.GetSection(sname)
+				if err != nil {
+					continue
+				}
+				return sec.GetKey(name)
+			} else {
+				break
+			}
+		}
+		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' does not exist", s.name, name)
+	}
+	return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+	key, _ := s.GetKey(name)
+	return key != nil
+}
+
+// Haskey is a backwards-compatible name for HasKey.
+func (s *Section) Haskey(name string) bool {
+	return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+	if s.f.BlockMode {
+		s.f.lock.RLock()
+		defer s.f.lock.RUnlock()
+	}
+
+	for _, k := range s.keys {
+		if value == k.value {
+			return true
+		}
+	}
+	return false
+}
+
+// Key assumes the named Key exists in the section and creates an empty one when it does not.
+func (s *Section) Key(name string) *Key {
+	key, err := s.GetKey(name)
+	if err != nil {
+		// It's OK here because the only possible error is an empty key name,
+		// but if it's empty, this piece of code won't be executed.
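+		// This also means Key never returns nil, so lookups can be chained
+		// without nil checks.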
+ key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + "." + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]) + } + } + return children +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 0000000000..d522e003a1 --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,499 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
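+	// For example (illustrative), "MaxSize" becomes "max_size" here and
+	// "MAX_SIZE" under AllCapsUnderscore.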
+	TitleUnderscore NameMapper = func(raw string) string {
+		newstr := make([]rune, 0, len(raw))
+		for i, chr := range raw {
+			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+				if i > 0 {
+					newstr = append(newstr, '_')
+				}
+				chr -= ('A' - 'a')
+			}
+			newstr = append(newstr, chr)
+		}
+		return string(newstr)
+	}
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	if s.f.NameMapper != nil {
+		return s.f.NameMapper(raw)
+	}
+	return raw
+}
+
+func parseDelim(actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+	var strs []string
+	if allowShadow {
+		strs = key.StringsWithShadows(delim)
+	} else {
+		strs = key.Strings(delim)
+	}
+
+	numVals := len(strs)
+	if numVals == 0 {
+		return nil
+	}
+
+	var vals interface{}
+	var err error
+
+	sliceOf := field.Type().Elem().Kind()
+	switch sliceOf {
+	case reflect.String:
+		vals = strs
+	case reflect.Int:
+		vals, err = key.parseInts(strs, true, false)
+	case reflect.Int64:
+		vals, err = key.parseInt64s(strs, true, false)
+	case reflect.Uint:
+		vals, err = key.parseUints(strs, true, false)
+	case reflect.Uint64:
+		vals, err = key.parseUint64s(strs, true, false)
+	case reflect.Float64:
+		vals, err = key.parseFloat64s(strs, true, false)
+	case reflectTime:
+		vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
+	default:
+		return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+	}
+	if isStrict {
+		return err
+	}
+
+	slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+	for i := 0; i < numVals; i++ {
+		switch sliceOf {
+		case reflect.String:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+		case reflect.Int:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+		case reflect.Int64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+		case reflect.Uint:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+		case reflect.Uint64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+		case reflect.Float64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+		case reflectTime:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+		}
+	}
+	field.Set(slice)
+	return nil
+}
+
+func wrapStrictError(err error, isStrict bool) error {
+	if isStrict {
+		return err
+	}
+	return nil
+}
+
+// setWithProperType sets proper value to field based on its type,
+// but it does not return an error on parse failure, because we want to
+// keep the default value that is already assigned to the struct.
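+//
+// Note that integer-kind fields are first tried as time.Duration, so a value
+// such as "30s" (illustrative) maps directly onto a time.Duration field.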
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetUint(uintVal) + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + +func (s *Section) mapTo(val reflect.Value, isStrict bool) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field, isStrict); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. 
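+//
+// A minimal sketch (type, tag, and section names are illustrative):
+//
+//	type Server struct {
+//		Addr  string `ini:"addr"`
+//		Ports []int  `ini:"ports" delim:","`
+//	}
+//	var srv Server
+//	err := cfg.Section("server").MapTo(&srv)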
+func (s *Section) MapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val, false)
+}
+
+// StrictMapTo maps section to given struct in strict mode,
+// which returns all possible errors including value parsing errors.
+func (s *Section) StrictMapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val, true)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+	return f.Section("").MapTo(v)
+}
+
+// StrictMapTo maps file to given struct in strict mode,
+// which returns all possible errors including value parsing errors.
+func (f *File) StrictMapTo(v interface{}) error {
+	return f.Section("").StrictMapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.MapTo(v)
+}
+
+// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
+// which returns all possible errors including value parsing errors.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.StrictMapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+	return MapToWithMapper(v, nil, source, others...)
+}
+
+// StrictMapTo maps data sources to given struct in strict mode,
+// which returns all possible errors including value parsing errors.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+	return StrictMapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite of setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+	slice := field.Slice(0, field.Len())
+	if field.Len() == 0 {
+		return nil
+	}
+
+	var buf bytes.Buffer
+	sliceOf := field.Type().Elem().Kind()
+	for i := 0; i < field.Len(); i++ {
+		switch sliceOf {
+		case reflect.String:
+			buf.WriteString(slice.Index(i).String())
+		case reflect.Int, reflect.Int64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+		case reflect.Uint, reflect.Uint64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+		case reflect.Float64:
+			buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+		case reflectTime:
+			buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+		default:
+			return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+		}
+		buf.WriteString(delim)
+	}
+	key.SetValue(buf.String()[:buf.Len()-1])
+	return nil
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
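+// That is, it serializes a struct field back into the key's string value.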
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		key.SetValue(field.String())
+	case reflect.Bool:
+		key.SetValue(fmt.Sprint(field.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		key.SetValue(fmt.Sprint(field.Int()))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		key.SetValue(fmt.Sprint(field.Uint()))
+	case reflect.Float32, reflect.Float64:
+		key.SetValue(fmt.Sprint(field.Float()))
+	case reflectTime:
+		key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+	case reflect.Slice:
+		return reflectSliceWithProperType(key, field, delim)
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+// isEmptyValue is copied from encoding/json/encode.go, with modifications for time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflectTime:
+		return v.Interface().(time.Time).IsZero()
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		opts := strings.SplitN(tag, ",", 2)
+		if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, opts[0])
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+			// Note: The only error here is that the section doesn't exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: Same reason as section.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+		}
+
+	}
+	return nil
+}
+
+// ReflectFrom reflects section from given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot reflect from non-pointer struct")
+	}
+
+	return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+	return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
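+//
+// A minimal sketch (the struct value srv is illustrative):
+//
+//	cfg := ini.Empty()
+//	err := ini.ReflectFromWithMapper(cfg, &srv, ini.TitleUnderscore)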
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md new file mode 100644 index 0000000000..1c95f59782 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -0,0 +1,89 @@ +# errwrap + +`errwrap` is a package for Go that formalizes the pattern of wrapping errors +and checking if an error contains another error. + +There is a common pattern in Go of taking a returned `error` value and +then wrapping it (such as with `fmt.Errorf`) before returning it. The problem +with this pattern is that you completely lose the original `error` structure. + +Arguably the _correct_ approach is that you should make a custom structure +implementing the `error` interface, and have the original error as a field +on that structure, such [as this example](http://golang.org/pkg/os/#PathError). +This is a good approach, but you have to know the entire chain of possible +rewrapping that happens, when you might just care about one. 
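+
+As a small sketch of the problem (the path and message here are just
+illustrative), wrapping with `fmt.Errorf` flattens a structured error into a
+plain string, so checks against the original type stop working:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+func main() {
+	_, err := os.Open("/i/dont/exist")
+
+	// err is an *os.PathError here, so the stdlib helper works:
+	fmt.Println(os.IsNotExist(err)) // true
+
+	// Wrapping with fmt.Errorf produces a plain string error, so the
+	// original structure (and therefore this check) is lost:
+	wrapped := fmt.Errorf("could not read config: %s", err)
+	fmt.Println(os.IsNotExist(wrapped)) // false
+}
+```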
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+	_, err := os.Open("/i/dont/exist")
+	if err != nil {
+		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+	}
+
+	return nil
+}
+
+func main() {
+	err := tryOpen()
+
+	// We can use the Contains helpers to check if an error contains
+	// another error. It is safe to do this with a nil error, or with
+	// an error that doesn't even use the errwrap package.
+	if errwrap.Contains(err, "doesn't exist") {
+		// Do something
+	}
+	if errwrap.ContainsType(err, new(os.PathError)) {
+		// Do something
+	}
+
+	// Or we can use the associated `Get` functions to just extract
+	// a specific error. This would return nil if that specific error doesn't
+	// exist.
+	perr := errwrap.GetType(err, new(os.PathError))
+	if perr != nil {
+		// Do something with the extracted *os.PathError
+	}
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and such by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+	Code ErrorCode
+	Err  error
+}
+
+func (e *AppError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+	// This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644
index 0000000000..a733bef18c
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/errwrap.go
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+    "errors"
+    "reflect"
+    "strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+    WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+    return &wrappedError{
+        Outer: outer,
+        Inner: inner,
+    }
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. +func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. +func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/go-getter/LICENSE b/vendor/github.com/hashicorp/go-getter/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
“Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md new file mode 100644 index 0000000000..4a0b6a625d --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/README.md @@ -0,0 +1,253 @@ +# go-getter + +[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis] +[![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: http://travis-ci.org/hashicorp/go-getter +[godocs]: http://godoc.org/github.com/hashicorp/go-getter +[appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master + +go-getter is a library for Go (golang) for downloading files or directories +from various sources using a URL as the primary form of input. + +The power of this library is being flexible in being able to download +from a number of different sources (file paths, Git, HTTP, Mercurial, etc.) +using a single string as input. This removes the burden of knowing how to +download from a variety of sources from the implementer. + +The concept of a _detector_ automatically turns invalid URLs into proper +URLs. For example: "github.com/hashicorp/go-getter" would turn into a +Git URL. Or "./foo" would turn into a file URL. These are extensible. + +This library is used by [Terraform](https://terraform.io) for +downloading modules, [Otto](https://ottoproject.io) for dependencies and +Appfile imports, and [Nomad](https://nomadproject.io) for downloading +binaries. + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-getter). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-getter +``` + +go-getter also has a command you can use to test URL strings: + +``` +$ go install github.com/hashicorp/go-getter/cmd/go-getter +... + +$ go-getter github.com/foo/bar ./foo +... +``` + +The command is useful for verifying URL structures. 
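+
+For library use, a minimal sketch looks like the following (the source and
+destination are just illustrative; `Client` and its fields are documented in
+the package's GoDoc):
+
+```go
+package main
+
+import (
+	"log"
+	"os"
+
+	getter "github.com/hashicorp/go-getter"
+)
+
+func main() {
+	pwd, err := os.Getwd()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Download github.com/foo/bar into ./foo. The detectors turn the
+	// short form into a full Git-over-HTTP URL automatically.
+	client := &getter.Client{
+		Src:  "github.com/foo/bar",
+		Dst:  "./foo",
+		Pwd:  pwd,
+		Mode: getter.ClientModeDir,
+	}
+	if err := client.Get(); err != nil {
+		log.Fatal(err)
+	}
+}
+```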
+
+## URL Format
+
+go-getter uses a single string URL as input to download from a variety of
+protocols. go-getter has various "tricks" with this URL to do certain things.
+This section documents the URL format.
+
+### Supported Protocols and Detectors
+
+**Protocols** are used to download files/directories using a specific
+mechanism. Example protocols are Git and HTTP.
+
+**Detectors** are used to transform a valid or invalid URL into another
+URL if it matches a certain pattern. Example: "github.com/user/repo" is
+automatically transformed into a fully valid Git URL. This allows go-getter
+to be very user friendly.
+
+go-getter out of the box supports the following protocols. Additional protocols
+can be added at runtime by implementing the `Getter` interface.
+
+  * Local files
+  * Git
+  * Mercurial
+  * HTTP
+  * Amazon S3
+
+In addition to the above protocols, go-getter has what are called "detectors."
+These take a URL and attempt to automatically choose the best protocol for
+it, which might even involve changing the protocol. The following detection
+is built-in by default:
+
+  * File paths such as "./foo" are automatically changed to absolute
+    file URLs.
+  * GitHub URLs, such as "github.com/mitchellh/vagrant", are automatically
+    changed to Git protocol over HTTP.
+  * BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant", are automatically
+    changed to a Git or Mercurial protocol using the BitBucket API.
+
+### Forced Protocol
+
+In some cases, the protocol to use is ambiguous depending on the source
+URL. For example, "http://github.com/mitchellh/vagrant.git" could reference
+an HTTP URL or a Git URL. Forced protocol syntax is used to disambiguate this
+URL.
+
+A protocol can be forced by prefixing the URL with the protocol followed
+by double colons. For example: `git::http://github.com/mitchellh/vagrant.git`
+would download the given HTTP URL using the Git protocol.
+
+Forced protocols will also override any detectors.
+
+In the absence of a forced protocol, detectors may be run on the URL, transforming
+the protocol anyway. The above example would've used the Git protocol either
+way since the Git detector would've detected it was a GitHub URL.
+
+### Protocol-Specific Options
+
+Each protocol can support protocol-specific options to configure that
+protocol. For example, the `git` protocol supports specifying a `ref`
+query parameter that tells it what ref to checkout for that Git
+repository.
+
+The options are specified as query parameters on the URL (or URL-like string)
+given to go-getter. Using the Git example above, the URL below is a valid
+input to go-getter:
+
+    github.com/hashicorp/go-getter?ref=abcd1234
+
+The protocol-specific options are documented below the URL format
+section. But because they are part of the URL, we point them out here so
+you know they exist.
+
+### Checksumming
+
+For file downloads of any protocol, go-getter can automatically verify
+a checksum for you. Note that checksumming only works for downloading files,
+not directories, but checksumming will work for any protocol.
+
+To checksum a file, append a `checksum` query parameter to the URL.
+The parameter value should be in the format of `type:value`, where
+type is "md5", "sha1", "sha256", or "sha512". The "value" should be
+the actual checksum value. go-getter will parse out this query parameter
+automatically and use it to verify the checksum.
An example URL
+is shown below:
+
+```
+./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
+```
+
+The checksum query parameter is never sent to the backend protocol
+implementation. It is used at a higher level by go-getter itself.
+
+### Unarchiving
+
+go-getter will automatically unarchive files into a file or directory
+based on the extension of the file being requested (over any protocol).
+This works for both file and directory downloads.
+
+go-getter looks for an `archive` query parameter to specify the format of
+the archive. If this isn't specified, go-getter will use the extension of
+the path to see if it appears archived. Unarchiving can be explicitly
+disabled by setting the `archive` query parameter to `false`.
+
+The following archive formats are supported:
+
+  * `tar.gz` and `tgz`
+  * `tar.bz2` and `tbz2`
+  * `zip`
+  * `gz`
+  * `bz2`
+
+For example:
+
+```
+./foo.zip
+```
+
+This will automatically be inferred to be a ZIP file and will be extracted.
+You can also be explicit about the archive type:
+
+```
+./some/other/path?archive=zip
+```
+
+And finally, you can disable archiving completely:
+
+```
+./some/path?archive=false
+```
+
+You can combine unarchiving with the other features of go-getter such
+as checksumming. The special `archive` query parameter will be removed
+from the URL before going to the final protocol downloader.
+
+## Protocol-Specific Options
+
+This section documents the protocol-specific options that can be specified
+for go-getter. These options should be appended to the input as normal query
+parameters. Depending on the usage of go-getter, applications may provide
+alternate ways of inputting options. For example, [Nomad](https://www.nomadproject.io)
+provides a nice options block for specifying options rather than in the URL.
+
+### General (All Protocols)
+
+The options below are available to all protocols:
+
+  * `archive` - The archive format to use to unarchive this file, or "" (empty
+    string) to disable unarchiving. For more details, see the complete section
+    on archive support above.
+
+  * `checksum` - Checksum to verify the downloaded file or archive. See
+    the entire section on checksumming above for format and more details.
+
+### Local Files (`file`)
+
+None
+
+### Git (`git`)
+
+  * `ref` - The Git ref to checkout. This is a ref, so it can point to
+    a commit SHA, a branch name, etc. If it is a named ref such as a branch
+    name, go-getter will update it to the latest on each get.
+
+  * `sshkey` - An SSH private key to use during clones. The provided key must
+    be a base64-encoded string. For example, to generate a suitable `sshkey`
+    from a private key file on disk, you would run `base64 -w0 <path-to-private-key>`.
+
+    **Note**: Git 2.3+ is required to use this feature.
+
+### Mercurial (`hg`)
+
+  * `rev` - The Mercurial revision to checkout.
+
+### HTTP (`http`)
+
+None
+
+### S3 (`s3`)
+
+S3 takes various access configurations in the URL. Note that it will also
+read these from standard AWS environment variables if they're set. If
+the query parameters are present, these take priority.
+
+  * `aws_access_key_id` - AWS access key.
+  * `aws_access_key_secret` - AWS access key secret.
+  * `aws_access_token` - AWS access token if this is being used.
+
+#### Using IAM Instance Profiles with S3
+
+If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
+using credentials, then just omit these, and the profile, if available,
+will be used automatically.
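+
+For example, the two URLs below request the same object; the first passes
+credentials explicitly as query parameters (the key values are placeholders),
+while the second relies on the environment or an instance profile:
+
+```
+s3::https://s3.amazonaws.com/bucket/foo?aws_access_key_id=KEYID&aws_access_key_secret=SECRET
+s3::https://s3.amazonaws.com/bucket/foo
+```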
+
+#### S3 Bucket Examples
+
+S3 has several addressing schemes used to reference your bucket. These are
+listed here: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+
+Some examples for these addressing schemes:
+- s3::https://s3.amazonaws.com/bucket/foo
+- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
+- bucket.s3.amazonaws.com/foo
+- bucket.s3-eu-west-1.amazonaws.com/foo/bar
+
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml
new file mode 100644
index 0000000000..159dad4dc2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml
@@ -0,0 +1,16 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\src\github.com\hashicorp\go-getter
+environment:
+  GOPATH: c:\gopath
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -d -v -t ./...
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
new file mode 100644
index 0000000000..876812a0a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client.go
@@ -0,0 +1,335 @@
+package getter
+
+import (
+    "bytes"
+    "crypto/md5"
+    "crypto/sha1"
+    "crypto/sha256"
+    "crypto/sha512"
+    "encoding/hex"
+    "fmt"
+    "hash"
+    "io"
+    "io/ioutil"
+    "os"
+    "path/filepath"
+    "strconv"
+    "strings"
+
+    urlhelper "github.com/hashicorp/go-getter/helper/url"
+)
+
+// Client is a client for downloading things.
+//
+// Top-level functions such as Get are shortcuts for interacting with a client.
+// Using a client directly allows more fine-grained control over how downloading
+// is done, as well as customizing the protocols supported.
+type Client struct {
+    // Src is the source URL to get.
+    //
+    // Dst is the path to save the downloaded thing as. If Dir is set to
+    // true, then this should be a directory. If the directory doesn't exist,
+    // it will be created for you.
+    //
+    // Pwd is the working directory for detection. If this isn't set, some
+    // detection may fail. Client will not default pwd to the current
+    // working directory for security reasons.
+    Src string
+    Dst string
+    Pwd string
+
+    // Mode is the method of download the client will use. See ClientMode
+    // for documentation.
+    Mode ClientMode
+
+    // Detectors is the list of detectors that are tried on the source.
+    // If this is nil, then the default Detectors will be used.
+    Detectors []Detector
+
+    // Decompressors is the map of decompressors supported by this client.
+    // If this is nil, then the default value is the Decompressors global.
+    Decompressors map[string]Decompressor
+
+    // Getters is the map of protocols supported by this client. If this
+    // is nil, then the default Getters variable will be used.
+    Getters map[string]Getter
+
+    // Dir, if true, tells the Client it is downloading a directory (versus
+    // a single file). This distinction is necessary since filenames and
+    // directory names follow the same format so disambiguating is impossible
+    // without knowing ahead of time.
+    //
+    // WARNING: deprecated. If Mode is set, that will take precedence.
+    Dir bool
+}
+
+// Get downloads the configured source to the destination.
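+//
+// Internally, Get runs the configured Detectors over Src, strips the
+// magic "archive" and "checksum" query parameters, and then dispatches
+// to the Getter registered for the resulting URL scheme.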
+func (c *Client) Get() error {
+    // Store this locally since there are cases we swap this
+    mode := c.Mode
+    if mode == ClientModeInvalid {
+        if c.Dir {
+            mode = ClientModeDir
+        } else {
+            mode = ClientModeFile
+        }
+    }
+
+    // Default decompressor value
+    decompressors := c.Decompressors
+    if decompressors == nil {
+        decompressors = Decompressors
+    }
+
+    // Detect the URL. This is safe if it is already detected.
+    detectors := c.Detectors
+    if detectors == nil {
+        detectors = Detectors
+    }
+    src, err := Detect(c.Src, c.Pwd, detectors)
+    if err != nil {
+        return err
+    }
+
+    // Determine if we have a forced protocol, i.e. "git::http://..."
+    force, src := getForcedGetter(src)
+
+    // If there is a subdir component, then we download the root separately
+    // and then copy over the proper subdir.
+    var realDst string
+    dst := c.Dst
+    src, subDir := SourceDirSubdir(src)
+    if subDir != "" {
+        tmpDir, err := ioutil.TempDir("", "tf")
+        if err != nil {
+            return err
+        }
+        if err := os.RemoveAll(tmpDir); err != nil {
+            return err
+        }
+        defer os.RemoveAll(tmpDir)
+
+        realDst = dst
+        dst = tmpDir
+    }
+
+    u, err := urlhelper.Parse(src)
+    if err != nil {
+        return err
+    }
+    if force == "" {
+        force = u.Scheme
+    }
+
+    getters := c.Getters
+    if getters == nil {
+        getters = Getters
+    }
+
+    g, ok := getters[force]
+    if !ok {
+        return fmt.Errorf(
+            "download not supported for scheme '%s'", force)
+    }
+
+    // We have magic query parameters that we use to signal different features
+    q := u.Query()
+
+    // Determine if we have an archive type
+    archiveV := q.Get("archive")
+    if archiveV != "" {
+        // Delete the parameter since it is a magic parameter we don't
+        // want to pass on to the Getter
+        q.Del("archive")
+        u.RawQuery = q.Encode()
+
+        // If we can parse the value as a bool and it is false, then
+        // set the archive to "-" which should never map to a decompressor
+        if b, err := strconv.ParseBool(archiveV); err == nil && !b {
+            archiveV = "-"
+        }
+    }
+    if archiveV == "" {
+        // We don't appear to... but is it part of the filename?
+        matchingLen := 0
+        for k := range decompressors {
+            if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen {
+                archiveV = k
+                matchingLen = len(k)
+            }
+        }
+    }
+
+    // If we have a decompressor, then we need to change the destination
+    // to download to a temporary path. We unarchive this into the final,
+    // real path.
+    var decompressDst string
+    var decompressDir bool
+    decompressor := decompressors[archiveV]
+    if decompressor != nil {
+        // Create a temporary directory to store our archive. We delete
+        // this at the end of everything.
+        td, err := ioutil.TempDir("", "getter")
+        if err != nil {
+            return fmt.Errorf(
+                "Error creating temporary directory for archive: %s", err)
+        }
+        defer os.RemoveAll(td)
+
+        // Swap the download directory to be our temporary path and
+        // store the old values.
+        decompressDst = dst
+        decompressDir = mode != ClientModeFile
+        dst = filepath.Join(td, "archive")
+        mode = ClientModeFile
+    }
+
+    // Determine if we have a checksum
+    var checksumHash hash.Hash
+    var checksumValue []byte
+    if v := q.Get("checksum"); v != "" {
+        // Delete the query parameter if we have it.
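+        // The value is expected in "type:value" form, e.g.
+        // "md5:b7d96c89d09d9e204f5fedc4d5d55b21"; like "archive", it is a
+        // magic parameter handled here rather than passed to the Getter.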
+ q.Del("checksum") + u.RawQuery = q.Encode() + + // Determine the checksum hash type + checksumType := "" + idx := strings.Index(v, ":") + if idx > -1 { + checksumType = v[:idx] + } + switch checksumType { + case "md5": + checksumHash = md5.New() + case "sha1": + checksumHash = sha1.New() + case "sha256": + checksumHash = sha256.New() + case "sha512": + checksumHash = sha512.New() + default: + return fmt.Errorf( + "unsupported checksum type: %s", checksumType) + } + + // Get the remainder of the value and parse it into bytes + b, err := hex.DecodeString(v[idx+1:]) + if err != nil { + return fmt.Errorf("invalid checksum: %s", err) + } + + // Set our value + checksumValue = b + } + + if mode == ClientModeAny { + // Ask the getter which client mode to use + mode, err = g.ClientMode(u) + if err != nil { + return err + } + + // Destination is the base name of the URL path in "any" mode when + // a file source is detected. + if mode == ClientModeFile { + dst = filepath.Join(dst, filepath.Base(u.Path)) + } + } + + // If we're not downloading a directory, then just download the file + // and return. + if mode == ClientModeFile { + err := g.GetFile(dst, u) + if err != nil { + return err + } + + if checksumHash != nil { + if err := checksum(dst, checksumHash, checksumValue); err != nil { + return err + } + } + + if decompressor != nil { + // We have a decompressor, so decompress the current destination + // into the final destination with the proper mode. + err := decompressor.Decompress(decompressDst, dst, decompressDir) + if err != nil { + return err + } + + // Swap the information back + dst = decompressDst + if decompressDir { + mode = ClientModeAny + } else { + mode = ClientModeFile + } + } + + // We check the dir value again because it can be switched back + // if we were unarchiving. If we're still only Get-ing a file, then + // we're done. + if mode == ClientModeFile { + return nil + } + } + + // If we're at this point we're either downloading a directory or we've + // downloaded and unarchived a directory and we're just checking subdir. + // In the case we have a decompressor we don't Get because it was Get + // above. + if decompressor == nil { + // If we're getting a directory, then this is an error. You cannot + // checksum a directory. TODO: test + if checksumHash != nil { + return fmt.Errorf( + "checksum cannot be specified for directory download") + } + + // We're downloading a directory, which might require a bit more work + // if we're specifying a subdir. + err := g.Get(dst, u) + if err != nil { + err = fmt.Errorf("error downloading '%s': %s", src, err) + return err + } + } + + // If we have a subdir, copy that over + if subDir != "" { + if err := os.RemoveAll(realDst); err != nil { + return err + } + if err := os.MkdirAll(realDst, 0755); err != nil { + return err + } + + return copyDir(realDst, filepath.Join(dst, subDir), false) + } + + return nil +} + +// checksum is a simple method to compute the checksum of a source file +// and compare it to the given expected value. 
+func checksum(source string, h hash.Hash, v []byte) error { + f, err := os.Open(source) + if err != nil { + return fmt.Errorf("Failed to open file for checksum: %s", err) + } + defer f.Close() + + if _, err := io.Copy(h, f); err != nil { + return fmt.Errorf("Failed to hash: %s", err) + } + + if actual := h.Sum(nil); !bytes.Equal(actual, v) { + return fmt.Errorf( + "Checksums did not match.\nExpected: %s\nGot: %s", + hex.EncodeToString(v), + hex.EncodeToString(actual)) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/client_mode.go b/vendor/github.com/hashicorp/go-getter/client_mode.go new file mode 100644 index 0000000000..7f02509a78 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/client_mode.go @@ -0,0 +1,24 @@ +package getter + +// ClientMode is the mode that the client operates in. +type ClientMode uint + +const ( + ClientModeInvalid ClientMode = iota + + // ClientModeAny downloads anything it can. In this mode, dst must + // be a directory. If src is a file, it is saved into the directory + // with the basename of the URL. If src is a directory or archive, + // it is unpacked directly into dst. + ClientModeAny + + // ClientModeFile downloads a single file. In this mode, dst must + // be a file path (doesn't have to exist). src must point to a single + // file. It is saved as dst. + ClientModeFile + + // ClientModeDir downloads a directory. In this mode, dst must be + // a directory path (doesn't have to exist). src must point to an + // archive or directory (such as in s3). + ClientModeDir +) diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go new file mode 100644 index 0000000000..2f58e8aebe --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go @@ -0,0 +1,78 @@ +package getter + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +// +// If ignoreDot is set to true, then dot-prefixed files/folders are ignored. +func copyDir(dst string, src string, ignoreDot bool) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if path == src { + return nil + } + + if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If we have a file, copy the contents. 
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dstPath)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ if _, err := io.Copy(dstF, srcF); err != nil {
+ return err
+ }
+
+ // Chmod it
+ return os.Chmod(dstPath, info.Mode())
+ }
+
+ return filepath.Walk(src, walkFn)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go
new file mode 100644
index 0000000000..d18174cc3d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress.go
@@ -0,0 +1,29 @@
+package getter
+
+// Decompressor defines the interface that must be implemented to add
+// support for decompressing a type.
+type Decompressor interface {
+ // Decompress should decompress src to dst. dir specifies whether dst
+ // is a directory or single file. src is guaranteed to be a single file
+ // that exists. dst is not guaranteed to exist already.
+ Decompress(dst, src string, dir bool) error
+}
+
+// Decompressors is the mapping of extension to the Decompressor implementation
+// that will decompress that extension/type.
+var Decompressors map[string]Decompressor
+
+func init() {
+ tbzDecompressor := new(TarBzip2Decompressor)
+ tgzDecompressor := new(TarGzipDecompressor)
+
+ Decompressors = map[string]Decompressor{
+ "bz2": new(Bzip2Decompressor),
+ "gz": new(GzipDecompressor),
+ "tar.bz2": tbzDecompressor,
+ "tar.gz": tgzDecompressor,
+ "tbz2": tbzDecompressor,
+ "tgz": tgzDecompressor,
+ "zip": new(ZipDecompressor),
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
new file mode 100644
index 0000000000..339f4cf7af
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
@@ -0,0 +1,45 @@
+package getter
+
+import (
+ "compress/bzip2"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// Bzip2Decompressor is an implementation of Decompressor that can
+// decompress bz2 files.
+type Bzip2Decompressor struct{}
+
+func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error {
+ // Directory isn't supported at all
+ if dir {
+ return fmt.Errorf("bzip2-compressed files can only unarchive to a single file")
+ }
+
+ // Make sure the enclosing directory exists
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Bzip2 compression is second
+ bzipR := bzip2.NewReader(f)
+
+ // Copy it out
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, bzipR)
+ return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
new file mode 100644
index 0000000000..20010540ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
@@ -0,0 +1,49 @@
+package getter
+
+import (
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// GzipDecompressor is an implementation of Decompressor that can
+// decompress gzip files.
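Since `Decompressors` is an exported package-level map, a consumer can register support for additional archive extensions. Below is a hypothetical sketch with an invented `xz` stub, purely to show the registration shape:

```go
package main

import (
	"fmt"

	getter "github.com/hashicorp/go-getter"
)

// xzDecompressor is a hypothetical stub; it satisfies the Decompressor
// interface but does not actually implement xz decoding.
type xzDecompressor struct{}

func (d *xzDecompressor) Decompress(dst, src string, dir bool) error {
	return fmt.Errorf("xz decompression not implemented")
}

func main() {
	// Extend the extension-to-implementation mapping.
	getter.Decompressors["xz"] = new(xzDecompressor)
}
```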
+type GzipDecompressor struct{}
+
+func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error {
+ // Directory isn't supported at all
+ if dir {
+ return fmt.Errorf("gzip-compressed files can only unarchive to a single file")
+ }
+
+ // Make sure the enclosing directory exists
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // gzip compression is second
+ gzipR, err := gzip.NewReader(f)
+ if err != nil {
+ return err
+ }
+ defer gzipR.Close()
+
+ // Copy it out
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, gzipR)
+ return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
new file mode 100644
index 0000000000..c46ed4453b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
@@ -0,0 +1,95 @@
+package getter
+
+import (
+ "archive/tar"
+ "compress/bzip2"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// TarBzip2Decompressor is an implementation of Decompressor that can
+// decompress tar.bz2 files.
+type TarBzip2Decompressor struct{}
+
+func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error {
+ // If we're going into a directory we should make that first
+ mkdir := dst
+ if !dir {
+ mkdir = filepath.Dir(dst)
+ }
+ if err := os.MkdirAll(mkdir, 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Bzip2 compression is second
+ bzipR := bzip2.NewReader(f)
+
+ // Once bzip decompressed we have a tar format
+ tarR := tar.NewReader(bzipR)
+ done := false
+ for {
+ hdr, err := tarR.Next()
+ if err == io.EOF {
+ if !done {
+ // Empty archive
+ return fmt.Errorf("empty archive: %s", src)
+ }
+
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ path := dst
+ if dir {
+ path = filepath.Join(path, hdr.Name)
+ }
+
+ if hdr.FileInfo().IsDir() {
+ if !dir {
+ return fmt.Errorf("expected a single file: %s", src)
+ }
+
+ // A directory, just make the directory and continue unarchiving...
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+
+ continue
+ }
+
+ // We have a file. If we already decoded one, then it is an error
+ if !dir && done {
+ return fmt.Errorf("expected a single file, got multiple: %s", src)
+ }
+
+ // Mark that we're done so a later file in single-file mode errors
+ done = true
+
+ // Open the file for writing
+ dstF, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(dstF, tarR)
+ dstF.Close()
+ if err != nil {
+ return err
+ }
+
+ // Chmod the file
+ if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
+ return err
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
new file mode 100644
index 0000000000..686d6c2b64
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
@@ -0,0 +1,134 @@
+package getter
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "testing"
+)
+
+// TestDecompressCase is a single test case for testing decompressors
+type TestDecompressCase struct {
+ Input string // Input is the complete path to the input file
+ Dir bool // Dir is whether or not we're testing directory mode
+ Err bool // Err is whether we expect an error or not
+ DirList []string // DirList is the list of files for Dir mode
+ FileMD5 string // FileMD5 is the expected MD5 for a single file
+}
+
+// TestDecompressor is a helper function for testing generic decompressors.
+func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) {
+ for _, tc := range cases {
+ t.Logf("Testing: %s", tc.Input)
+
+ // Temporary dir to store stuff
+ td, err := ioutil.TempDir("", "getter")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ // Destination always joins "subdir/result" so that we test a fresh path
+ dst := filepath.Join(td, "subdir", "result")
+
+ // We use a function so defers work
+ func() {
+ defer os.RemoveAll(td)
+
+ // Decompress
+ err := d.Decompress(dst, tc.Input, tc.Dir)
+ if (err != nil) != tc.Err {
+ t.Fatalf("err %s: %s", tc.Input, err)
+ }
+ if tc.Err {
+ return
+ }
+
+ // If it isn't a directory, then check for a single file
+ if !tc.Dir {
+ fi, err := os.Stat(dst)
+ if err != nil {
+ t.Fatalf("err %s: %s", tc.Input, err)
+ }
+ if fi.IsDir() {
+ t.Fatalf("err %s: expected file, got directory", tc.Input)
+ }
+ if tc.FileMD5 != "" {
+ actual := testMD5(t, dst)
+ expected := tc.FileMD5
+ if actual != expected {
+ t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual)
+ }
+ }
+
+ return
+ }
+
+ // Convert expected for windows
+ expected := tc.DirList
+ if runtime.GOOS == "windows" {
+ for i, v := range expected {
+ expected[i] = strings.Replace(v, "/", "\\", -1)
+ }
+ }
+
+ // Directory, check for the correct contents
+ actual := testListDir(t, dst)
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected)
+ }
+ }()
+ }
+}
+
+func testListDir(t *testing.T, path string) []string {
+ var result []string
+ err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ sub = strings.TrimPrefix(sub, path)
+ if sub == "" {
+ return nil
+ }
+ sub = sub[1:] // Trim the leading path sep.
+
+ // If it is a dir, add trailing sep
+ if info.IsDir() {
+ sub += "/"
+ }
+
+ result = append(result, sub)
+ return nil
+ })
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ sort.Strings(result)
+ return result
+}
+
+func testMD5(t *testing.T, path string) string {
+ f, err := os.Open(path)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ defer f.Close()
+
+ h := md5.New()
+ _, err = io.Copy(h, f)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ result := h.Sum(nil)
+ return hex.EncodeToString(result)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
new file mode 100644
index 0000000000..e8b1c31cac
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
@@ -0,0 +1,99 @@
+package getter
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// TarGzipDecompressor is an implementation of Decompressor that can
+// decompress tar.gz files.
+type TarGzipDecompressor struct{}
+
+func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error {
+ // If we're going into a directory we should make that first
+ mkdir := dst
+ if !dir {
+ mkdir = filepath.Dir(dst)
+ }
+ if err := os.MkdirAll(mkdir, 0755); err != nil {
+ return err
+ }
+
+ // File first
+ f, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Gzip compression is second
+ gzipR, err := gzip.NewReader(f)
+ if err != nil {
+ return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err)
+ }
+ defer gzipR.Close()
+
+ // Once gzip decompressed we have a tar format
+ tarR := tar.NewReader(gzipR)
+ done := false
+ for {
+ hdr, err := tarR.Next()
+ if err == io.EOF {
+ if !done {
+ // Empty archive
+ return fmt.Errorf("empty archive: %s", src)
+ }
+
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ path := dst
+ if dir {
+ path = filepath.Join(path, hdr.Name)
+ }
+
+ if hdr.FileInfo().IsDir() {
+ if !dir {
+ return fmt.Errorf("expected a single file: %s", src)
+ }
+
+ // A directory, just make the directory and continue unarchiving...
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+
+ continue
+ }
+
+ // We have a file. If we already decoded one, then it is an error
+ if !dir && done {
+ return fmt.Errorf("expected a single file, got multiple: %s", src)
+ }
+
+ // Mark that we're done so a later file in single-file mode errors
+ done = true
+
+ // Open the file for writing
+ dstF, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ _, err = io.Copy(dstF, tarR)
+ dstF.Close()
+ if err != nil {
+ return err
+ }
+
+ // Chmod the file
+ if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
+ return err
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
new file mode 100644
index 0000000000..a065c076ff
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
@@ -0,0 +1,96 @@
+package getter
+
+import (
+ "archive/zip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+)
+
+// ZipDecompressor is an implementation of Decompressor that can
+// decompress zip files.
+type ZipDecompressor struct{}
+
+func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
+ // If we're going into a directory we should make that first
+ mkdir := dst
+ if !dir {
+ mkdir = filepath.Dir(dst)
+ }
+ if err := os.MkdirAll(mkdir, 0755); err != nil {
+ return err
+ }
+
+ // Open the zip
+ zipR, err := zip.OpenReader(src)
+ if err != nil {
+ return err
+ }
+ defer zipR.Close()
+
+ // Check the zip integrity
+ if len(zipR.File) == 0 {
+ // Empty archive
+ return fmt.Errorf("empty archive: %s", src)
+ }
+ if !dir && len(zipR.File) > 1 {
+ return fmt.Errorf("expected a single file: %s", src)
+ }
+
+ // Go through and unarchive
+ for _, f := range zipR.File {
+ path := dst
+ if dir {
+ path = filepath.Join(path, f.Name)
+ }
+
+ if f.FileInfo().IsDir() {
+ if !dir {
+ return fmt.Errorf("expected a single file: %s", src)
+ }
+
+ // A directory, just make the directory and continue unarchiving...
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return err
+ }
+
+ continue
+ }
+
+ // Create the enclosing directories if we must. ZIP files aren't
+ // required to contain entries for the directories themselves, so this
+ // can happen.
+ if dir {
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ }
+
+ // Open the file for reading
+ srcF, err := f.Open()
+ if err != nil {
+ return err
+ }
+
+ // Open the file for writing
+ dstF, err := os.Create(path)
+ if err != nil {
+ srcF.Close()
+ return err
+ }
+ _, err = io.Copy(dstF, srcF)
+ srcF.Close()
+ dstF.Close()
+ if err != nil {
+ return err
+ }
+
+ // Chmod the file
+ if err := os.Chmod(path, f.Mode()); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go
new file mode 100644
index 0000000000..481b737c6a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect.go
@@ -0,0 +1,97 @@
+package getter
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/hashicorp/go-getter/helper/url"
+)
+
+// Detector defines the interface that an invalid URL or a URL with a blank
+// scheme is passed through in order to determine if it is shorthand for
+// something else well-known.
+type Detector interface {
+ // Detect will detect whether the string matches a known pattern to
+ // turn it into a proper URL.
+ Detect(string, string) (string, bool, error)
+}
+
+// Detectors is the list of detectors that are tried on an invalid URL.
+// This is also the order they're tried (index 0 is first).
+var Detectors []Detector
+
+func init() {
+ Detectors = []Detector{
+ new(GitHubDetector),
+ new(BitBucketDetector),
+ new(S3Detector),
+ new(FileDetector),
+ }
+}
+
+// Detect turns a source string into another source string if it is
+// detected to be of a known pattern.
+//
+// The third parameter is the list of detectors to use, in the order to
+// try them. If you don't want to configure this, just use the global
+// Detectors variable.
+//
+// This is safe to be called with an already valid source string: Detect
+// will just return it.
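To make the detector chain concrete, here is a short usage sketch; the repository path is illustrative, and the printed result follows from the GitHubDetector logic later in this diff:

```go
package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// GitHub shorthand plus a //subdir component, expanded by the
	// default detector chain.
	src, err := getter.Detect(
		"github.com/hashicorp/terraform//modules", "/tmp", getter.Detectors)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(src) // git::https://github.com/hashicorp/terraform.git//modules
}
```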
+func Detect(src string, pwd string, ds []Detector) (string, error) { + getForce, getSrc := getForcedGetter(src) + + // Separate out the subdir if there is one, we don't pass that to detect + getSrc, subDir := SourceDirSubdir(getSrc) + + u, err := url.Parse(getSrc) + if err == nil && u.Scheme != "" { + // Valid URL + return src, nil + } + + for _, d := range ds { + result, ok, err := d.Detect(getSrc, pwd) + if err != nil { + return "", err + } + if !ok { + continue + } + + var detectForce string + detectForce, result = getForcedGetter(result) + result, detectSubdir := SourceDirSubdir(result) + + // If we have a subdir from the detection, then prepend it to our + // requested subdir. + if detectSubdir != "" { + if subDir != "" { + subDir = filepath.Join(detectSubdir, subDir) + } else { + subDir = detectSubdir + } + } + if subDir != "" { + u, err := url.Parse(result) + if err != nil { + return "", fmt.Errorf("Error parsing URL: %s", err) + } + u.Path += "//" + subDir + result = u.String() + } + + // Preserve the forced getter if it exists. We try to use the + // original set force first, followed by any force set by the + // detector. + if getForce != "" { + result = fmt.Sprintf("%s::%s", getForce, result) + } else if detectForce != "" { + result = fmt.Sprintf("%s::%s", detectForce, result) + } + + return result, nil + } + + return "", fmt.Errorf("invalid source string: %s", src) +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go new file mode 100644 index 0000000000..a183a17dfe --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go @@ -0,0 +1,66 @@ +package getter + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" +) + +// BitBucketDetector implements Detector to detect BitBucket URLs and turn +// them into URLs that the Git or Hg Getter can understand. +type BitBucketDetector struct{} + +func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.HasPrefix(src, "bitbucket.org/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) { + u, err := url.Parse("https://" + src) + if err != nil { + return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err) + } + + // We need to get info on this BitBucket repository to determine whether + // it is Git or Hg. 
+ var info struct {
+ SCM string `json:"scm"`
+ }
+ infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path
+ resp, err := http.Get(infoUrl)
+ if err != nil {
+ return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+ }
+ // Close the body so the connection can be reused.
+ defer resp.Body.Close()
+ if resp.StatusCode == 403 {
+ // A private repo
+ return "", true, fmt.Errorf(
+ "shorthand BitBucket URL can't be used for private repos, " +
+ "please use a full URL")
+ }
+ dec := json.NewDecoder(resp.Body)
+ if err := dec.Decode(&info); err != nil {
+ return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+ }
+
+ switch info.SCM {
+ case "git":
+ if !strings.HasSuffix(u.Path, ".git") {
+ u.Path += ".git"
+ }
+
+ return "git::" + u.String(), true, nil
+ case "hg":
+ return "hg::" + u.String(), true, nil
+ default:
+ return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go
new file mode 100644
index 0000000000..756ea43f83
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_file.go
@@ -0,0 +1,67 @@
+package getter
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// FileDetector implements Detector to detect file paths.
+type FileDetector struct{}
+
+func (d *FileDetector) Detect(src, pwd string) (string, bool, error) {
+ if len(src) == 0 {
+ return "", false, nil
+ }
+
+ if !filepath.IsAbs(src) {
+ if pwd == "" {
+ return "", true, fmt.Errorf(
+ "relative paths require a module with a pwd")
+ }
+
+ // Stat the pwd to determine if it is a symbolic link. If it is,
+ // then the pwd becomes the original directory. Otherwise,
+ // `filepath.Join` below does some weird stuff.
+ //
+ // We simply ignore a pwd that doesn't exist. That error will be
+ // caught later when we try to use the URL.
+ if fi, err := os.Lstat(pwd); !os.IsNotExist(err) {
+ if err != nil {
+ return "", true, err
+ }
+ if fi.Mode()&os.ModeSymlink != 0 {
+ pwd, err = os.Readlink(pwd)
+ if err != nil {
+ return "", true, err
+ }
+
+ // The symlink itself might be a relative path, so we have to
+ // resolve this to have a correctly rooted URL.
+ pwd, err = filepath.Abs(pwd)
+ if err != nil {
+ return "", true, err
+ }
+ }
+ }
+
+ src = filepath.Join(pwd, src)
+ }
+
+ return fmtFileURL(src), true, nil
+}
+
+func fmtFileURL(path string) string {
+ if runtime.GOOS == "windows" {
+ // Make sure we're using "/" on Windows. URLs are "/"-based.
+ path = filepath.ToSlash(path)
+ return fmt.Sprintf("file://%s", path)
+ }
+
+ // Make sure that we don't start with "/" since we add that below.
+ if path[0] == '/' {
+ path = path[1:]
+ }
+ return fmt.Sprintf("file:///%s", path)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go
new file mode 100644
index 0000000000..c084ad9acb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_github.go
@@ -0,0 +1,73 @@
+package getter
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// GitHubDetector implements Detector to detect GitHub URLs and turn
+// them into URLs that the Git Getter can understand.
+type GitHubDetector struct{} + +func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.HasPrefix(src, "github.com/") { + return d.detectHTTP(src) + } else if strings.HasPrefix(src, "git@github.com:") { + return d.detectSSH(src) + } + + return "", false, nil +} + +func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { + parts := strings.Split(src, "/") + if len(parts) < 3 { + return "", false, fmt.Errorf( + "GitHub URLs should be github.com/username/repo") + } + + urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/")) + url, err := url.Parse(urlStr) + if err != nil { + return "", true, fmt.Errorf("error parsing GitHub URL: %s", err) + } + + if !strings.HasSuffix(url.Path, ".git") { + url.Path += ".git" + } + + if len(parts) > 3 { + url.Path += "//" + strings.Join(parts[3:], "/") + } + + return "git::" + url.String(), true, nil +} + +func (d *GitHubDetector) detectSSH(src string) (string, bool, error) { + idx := strings.Index(src, ":") + qidx := strings.Index(src, "?") + if qidx == -1 { + qidx = len(src) + } + + var u url.URL + u.Scheme = "ssh" + u.User = url.User("git") + u.Host = "github.com" + u.Path = src[idx+1 : qidx] + if qidx < len(src) { + q, err := url.ParseQuery(src[qidx+1:]) + if err != nil { + return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err) + } + + u.RawQuery = q.Encode() + } + + return "git::" + u.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/detect_s3.go new file mode 100644 index 0000000000..8e0f4a03b4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_s3.go @@ -0,0 +1,61 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// S3Detector implements Detector to detect S3 URLs and turn +// them into URLs that the S3 getter can understand. 
+type S3Detector struct{}
+
+func (d *S3Detector) Detect(src, _ string) (string, bool, error) {
+ if len(src) == 0 {
+ return "", false, nil
+ }
+
+ if strings.Contains(src, ".amazonaws.com/") {
+ return d.detectHTTP(src)
+ }
+
+ return "", false, nil
+}
+
+func (d *S3Detector) detectHTTP(src string) (string, bool, error) {
+ parts := strings.Split(src, "/")
+ if len(parts) < 2 {
+ return "", false, fmt.Errorf(
+ "URL is not a valid S3 URL")
+ }
+
+ hostParts := strings.Split(parts[0], ".")
+ if len(hostParts) == 3 {
+ return d.detectPathStyle(hostParts[0], parts[1:])
+ } else if len(hostParts) == 4 {
+ return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:])
+ } else {
+ return "", false, fmt.Errorf(
+ "URL is not a valid S3 URL")
+ }
+}
+
+func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) {
+ urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/"))
+ url, err := url.Parse(urlStr)
+ if err != nil {
+ return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
+ }
+
+ return "s3::" + url.String(), true, nil
+}
+
+func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) {
+ urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/"))
+ url, err := url.Parse(urlStr)
+ if err != nil {
+ return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
+ }
+
+ return "s3::" + url.String(), true, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/folder_storage.go
new file mode 100644
index 0000000000..647ccf4592
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/folder_storage.go
@@ -0,0 +1,65 @@
+package getter
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// FolderStorage is an implementation of the Storage interface that manages
+// modules on the disk.
+type FolderStorage struct {
+ // StorageDir is the directory where the modules will be stored.
+ StorageDir string
+}
+
+// Dir implements Storage.Dir
+func (s *FolderStorage) Dir(key string) (d string, e bool, err error) {
+ d = s.dir(key)
+ _, err = os.Stat(d)
+ if err == nil {
+ // Directory exists
+ e = true
+ return
+ }
+ if os.IsNotExist(err) {
+ // Directory doesn't exist
+ d = ""
+ e = false
+ err = nil
+ return
+ }
+
+ // An error
+ d = ""
+ e = false
+ return
+}
+
+// Get implements Storage.Get
+func (s *FolderStorage) Get(key string, source string, update bool) error {
+ dir := s.dir(key)
+ if !update {
+ if _, err := os.Stat(dir); err == nil {
+ // If the directory already exists, then we're done since
+ // we're not updating.
+ return nil
+ } else if !os.IsNotExist(err) {
+ // If the error we got wasn't a file-not-exist error, then
+ // something went wrong and we should report it.
+ return fmt.Errorf("Error reading module directory: %s", err)
+ }
+ }
+
+ // Get the source. This always forces an update.
+ return Get(dir, source)
+}
+
+// dir returns the internal directory name that the given key is mapped to.
+func (s *FolderStorage) dir(key string) string {
+ sum := md5.Sum([]byte(key))
+ return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:]))
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go
new file mode 100644
index 0000000000..c3236f553e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get.go
@@ -0,0 +1,139 @@
+// getter is a package for downloading files or directories from a variety of
+// protocols.
+//
+// getter is unique in its ability to download both directories and files.
+// It also detects certain source strings as protocol-specific URLs. For
+// example, "github.com/hashicorp/go-getter" would turn into a Git URL and
+// use the Git protocol.
+//
+// Protocols and detectors are extensible.
+//
+// To get started, see Client.
+package getter
+
+import (
+ "bytes"
+ "fmt"
+ "net/url"
+ "os/exec"
+ "regexp"
+ "syscall"
+)
+
+// Getter defines the interface that schemes must implement to download
+// things.
+type Getter interface {
+ // Get downloads the given URL into the given directory. This always
+ // assumes that we're updating and gets the latest version that it can.
+ //
+ // The directory may already exist (if we're updating). If it is in a
+ // format that isn't understood, an error should be returned. Get shouldn't
+ // simply nuke the directory.
+ Get(string, *url.URL) error
+
+ // GetFile downloads the given URL into the given path. The URL must
+ // reference a single file. If possible, the Getter should check if
+ // the remote end contains the same file and no-op this operation.
+ GetFile(string, *url.URL) error
+
+ // ClientMode returns the mode based on the given URL. This is used to
+ // allow clients to let the getters decide which mode to use.
+ ClientMode(*url.URL) (ClientMode, error)
+}
+
+// Getters is the mapping of scheme to the Getter implementation that will
+// be used to get a dependency.
+var Getters map[string]Getter
+
+// forcedRegexp is the regular expression that finds forced getters. This
+// syntax is scheme::url, example: git::https://foo.com
+var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`)
+
+func init() {
+ httpGetter := &HttpGetter{Netrc: true}
+
+ Getters = map[string]Getter{
+ "file": new(FileGetter),
+ "git": new(GitGetter),
+ "hg": new(HgGetter),
+ "s3": new(S3Getter),
+ "http": httpGetter,
+ "https": httpGetter,
+ }
+}
+
+// Get downloads the directory specified by src into the folder specified by
+// dst. If dst already exists, Get will attempt to update it.
+//
+// src is a URL, whereas dst is always just a file path to a folder. This
+// folder doesn't need to exist. It will be created if it doesn't exist.
+func Get(dst, src string) error {
+ return (&Client{
+ Src: src,
+ Dst: dst,
+ Dir: true,
+ Getters: Getters,
+ }).Get()
+}
+
+// GetAny downloads a URL into the given destination. Unlike Get or
+// GetFile, both directories and files are supported.
+//
+// dst must be a directory. If src is a file, it will be downloaded
+// into dst with the basename of the URL. If src is a directory or
+// archive, it will be unpacked directly into dst.
+func GetAny(dst, src string) error {
+ return (&Client{
+ Src: src,
+ Dst: dst,
+ Mode: ClientModeAny,
+ Getters: Getters,
+ }).Get()
+}
+
+// GetFile downloads the file specified by src into the path specified by
+// dst.
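A quick usage sketch of the three package-level entry points defined above; the destination paths and source URLs are placeholders:

```go
package main

import (
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// Directory mode: fetch a repository into a folder.
	if err := getter.Get("./module", "git::https://example.com/repo.git"); err != nil {
		log.Fatal(err)
	}

	// File mode: fetch a single file to an exact path.
	if err := getter.GetFile("./app.zip", "https://example.com/app.zip"); err != nil {
		log.Fatal(err)
	}

	// Any mode: let the getter decide; dst must be a directory.
	if err := getter.GetAny("./out", "https://example.com/app.zip"); err != nil {
		log.Fatal(err)
	}
}
```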
+func GetFile(dst, src string) error { + return (&Client{ + Src: src, + Dst: dst, + Dir: false, + Getters: Getters, + }).Get() +} + +// getRunCommand is a helper that will run a command and capture the output +// in the case an error happens. +func getRunCommand(cmd *exec.Cmd) error { + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + err := cmd.Run() + if err == nil { + return nil + } + if exiterr, ok := err.(*exec.ExitError); ok { + // The program has exited with an exit code != 0 + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return fmt.Errorf( + "%s exited with %d: %s", + cmd.Path, + status.ExitStatus(), + buf.String()) + } + } + + return fmt.Errorf("error running %s: %s", cmd.Path, buf.String()) +} + +// getForcedGetter takes a source and returns the tuple of the forced +// getter and the raw URL (without the force syntax). +func getForcedGetter(src string) (string, string) { + var forced string + if ms := forcedRegexp.FindStringSubmatch(src); ms != nil { + forced = ms[1] + src = ms[2] + } + + return forced, src +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go new file mode 100644 index 0000000000..e5d2d61d7d --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_file.go @@ -0,0 +1,32 @@ +package getter + +import ( + "net/url" + "os" +) + +// FileGetter is a Getter implementation that will download a module from +// a file scheme. +type FileGetter struct { + // Copy, if set to true, will copy data instead of using a symlink + Copy bool +} + +func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) { + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + fi, err := os.Stat(path) + if err != nil { + return 0, err + } + + // Check if the source is a directory. + if fi.IsDir() { + return ClientModeDir, nil + } + + return ClientModeFile, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go new file mode 100644 index 0000000000..c89a2d5a43 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go @@ -0,0 +1,103 @@ +// +build !windows + +package getter + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" +) + +func (g *FileGetter) Get(dst string, u *url.URL) error { + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + // The source path must exist and be a directory to be usable. + if fi, err := os.Stat(path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if !fi.IsDir() { + return fmt.Errorf("source path must be a directory") + } + + fi, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + mode := fi.Mode() + if mode&os.ModeSymlink == 0 { + return fmt.Errorf("destination exists and is not a symlink") + } + + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + return os.Symlink(path, dst) +} + +func (g *FileGetter) GetFile(dst string, u *url.URL) error { + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + // The source path must exist and be a file to be usable. 
+ if fi, err := os.Stat(path); err != nil {
+ return fmt.Errorf("source path error: %s", err)
+ } else if fi.IsDir() {
+ return fmt.Errorf("source path must be a file")
+ }
+
+ _, err := os.Lstat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ // If the destination already exists, it must be a symlink
+ if err == nil {
+ // Remove the destination
+ if err := os.Remove(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ // If we're not copying, just symlink and we're done
+ if !g.Copy {
+ return os.Symlink(path, dst)
+ }
+
+ // Copy
+ srcF, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ dstF, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer dstF.Close()
+
+ _, err = io.Copy(dstF, srcF)
+ return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
new file mode 100644
index 0000000000..f87ed0a0be
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
@@ -0,0 +1,120 @@
+// +build windows
+
+package getter
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+func (g *FileGetter) Get(dst string, u *url.URL) error {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+ // The source path must exist and be a directory to be usable.
+ if fi, err := os.Stat(path); err != nil {
+ return fmt.Errorf("source path error: %s", err)
+ } else if !fi.IsDir() {
+ return fmt.Errorf("source path must be a directory")
+ }
+
+ fi, err := os.Lstat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ // If the destination already exists, it must be a symlink
+ if err == nil {
+ mode := fi.Mode()
+ if mode&os.ModeSymlink == 0 {
+ return fmt.Errorf("destination exists and is not a symlink")
+ }
+
+ // Remove the destination
+ if err := os.Remove(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ sourcePath := toBackslash(path)
+
+ // Use mklink to create a junction point
+ output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output)
+ }
+
+ return nil
+}
+
+func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+ path := u.Path
+ if u.RawPath != "" {
+ path = u.RawPath
+ }
+
+ // The source path must exist and be a file to be usable.
+ if fi, err := os.Stat(path); err != nil { + return fmt.Errorf("source path error: %s", err) + } else if fi.IsDir() { + return fmt.Errorf("source path must be a file") + } + + _, err := os.Lstat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the destination already exists, it must be a symlink + if err == nil { + // Remove the destination + if err := os.Remove(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // If we're not copying, just symlink and we're done + if !g.Copy { + return os.Symlink(path, dst) + } + + // Copy + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, srcF) + return err +} + +// toBackslash returns the result of replacing each slash character +// in path with a backslash ('\') character. Multiple separators are +// replaced by multiple backslashes. +func toBackslash(path string) string { + return strings.Replace(path, "/", "\\", -1) +} diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go new file mode 100644 index 0000000000..0728139832 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_git.go @@ -0,0 +1,225 @@ +package getter + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + urlhelper "github.com/hashicorp/go-getter/helper/url" + "github.com/hashicorp/go-version" +) + +// GitGetter is a Getter implementation that will download a module from +// a git repository. +type GitGetter struct{} + +func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { + return ClientModeDir, nil +} + +func (g *GitGetter) Get(dst string, u *url.URL) error { + if _, err := exec.LookPath("git"); err != nil { + return fmt.Errorf("git must be available and on the PATH") + } + + // Extract some query parameters we use + var ref, sshKey string + q := u.Query() + if len(q) > 0 { + ref = q.Get("ref") + q.Del("ref") + + sshKey = q.Get("sshkey") + q.Del("sshkey") + + // Copy the URL + var newU url.URL = *u + u = &newU + u.RawQuery = q.Encode() + } + + var sshKeyFile string + if sshKey != "" { + // Check that the git version is sufficiently new. + if err := checkGitVersion("2.3"); err != nil { + return fmt.Errorf("Error using ssh key: %v", err) + } + + // We have an SSH key - decode it. + raw, err := base64.StdEncoding.DecodeString(sshKey) + if err != nil { + return err + } + + // Create a temp file for the key and ensure it is removed. + fh, err := ioutil.TempFile("", "go-getter") + if err != nil { + return err + } + sshKeyFile = fh.Name() + defer os.Remove(sshKeyFile) + + // Set the permissions prior to writing the key material. + if err := os.Chmod(sshKeyFile, 0600); err != nil { + return err + } + + // Write the raw key into the temp file. 
+ _, err = fh.Write(raw) + fh.Close() + if err != nil { + return err + } + } + + // Clone or update the repository + _, err := os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + err = g.update(dst, sshKeyFile, ref) + } else { + err = g.clone(dst, sshKeyFile, u) + } + if err != nil { + return err + } + + // Next: check out the proper tag/branch if it is specified, and checkout + if ref != "" { + if err := g.checkout(dst, ref); err != nil { + return err + } + } + + // Lastly, download any/all submodules. + return g.fetchSubmodules(dst, sshKeyFile) +} + +// GetFile for Git doesn't support updating at this time. It will download +// the file every time. +func (g *GitGetter) GetFile(dst string, u *url.URL) error { + td, err := ioutil.TempDir("", "getter-git") + if err != nil { + return err + } + if err := os.RemoveAll(td); err != nil { + return err + } + + // Get the filename, and strip the filename from the URL so we can + // just get the repository directly. + filename := filepath.Base(u.Path) + u.Path = filepath.Dir(u.Path) + + // Get the full repository + if err := g.Get(td, u); err != nil { + return err + } + + // Copy the single file + u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + if err != nil { + return err + } + + fg := &FileGetter{Copy: true} + return fg.GetFile(dst, u) +} + +func (g *GitGetter) checkout(dst string, ref string) error { + cmd := exec.Command("git", "checkout", ref) + cmd.Dir = dst + return getRunCommand(cmd) +} + +func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error { + cmd := exec.Command("git", "clone", u.String(), dst) + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +func (g *GitGetter) update(dst, sshKeyFile, ref string) error { + // Determine if we're a branch. If we're NOT a branch, then we just + // switch to master prior to checking out + cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref) + cmd.Dir = dst + + if getRunCommand(cmd) != nil { + // Not a branch, switch to master. This will also catch non-existent + // branches, in which case we want to switch to master and then + // checkout the proper branch later. + ref = "master" + } + + // We have to be on a branch to pull + if err := g.checkout(dst, ref); err != nil { + return err + } + + cmd = exec.Command("git", "pull", "--ff-only") + cmd.Dir = dst + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +// fetchSubmodules downloads any configured submodules recursively. +func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error { + cmd := exec.Command("git", "submodule", "update", "--init", "--recursive") + cmd.Dir = dst + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +// setupGitEnv sets up the environment for the given command. This is used to +// pass configuration data to git and ssh and enables advanced cloning methods. +func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { + var sshOpts []string + + if sshKeyFile != "" { + // We have an SSH key temp file configured, tell ssh about this. + sshOpts = append(sshOpts, "-i", sshKeyFile) + } + + cmd.Env = append(os.Environ(), + // Set the ssh command to use for clones. + "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "), + ) +} + +// checkGitVersion is used to check the version of git installed on the system +// against a known minimum version. Returns an error if the installed version +// is older than the given minimum. 
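Stepping back from the helpers in this file: the `ref` and `sshkey` query parameters handled by `Get` above compose into source strings like the following sketch, where the host, repository, and key material are placeholders:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Placeholder key material; a real caller would read a PEM file.
	pem := []byte("-----BEGIN OPENSSH PRIVATE KEY-----\n...\n-----END OPENSSH PRIVATE KEY-----")

	// Pin a tag and pass the base64-encoded key, matching the query
	// parameters the GitGetter strips and decodes.
	src := fmt.Sprintf(
		"git::ssh://git@example.com/org/repo.git?ref=v1.2.0&sshkey=%s",
		base64.StdEncoding.EncodeToString(pem))
	fmt.Println(src)
}
```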
+func checkGitVersion(min string) error { + want, err := version.NewVersion(min) + if err != nil { + return err + } + + out, err := exec.Command("git", "version").Output() + if err != nil { + return err + } + + fields := strings.Fields(string(out)) + if len(fields) != 3 { + return fmt.Errorf("Unexpected 'git version' output: %q", string(out)) + } + + have, err := version.NewVersion(fields[2]) + if err != nil { + return err + } + + if have.LessThan(want) { + return fmt.Errorf("Required git version = %s, have %s", want, have) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go new file mode 100644 index 0000000000..820bdd488e --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_hg.go @@ -0,0 +1,131 @@ +package getter + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + + urlhelper "github.com/hashicorp/go-getter/helper/url" +) + +// HgGetter is a Getter implementation that will download a module from +// a Mercurial repository. +type HgGetter struct{} + +func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) { + return ClientModeDir, nil +} + +func (g *HgGetter) Get(dst string, u *url.URL) error { + if _, err := exec.LookPath("hg"); err != nil { + return fmt.Errorf("hg must be available and on the PATH") + } + + newURL, err := urlhelper.Parse(u.String()) + if err != nil { + return err + } + if fixWindowsDrivePath(newURL) { + // See valid file path form on http://www.selenic.com/hg/help/urls + newURL.Path = fmt.Sprintf("/%s", newURL.Path) + } + + // Extract some query parameters we use + var rev string + q := newURL.Query() + if len(q) > 0 { + rev = q.Get("rev") + q.Del("rev") + + newURL.RawQuery = q.Encode() + } + + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err != nil { + if err := g.clone(dst, newURL); err != nil { + return err + } + } + + if err := g.pull(dst, newURL); err != nil { + return err + } + + return g.update(dst, newURL, rev) +} + +// GetFile for Hg doesn't support updating at this time. It will download +// the file every time. +func (g *HgGetter) GetFile(dst string, u *url.URL) error { + td, err := ioutil.TempDir("", "getter-hg") + if err != nil { + return err + } + if err := os.RemoveAll(td); err != nil { + return err + } + + // Get the filename, and strip the filename from the URL so we can + // just get the repository directly. + filename := filepath.Base(u.Path) + u.Path = filepath.ToSlash(filepath.Dir(u.Path)) + + // If we're on Windows, we need to set the host to "localhost" for hg + if runtime.GOOS == "windows" { + u.Host = "localhost" + } + + // Get the full repository + if err := g.Get(td, u); err != nil { + return err + } + + // Copy the single file + u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename))) + if err != nil { + return err + } + + fg := &FileGetter{Copy: true} + return fg.GetFile(dst, u) +} + +func (g *HgGetter) clone(dst string, u *url.URL) error { + cmd := exec.Command("hg", "clone", "-U", u.String(), dst) + return getRunCommand(cmd) +} + +func (g *HgGetter) pull(dst string, u *url.URL) error { + cmd := exec.Command("hg", "pull") + cmd.Dir = dst + return getRunCommand(cmd) +} + +func (g *HgGetter) update(dst string, u *url.URL, rev string) error { + args := []string{"update"} + if rev != "" { + args = append(args, rev) + } + + cmd := exec.Command("hg", args...) 
+ cmd.Dir = dst
+ return getRunCommand(cmd)
+}
+
+func fixWindowsDrivePath(u *url.URL) bool {
+ // hg assumes a file:/// prefix for Windows drive letter file paths.
+ // (e.g. file:///c:/foo/bar)
+ // If the URL Path does not begin with a '/' character, the resulting URL
+ // path will have a file:// prefix. (e.g. file://c:/foo/bar)
+ // See http://www.selenic.com/hg/help/urls and the examples listed in
+ // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936
+ return runtime.GOOS == "windows" && u.Scheme == "file" &&
+ len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':'
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go
new file mode 100644
index 0000000000..3c020343ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_http.go
@@ -0,0 +1,219 @@
+package getter
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// HttpGetter is a Getter implementation that will download from an HTTP
+// endpoint.
+//
+// For file downloads, HTTP is used directly.
+//
+// The protocol for downloading a directory from an HTTP endpoint is as follows:
+//
+// An HTTP GET request is made to the URL with the additional GET parameter
+// "terraform-get=1". This lets you handle that scenario specially if you
+// wish. The response must be a 2xx.
+//
+// First, the "X-Terraform-Get" header is checked; it should contain
+// a source URL to download.
+//
+// If the header is not present, then a meta tag named "terraform-get"
+// is searched for, and its content should be a source URL.
+//
+// The source URL, whether from the header or meta tag, must be a fully
+// formed URL. The shorthand syntax of "github.com/foo/bar" and relative
+// paths are not allowed.
+type HttpGetter struct {
+ // Netrc, if true, will look up and use auth information found
+ // in the user's netrc file if available.
+ Netrc bool
+}
+
+func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) {
+ if strings.HasSuffix(u.Path, "/") {
+ return ClientModeDir, nil
+ }
+ return ClientModeFile, nil
+}
+
+func (g *HttpGetter) Get(dst string, u *url.URL) error {
+ // Copy the URL so we can modify it
+ var newU url.URL = *u
+ u = &newU
+
+ if g.Netrc {
+ // Add auth from netrc if we can
+ if err := addAuthFromNetrc(u); err != nil {
+ return err
+ }
+ }
+
+ // Add terraform-get to the parameters.
+ q := u.Query()
+ q.Add("terraform-get", "1")
+ u.RawQuery = q.Encode()
+
+ // Get the URL
+ resp, err := http.Get(u.String())
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ return fmt.Errorf("bad response code: %d", resp.StatusCode)
+ }
+
+ // Extract the source URL
+ var source string
+ if v := resp.Header.Get("X-Terraform-Get"); v != "" {
+ source = v
+ } else {
+ source, err = g.parseMeta(resp.Body)
+ if err != nil {
+ return err
+ }
+ }
+ if source == "" {
+ return fmt.Errorf("no source URL was returned")
+ }
+
+ // If there is a subdir component, then we download the root separately
+ // into a temporary directory, then copy over the proper subdir.
+ source, subDir := SourceDirSubdir(source) + if subDir == "" { + return Get(dst, source) + } + + // We have a subdir, time to jump some hoops + return g.getSubdir(dst, source, subDir) +} + +func (g *HttpGetter) GetFile(dst string, u *url.URL) error { + resp, err := http.Get(u.String()) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return fmt.Errorf("bad response code: %d", resp.StatusCode) + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + f, err := os.Create(dst) + if err != nil { + return err + } + defer f.Close() + + _, err = io.Copy(f, resp.Body) + return err +} + +// getSubdir downloads the source into the destination, but with +// the proper subdir. +func (g *HttpGetter) getSubdir(dst, source, subDir string) error { + // Create a temporary directory to store the full source + td, err := ioutil.TempDir("", "tf") + if err != nil { + return err + } + defer os.RemoveAll(td) + + // Download that into the given directory + if err := Get(td, source); err != nil { + return err + } + + // Make sure the subdir path actually exists + sourcePath := filepath.Join(td, subDir) + if _, err := os.Stat(sourcePath); err != nil { + return fmt.Errorf( + "Error downloading %s: %s", source, err) + } + + // Copy the subdirectory into our actual destination. + if err := os.RemoveAll(dst); err != nil { + return err + } + + // Make the final destination + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + + return copyDir(dst, sourcePath, false) +} + +// parseMeta looks for the first meta tag in the given reader that +// will give us the source URL. +func (g *HttpGetter) parseMeta(r io.Reader) (string, error) { + d := xml.NewDecoder(r) + d.CharsetReader = charsetReader + d.Strict = false + var err error + var t xml.Token + for { + t, err = d.Token() + if err != nil { + if err == io.EOF { + err = nil + } + return "", err + } + if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { + return "", nil + } + if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { + return "", nil + } + e, ok := t.(xml.StartElement) + if !ok || !strings.EqualFold(e.Name.Local, "meta") { + continue + } + if attrValue(e.Attr, "name") != "terraform-get" { + continue + } + if f := attrValue(e.Attr, "content"); f != "" { + return f, nil + } + } +} + +// attrValue returns the attribute value for the case-insensitive key +// `name', or the empty string if nothing is found. +func attrValue(attrs []xml.Attr, name string) string { + for _, a := range attrs { + if strings.EqualFold(a.Name.Local, name) { + return a.Value + } + } + return "" +} + +// charsetReader returns a reader for the given charset. Currently +// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful +// error which is printed by go get, so the user can find why the package +// wasn't downloaded if the encoding is not supported. Note that, in +// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters +// greater than 0x7f are not rejected). 
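For the directory protocol described in the `HttpGetter` doc comment above, a cooperating server might respond along these lines; this is a hypothetical sketch, not part of the vendored code:

```go
package main

import "net/http"

func main() {
	// When the client appends terraform-get=1, answer with the real
	// source URL in the X-Terraform-Get header. (A meta tag named
	// "terraform-get" in the body would also be honored.)
	http.HandleFunc("/module", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Query().Get("terraform-get") == "1" {
			w.Header().Set("X-Terraform-Get",
				"git::https://example.com/org/repo.git")
		}
		w.WriteHeader(http.StatusOK)
	})
	http.ListenAndServe(":8080", nil)
}
```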
+func charsetReader(charset string, input io.Reader) (io.Reader, error) { + switch strings.ToLower(charset) { + case "ascii": + return input, nil + default: + return nil, fmt.Errorf("can't decode XML document using charset %q", charset) + } +} diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go new file mode 100644 index 0000000000..882e694dce --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_mock.go @@ -0,0 +1,52 @@ +package getter + +import ( + "net/url" +) + +// MockGetter is an implementation of Getter that can be used for tests. +type MockGetter struct { + // Proxy, if set, will be called after recording the calls below. + // If it isn't set, then the *Err values will be returned. + Proxy Getter + + GetCalled bool + GetDst string + GetURL *url.URL + GetErr error + + GetFileCalled bool + GetFileDst string + GetFileURL *url.URL + GetFileErr error +} + +func (g *MockGetter) Get(dst string, u *url.URL) error { + g.GetCalled = true + g.GetDst = dst + g.GetURL = u + + if g.Proxy != nil { + return g.Proxy.Get(dst, u) + } + + return g.GetErr +} + +func (g *MockGetter) GetFile(dst string, u *url.URL) error { + g.GetFileCalled = true + g.GetFileDst = dst + g.GetFileURL = u + + if g.Proxy != nil { + return g.Proxy.GetFile(dst, u) + } + return g.GetFileErr +} + +func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) { + if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" { + return ClientModeDir, nil + } + return ClientModeFile, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go new file mode 100644 index 0000000000..d3bffeb173 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_s3.go @@ -0,0 +1,243 @@ +package getter + +import ( + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" +) + +// S3Getter is a Getter implementation that will download a module from +// a S3 bucket. +type S3Getter struct{} + +func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { + // Parse URL + region, bucket, path, _, creds, err := g.parseUrl(u) + if err != nil { + return 0, err + } + + // Create client config + config := g.getAWSConfig(region, creds) + sess := session.New(config) + client := s3.New(sess) + + // List the object(s) at the given prefix + req := &s3.ListObjectsInput{ + Bucket: aws.String(bucket), + Prefix: aws.String(path), + } + resp, err := client.ListObjects(req) + if err != nil { + return 0, err + } + + for _, o := range resp.Contents { + // Use file mode on exact match. + if *o.Key == path { + return ClientModeFile, nil + } + + // Use dir mode if child keys are found. + if strings.HasPrefix(*o.Key, path+"/") { + return ClientModeDir, nil + } + } + + // There was no match, so just return file mode. The download is going + // to fail but we will let S3 return the proper error later. 
+ return ClientModeFile, nil
+}
+
+func (g *S3Getter) Get(dst string, u *url.URL) error {
+ // Parse URL
+ region, bucket, path, _, creds, err := g.parseUrl(u)
+ if err != nil {
+ return err
+ }
+
+ // Remove destination if it already exists
+ _, err = os.Stat(dst)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+
+ if err == nil {
+ // Remove the destination
+ if err := os.RemoveAll(dst); err != nil {
+ return err
+ }
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ config := g.getAWSConfig(region, creds)
+ sess := session.New(config)
+ client := s3.New(sess)
+
+ // List files in path, keep listing until no more objects are found
+ lastMarker := ""
+ hasMore := true
+ for hasMore {
+ req := &s3.ListObjectsInput{
+ Bucket: aws.String(bucket),
+ Prefix: aws.String(path),
+ }
+ if lastMarker != "" {
+ req.Marker = aws.String(lastMarker)
+ }
+
+ resp, err := client.ListObjects(req)
+ if err != nil {
+ return err
+ }
+
+ hasMore = aws.BoolValue(resp.IsTruncated)
+
+ // Get each object storing each file relative to the destination path
+ for _, object := range resp.Contents {
+ lastMarker = aws.StringValue(object.Key)
+ objPath := aws.StringValue(object.Key)
+
+ // If the key ends with a slash assume it is a directory and ignore
+ if strings.HasSuffix(objPath, "/") {
+ continue
+ }
+
+ // Get the object destination path
+ objDst, err := filepath.Rel(path, objPath)
+ if err != nil {
+ return err
+ }
+ objDst = filepath.Join(dst, objDst)
+
+ if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (g *S3Getter) GetFile(dst string, u *url.URL) error {
+ region, bucket, path, version, creds, err := g.parseUrl(u)
+ if err != nil {
+ return err
+ }
+
+ config := g.getAWSConfig(region, creds)
+ sess := session.New(config)
+ client := s3.New(sess)
+ return g.getObject(client, dst, bucket, path, version)
+}
+
+func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error {
+ req := &s3.GetObjectInput{
+ Bucket: aws.String(bucket),
+ Key: aws.String(key),
+ }
+ if version != "" {
+ req.VersionId = aws.String(version)
+ }
+
+ resp, err := client.GetObject(req)
+ if err != nil {
+ return err
+ }
+
+ // Create all the parent directories
+ if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+ return err
+ }
+
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ _, err = io.Copy(f, resp.Body)
+ return err
+}
+
+func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config {
+ conf := &aws.Config{}
+ if creds == nil {
+ // Grab the metadata URL
+ metadataURL := os.Getenv("AWS_METADATA_URL")
+ if metadataURL == "" {
+ metadataURL = "http://169.254.169.254:80/latest"
+ }
+
+ creds = credentials.NewChainCredentials(
+ []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.New(session.New(&aws.Config{
+ Endpoint: aws.String(metadataURL),
+ })),
+ },
+ })
+ }
+
+ conf.Credentials = creds
+ if region != "" {
+ conf.Region = aws.String(region)
+ }
+
+ return conf
+}
+
+func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) {
+ // Expected host style: s3.amazonaws.com. They always have 3 parts,
+ // although the first may differ if we're accessing a specific region.
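Concretely, the URL forms `parseUrl` accepts look like the following sketch; bucket names, keys, and credentials are placeholders:

```go
package main

import "fmt"

func main() {
	examples := []string{
		// Path style with an explicit region and a pinned object version.
		"s3::https://s3-eu-west-1.amazonaws.com/my-bucket/modules/app.zip?version=3",
		// A bare s3.amazonaws.com host falls back to us-east-1.
		"s3::https://s3.amazonaws.com/my-bucket/modules/app.zip",
		// Static credentials via query parameters (never commit real keys).
		"s3::https://s3.amazonaws.com/my-bucket/app.zip?aws_access_key_id=KEY&aws_access_key_secret=SECRET",
	}
	for _, e := range examples {
		fmt.Println(e)
	}
}
```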
+ hostParts := strings.Split(u.Host, ".") + if len(hostParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } + + // Parse the region out of the first part of the host + region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") + if region == "" { + region = "us-east-1" + } + + pathParts := strings.SplitN(u.Path, "/", 3) + if len(pathParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } + + bucket = pathParts[1] + path = pathParts[2] + version = u.Query().Get("version") + + _, hasAwsId := u.Query()["aws_access_key_id"] + _, hasAwsSecret := u.Query()["aws_access_key_secret"] + _, hasAwsToken := u.Query()["aws_access_token"] + if hasAwsId || hasAwsSecret || hasAwsToken { + creds = credentials.NewStaticCredentials( + u.Query().Get("aws_access_key_id"), + u.Query().Get("aws_access_key_secret"), + u.Query().Get("aws_access_token"), + ) + } + + return +} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/helper/url/url.go new file mode 100644 index 0000000000..02497c2543 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url.go @@ -0,0 +1,14 @@ +package url + +import ( + "net/url" +) + +// Parse parses rawURL into a URL structure. +// The rawURL may be relative or absolute. +// +// Parse is a wrapper for the Go stdlib net/url Parse function, but returns +// Windows "safe" URLs on Windows platforms. +func Parse(rawURL string) (*url.URL, error) { + return parse(rawURL) +} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go new file mode 100644 index 0000000000..ed1352a917 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package url + +import ( + "net/url" +) + +func parse(rawURL string) (*url.URL, error) { + return url.Parse(rawURL) +} diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go new file mode 100644 index 0000000000..4655226f66 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go @@ -0,0 +1,40 @@ +package url + +import ( + "fmt" + "net/url" + "path/filepath" + "strings" +) + +func parse(rawURL string) (*url.URL, error) { + // Make sure we're using "/" since URLs are "/"-based. + rawURL = filepath.ToSlash(rawURL) + + u, err := url.Parse(rawURL) + if err != nil { + return nil, err + } + + if len(rawURL) > 1 && rawURL[1] == ':' { + // Assume we're dealing with a drive letter file path where the drive + // letter has been parsed into the URL Scheme, and the rest of the path + // has been parsed into the URL Path without the leading ':' character. + u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path) + u.Scheme = "" + } + + if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { + // Assume we're dealing with a drive letter file path where the drive + // letter has been parsed into the URL Host. + u.Path = fmt.Sprintf("%s%s", u.Host, u.Path) + u.Host = "" + } + + // Remove leading slash for absolute file paths. 
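+ // For example (illustrative comment): a parsed Path of "/C:/Users/tf" + // becomes "C:/Users/tf".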
+ if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' { + u.Path = u.Path[1:] + } + + return u, err +} diff --git a/vendor/github.com/hashicorp/go-getter/netrc.go b/vendor/github.com/hashicorp/go-getter/netrc.go new file mode 100644 index 0000000000..c7f6a3fb3f --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/netrc.go @@ -0,0 +1,67 @@ +package getter + +import ( + "fmt" + "net/url" + "os" + "runtime" + + "github.com/bgentry/go-netrc/netrc" + "github.com/mitchellh/go-homedir" +) + +// addAuthFromNetrc adds auth information to the URL from the user's +// netrc file if it can be found. This will only add the auth info +// if the URL doesn't already have auth info specified and +// the username is blank. +func addAuthFromNetrc(u *url.URL) error { + // If the URL already has auth information, do nothing + if u.User != nil && u.User.Username() != "" { + return nil + } + + // Get the netrc file path + path := os.Getenv("NETRC") + if path == "" { + filename := ".netrc" + if runtime.GOOS == "windows" { + filename = "_netrc" + } + + var err error + path, err = homedir.Expand("~/" + filename) + if err != nil { + return err + } + } + + // If the path is not a regular file, then do nothing + if fi, err := os.Stat(path); err != nil { + // File doesn't exist, do nothing + if os.IsNotExist(err) { + return nil + } + + // Some other error! + return err + } else if fi.IsDir() { + // File is directory, ignore + return nil + } + + // Load up the netrc file + net, err := netrc.ParseFile(path) + if err != nil { + return fmt.Errorf("Error parsing netrc file at %q: %s", path, err) + } + + machine := net.FindMachine(u.Host) + if machine == nil { + // Machine not found, no problem + return nil + } + + // Set the user info + u.User = url.UserPassword(machine.Login, machine.Password) + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go new file mode 100644 index 0000000000..4d5ee3cce5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/source.go @@ -0,0 +1,36 @@ +package getter + +import ( + "strings" +) + +// SourceDirSubdir takes a source and returns a tuple of the URL without +// the subdir and the subdir itself. +func SourceDirSubdir(src string) (string, string) { + // Calculate an offset to avoid accidentally marking the scheme + // as the dir. + var offset int + if idx := strings.Index(src, "://"); idx > -1 { + offset = idx + 3 + } + + // First see if we even have an explicit subdir + idx := strings.Index(src[offset:], "//") + if idx == -1 { + return src, "" + } + + idx += offset + subdir := src[idx+2:] + src = src[:idx] + + // Next, check if we have query parameters and push them onto the + // URL. + if idx = strings.Index(subdir, "?"); idx > -1 { + query := subdir[idx:] + subdir = subdir[:idx] + src += query + } + + return src, subdir +} diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/storage.go new file mode 100644 index 0000000000..2bc6b9ec33 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/storage.go @@ -0,0 +1,13 @@ +package getter + +// Storage is an interface that knows how to look up downloaded directories +// as well as download and update directories from their sources into the +// proper location. +type Storage interface { + // Dir returns the directory on local disk where the directory source + // can be loaded from. + Dir(string) (string, bool, error) + + // Get will download and optionally update the given directory.
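+ // The arguments are, in order: a storage key, the source URL, and whether + // to update an already-downloaded copy. (These parameter roles are inferred + // here for clarity; the interface itself leaves them unnamed.)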
+ Get(string, string, bool) error +} diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 0000000000..82b4de97c7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. 
Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md new file mode 100644 index 0000000000..e81be50e0d --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -0,0 +1,91 @@ +# go-multierror + +`go-multierror` is a package for Go that provides a mechanism for +representing a list of `error` values as a single `error`. + +This allows a function in Go to return an `error` that might actually +be a list of errors. If the caller knows this, they can unwrap the +list and access the errors. If the caller doesn't know, the error +formats to a nice human-readable format. + +`go-multierror` implements the +[errwrap](https://github.com/hashicorp/errwrap) interface so that it can +be used with that library, as well. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-multierror`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-multierror + +## Usage + +go-multierror is easy to use and purposely built to be unobtrusive in +existing Go applications/libraries that may not be aware of it. + +**Building a list of errors** + +The `Append` function is used to create a list of errors. 
This function +behaves a lot like the Go built-in `append` function: it doesn't matter +if the first argument is nil, a `multierror.Error`, or any other `error`, +the function behaves as you would expect. + +```go +var result error + +if err := step1(); err != nil { + result = multierror.Append(result, err) +} +if err := step2(); err != nil { + result = multierror.Append(result, err) +} + +return result +``` + +**Customizing the formatting of the errors** + +By specifying a custom `ErrorFormat`, you can customize the format +of the `Error() string` function: + +```go +var result *multierror.Error + +// ... accumulate errors here, maybe using Append + +if result != nil { + result.ErrorFormat = func([]error) string { + return "errors!" + } +} +``` + +**Accessing the list of errors** + +`multierror.Error` implements `error` so if the caller doesn't know about +multierror, it will work just fine. But if you're aware a multierror might +be returned, you can use type switches to access the list of errors: + +```go +if err := something(); err != nil { + if merr, ok := err.(*multierror.Error); ok { + // Use merr.Errors + } +} +``` + +**Returning a multierror only if there are errors** + +If you build a `multierror.Error`, you can use the `ErrorOrNil` function +to return an `error` implementation only if there are errors to return: + +```go +var result *multierror.Error + +// ... accumulate errors here + +// Return the `error` only if errors were added to the multierror, otherwise +// return nil since there are no errors. +return result.ErrorOrNil() +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go new file mode 100644 index 0000000000..00afa9b351 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -0,0 +1,37 @@ +package multierror + +// Append is a helper function that will append more errors +// onto an Error in order to create a larger multi-error. +// +// If err is not a multierror.Error, then it will be turned into +// one. If any of the errs are multierror.Error, they will be flattened +// one level into err. +func Append(err error, errs ...error) *Error { + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Go through each error and flatten + for _, e := range errs { + switch e := e.(type) { + case *Error: + err.Errors = append(err.Errors, e.Errors...) + default: + err.Errors = append(err.Errors, e) + } + } + + return err + default: + newErrs := make([]error, 0, len(errs)+1) + if err != nil { + newErrs = append(newErrs, err) + } + newErrs = append(newErrs, errs...) + + return Append(&Error{}, newErrs...) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go new file mode 100644 index 0000000000..aab8e9abec --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/flatten.go @@ -0,0 +1,26 @@ +package multierror + +// Flatten flattens the given error, merging any *Errors together into +// a single *Error. +func Flatten(err error) error { + // If it isn't an *Error, just return the error as-is + if _, ok := err.(*Error); !ok { + return err + } + + // Otherwise, make the result and flatten away!
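+ // For example (illustrative comment): flattening + // &Error{Errors: []error{errA, &Error{Errors: []error{errB, errC}}}} + // yields a single &Error{Errors: []error{errA, errB, errC}}.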
+ flatErr := new(Error) + flatten(err, flatErr) + return flatErr +} + +func flatten(err error, flatErr *Error) { + switch err := err.(type) { + case *Error: + for _, e := range err.Errors { + flatten(e, flatErr) + } + default: + flatErr.Errors = append(flatErr.Errors, err) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go new file mode 100644 index 0000000000..bb65a12e74 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/format.go @@ -0,0 +1,23 @@ +package multierror + +import ( + "fmt" + "strings" +) + +// ErrorFormatFunc is a function callback that is called by Error to +// turn the list of errors into a string. +type ErrorFormatFunc func([]error) string + +// ListFormatFunc is a basic formatter that outputs the number of errors +// that occurred along with a bullet point list of the errors. +func ListFormatFunc(es []error) string { + points := make([]string, len(es)) + for i, err := range es { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d error(s) occurred:\n\n%s", + len(es), strings.Join(points, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go new file mode 100644 index 0000000000..2ea0827329 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -0,0 +1,51 @@ +package multierror + +import ( + "fmt" +) + +// Error is an error type to track multiple errors. This is used to +// accumulate errors and return them as a single "error". +type Error struct { + Errors []error + ErrorFormat ErrorFormatFunc +} + +func (e *Error) Error() string { + fn := e.ErrorFormat + if fn == nil { + fn = ListFormatFunc + } + + return fn(e.Errors) +} + +// ErrorOrNil returns an error interface if this Error represents +// a list of errors, or returns nil if the list of errors is empty. This +// function is useful at the end of accumulation to make sure that the value +// returned represents the existence of errors. +func (e *Error) ErrorOrNil() error { + if e == nil { + return nil + } + if len(e.Errors) == 0 { + return nil + } + + return e +} + +func (e *Error) GoString() string { + return fmt.Sprintf("*%#v", *e) +} + +// WrappedErrors returns the list of errors that this Error is wrapping. +// It is an implementation of the errwrap.Wrapper interface so that +// multierror.Error can be used with that library. +// +// This method is not safe to be called concurrently and is no different +// than accessing the Errors field directly. It is implemented only to +// satisfy the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + return e.Errors +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 0000000000..5c477abe44 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping.
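+// +// For example (illustrative comment): Prefix(err, "step 1:") wraps each +// error so that it formats as "step 1: <original error>".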
+func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE new file mode 100644 index 0000000000..82b4de97c7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md new file mode 100644 index 0000000000..2058cfb68d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -0,0 +1,161 @@ +# Go Plugin System over RPC + +`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system +that has been in use by HashiCorp tooling for over 3 years. While initially +created for [Packer](https://www.packer.io), it has since been used by +[Terraform](https://www.terraform.io) and [Otto](https://www.ottoproject.io), +with plans to also use it for [Nomad](https://www.nomadproject.io) and +[Vault](https://www.vaultproject.io). + +While the plugin system is over RPC, it is currently only designed to work +over a local [reliable] network. Plugins over a real network are not supported +and will lead to unexpected behavior. + +This plugin system has been used on millions of machines across many different +projects and has proven to be battle hardened and ready for production use. 
+ +## Features + +The HashiCorp plugin system supports a number of features: + +**Plugins are Go interface implementations.** This makes writing and consuming +plugins feel very natural. To a plugin author: you just implement an +interface as if it were going to run in the same process. For a plugin user: +you just use and call functions on an interface as if it were in the same +process. This plugin system handles the communication in between. + +**Complex arguments and return values are supported.** This library +provides APIs for handling complex arguments and return values such +as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library +(`MuxBroker`) for creating new connections between the client/server to +serve additional interfaces or transfer raw data. + +**Bidirectional communication.** Because the plugin system supports +complex arguments, the host process can send it interface implementations +and the plugin can call back into the host process. + +**Built-in Logging.** Any plugins that use the `log` standard library +will have log data automatically sent to the host process. The host +process will mirror this output prefixed with the path to the plugin +binary. This makes debugging with plugins simple. + +**Protocol Versioning.** A very basic "protocol version" is supported that +can be incremented to invalidate any previous plugins. This is useful when +interface signatures are changing, protocol level changes are necessary, +etc. When a protocol version is incompatible, a human-friendly error +message is shown to the end user. + +**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue +to use stdout/stderr as usual and the output will get mirrored back to +the host process. The host process can control which `io.Writer` these +streams go to, which makes it possible to suppress this mirroring. + +**TTY Preservation.** Plugin subprocesses are connected to the identical +stdin file descriptor as the host process, allowing software that requires +a TTY to work. For example, a plugin can execute `ssh` and even though there +are multiple subprocesses and RPC happening, it will look and act perfectly +to the end user. + +**Host upgrade while a plugin is running.** Plugins can be "reattached" +so that the host process can be upgraded while the plugin is still running. +This requires the host/plugin to know this is possible and daemonize +properly. `NewClient` takes a `ReattachConfig` to determine if and how to +reattach. + +## Architecture + +The HashiCorp plugin system works by launching subprocesses and communicating +over RPC (using standard `net/rpc`). A single connection is made between +any plugin and the host process, and we use a +[connection multiplexing](https://github.com/hashicorp/yamux) +library to multiplex any other connections on top. + +This architecture has a number of benefits: + + * Plugins can't crash your host process: A panic in a plugin doesn't + panic the plugin user. + + * Plugins are very easy to write: just write a Go application and `go build`. + Theoretically you could also use another language as long as it can + speak the Go `net/rpc` protocol but this hasn't yet been tried. + + * Plugins are very easy to install: just put the binary in a location where + the host will find it (depends on the host but this library also provides + helpers), and the plugin host handles the rest.
+ + * Plugins can be relatively secure: The plugin only has access to the + interfaces and args given to it, not to the entire memory space of the + process. More security features are planned (see the Roadmap section + below). + +## Usage + +To use the plugin system, you must take the following steps. These are +high-level steps that must be done. Examples are available in the +`examples/` directory, and a minimal sketch appears at the end of this +document. + + 1. Choose the interface(s) you want to expose for plugins. + + 2. For each interface, write an implementation of that interface + that communicates over an `*rpc.Client` (from the standard `net/rpc` + package) for every function call. Likewise, implement the RPC server + struct that it communicates with, which in turn delegates to a real, + concrete implementation. + + 3. Create a `Plugin` implementation that knows how to create the RPC + client/server for a given plugin type. + + 4. Plugin authors call `plugin.Serve` to serve a plugin from the + `main` function. + + 5. Plugin users use `plugin.Client` to launch a subprocess and request + an interface implementation over RPC. + +That's it! In practice, step 2 is the most tedious and time-consuming step. +Even so, it isn't very difficult and you can see examples in the `examples/` +directory as well as throughout our various open source projects. + +For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). + +## Roadmap + +Our plugin system is constantly evolving. As we use the plugin system for +new projects or for new features in existing projects, we constantly find +improvements we can make. + +At this point in time, the roadmap for the plugin system is: + +**Cryptographically Secure Plugins.** We'll implement signing plugins +and loading signed plugins in order to allow Vault to make use of multi-process +in a secure way. + +**Semantic Versioning.** Plugins will be able to implement a semantic version. +This plugin system will give host processes a system for constraining +versions. This is in addition to the protocol versioning already present +which is more for larger underlying changes. + +**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter) +to support automatic download + install of plugins. Paired with cryptographically +secure plugins (above), we can make this a safe operation for an amazing +user experience. + +## What About Shared Libraries? + +When we started using plugins (late 2012, early 2013), plugins over RPC +were the only option since Go didn't support dynamic library loading. Today, +Go still doesn't support dynamic library loading, though the Go team intends +to add it. Since 2012, our plugin system has stabilized from millions of users +using it, and has many benefits we've come to value greatly. + +For example, we intend to use this plugin system in +[Vault](https://www.vaultproject.io), and dynamic library loading will +simply never be acceptable in Vault for security reasons. That is an extreme +example, but we believe our plugin system has more upsides than downsides +compared to dynamic library loading and since we've had it built and tested +for years, we'll likely continue to use it. + +Shared libraries have one major advantage over our system: much +higher performance. In real world scenarios across our various tools, +we've never required any more performance out of our plugin system and it +has seen very high throughput, so this isn't a concern for us at the moment.
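+ +## Illustrative Sketch + +As a rough sketch of steps 1-3 above (the `Greeter` names here are +hypothetical and not part of this library), a plugin interface wired up +over `net/rpc` might look like this: + +```go +package shared + +import ( + "net/rpc" + + plugin "github.com/hashicorp/go-plugin" +) + +// Greeter is the interface exposed for plugins (step 1). +type Greeter interface { + Greet(name string) (string, error) +} + +// GreeterRPC runs in the host and forwards each call over the RPC +// client (step 2). +type GreeterRPC struct{ client *rpc.Client } + +func (g *GreeterRPC) Greet(name string) (string, error) { + var resp string + err := g.client.Call("Plugin.Greet", name, &resp) + return resp, err +} + +// GreeterRPCServer runs in the plugin process and delegates each RPC +// call to the real, concrete implementation (step 2). +type GreeterRPCServer struct{ Impl Greeter } + +func (s *GreeterRPCServer) Greet(name string, resp *string) error { + var err error + *resp, err = s.Impl.Greet(name) + return err +} + +// GreeterPlugin knows how to build both halves for go-plugin (step 3). +type GreeterPlugin struct{ Impl Greeter } + +func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) { + return &GreeterRPCServer{Impl: p.Impl}, nil +} + +func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &GreeterRPC{client: c}, nil +} +``` + +The host would then pass something like `map[string]plugin.Plugin{"greeter": +&GreeterPlugin{}}` in its `ClientConfig`, and the plugin binary would pass +the same map (with `Impl` set to a concrete `Greeter`) to `plugin.Serve`.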
+ diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go new file mode 100644 index 0000000000..9f8a0f2765 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -0,0 +1,581 @@ +package plugin + +import ( + "bufio" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode" +) + +// If this is 1, then we've called CleanupClients. This can be used +// by plugin RPC implementations to change error behavior since you +// can expect network connection errors at this point. This should be +// read by using sync/atomic. +var Killed uint32 = 0 + +// This is a slice of the "managed" clients which are cleaned up when +// calling CleanupClients +var managedClients = make([]*Client, 0, 5) +var managedClientsLock sync.Mutex + +// Error types +var ( + // ErrProcessNotFound is returned when a client is instantiated to + // reattach to an existing process and it isn't found. + ErrProcessNotFound = errors.New("Reattachment process not found") +) + +// Client handles the lifecycle of a plugin application. It launches +// plugins, connects to them, dispenses interface implementations, and handles +// killing the process. +// +// Plugin hosts should use one Client for each plugin executable. To +// dispense a plugin type, use the `Client.Client` function, and then +// call `Dispense`. This awkward API is mostly historical but is used to split +// the client that deals with subprocess management and the client that +// does RPC management. +// +// See NewClient and ClientConfig for using a Client. +type Client struct { + config *ClientConfig + exited bool + doneLogging chan struct{} + l sync.Mutex + address net.Addr + process *os.Process + client *RPCClient +} + +// ClientConfig is the configuration used to initialize a new +// plugin client. After being used to initialize a plugin client, +// that configuration must not be modified again. +type ClientConfig struct { + // HandshakeConfig is the configuration that must match servers. + HandshakeConfig + + // Plugins are the plugins that can be consumed. + Plugins map[string]Plugin + + // One of the following must be set, but not both. + // + // Cmd is the unstarted subprocess for starting the plugin. If this is + // set, then the Client starts the plugin process on its own and connects + // to it. + // + // Reattach is configuration for reattaching to an existing plugin process + // that is already running. This isn't common. + Cmd *exec.Cmd + Reattach *ReattachConfig + + // Managed represents if the client should be managed by the + // plugin package or not. If true, then by calling CleanupClients, + // it will automatically be cleaned up. Otherwise, the client + // user is fully responsible for making sure to Kill all plugin + // clients. By default the client is _not_ managed. + Managed bool + + // The minimum and maximum port to use for communicating with + // the subprocess. If not set, this defaults to 10,000 and 25,000 + // respectively. + MinPort, MaxPort uint + + // StartTimeout is the timeout to wait for the plugin to say it + // has started successfully. + StartTimeout time.Duration + + // If non-nil, then the stderr of the client will be written to here + // (as well as the log). This is the original os.Stderr of the subprocess. + // This isn't the output of synced stderr.
+ Stderr io.Writer + + // SyncStdout, SyncStderr can be set to override the + // respective os.Std* values in the plugin. Care should be taken to + // avoid races here. If these are nil, then this will automatically be + // hooked up to os.Stdin, Stdout, and Stderr, respectively. + // + // If the default values (nil) are used, then this package will not + // sync any of these streams. + SyncStdout io.Writer + SyncStderr io.Writer +} + +// ReattachConfig is used to configure a client to reattach to an +// already-running plugin process. You can retrieve this information by +// calling ReattachConfig on Client. +type ReattachConfig struct { + Addr net.Addr + Pid int +} + +// This makes sure all the managed subprocesses are killed and properly +// logged. This should be called before the parent process running the +// plugins exits. +// +// This must only be called _once_. +func CleanupClients() { + // Set the killed to true so that we don't get unexpected panics + atomic.StoreUint32(&Killed, 1) + + // Kill all the managed clients in parallel and use a WaitGroup + // to wait for them all to finish up. + var wg sync.WaitGroup + managedClientsLock.Lock() + for _, client := range managedClients { + wg.Add(1) + + go func(client *Client) { + client.Kill() + wg.Done() + }(client) + } + managedClientsLock.Unlock() + + log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...") + wg.Wait() +} + +// Creates a new plugin client which manages the lifecycle of an external +// plugin and gets the address for the RPC connection. +// +// The client must be cleaned up at some point by calling Kill(). If +// the client is a managed client (created with NewManagedClient) you +// can just call CleanupClients at the end of your program and they will +// be properly cleaned. +func NewClient(config *ClientConfig) (c *Client) { + if config.MinPort == 0 && config.MaxPort == 0 { + config.MinPort = 10000 + config.MaxPort = 25000 + } + + if config.StartTimeout == 0 { + config.StartTimeout = 1 * time.Minute + } + + if config.Stderr == nil { + config.Stderr = ioutil.Discard + } + + if config.SyncStdout == nil { + config.SyncStdout = ioutil.Discard + } + if config.SyncStderr == nil { + config.SyncStderr = ioutil.Discard + } + + c = &Client{config: config} + if config.Managed { + managedClientsLock.Lock() + managedClients = append(managedClients, c) + managedClientsLock.Unlock() + } + + return +} + +// Client returns an RPC client for the plugin. +// +// Subsequent calls to this will return the same RPC client. +func (c *Client) Client() (*RPCClient, error) { + addr, err := c.Start() + if err != nil { + return nil, err + } + + c.l.Lock() + defer c.l.Unlock() + + if c.client != nil { + return c.client, nil + } + + // Connect to the client + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + // Create the actual RPC client + c.client, err = NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = c.client.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + c.client.Close() + c.client = nil + return nil, err + } + + return c.client, nil +} + +// Tells whether or not the underlying process has exited. 
+func (c *Client) Exited() bool { + c.l.Lock() + defer c.l.Unlock() + return c.exited +} + +// End the executing subprocess (if it is running) and perform any cleanup +// tasks necessary such as capturing any remaining logs and so on. +// +// This method blocks until the process successfully exits. +// +// This method can safely be called multiple times. +func (c *Client) Kill() { + // Grab a lock to read some private fields. + c.l.Lock() + process := c.process + addr := c.address + doneCh := c.doneLogging + c.l.Unlock() + + // If there is no process, we never started anything. Nothing to kill. + if process == nil { + return + } + + // We need to check for address here. It is possible that the plugin + // started (process != nil) but has no address (addr == nil) if the + // plugin failed at startup. If we do have an address, we need to close + // the plugin net connections. + graceful := false + if addr != nil { + // Close the client to cleanly exit the process. + client, err := c.Client() + if err == nil { + err = client.Close() + + // If there is no error, then we attempt to wait for a graceful + // exit. If there was an error, we assume that graceful cleanup + // won't happen and just force kill. + graceful = err == nil + if err != nil { + // If there was an error just log it. We're going to force + // kill in a moment anyways. + log.Printf( + "[WARN] plugin: error closing client during Kill: %s", err) + } + } + } + + // If we're attempting a graceful exit, then we wait for a short period + // of time to allow that to happen. To wait for this we just wait on the + // doneCh which would be closed if the process exits. + if graceful { + select { + case <-doneCh: + return + case <-time.After(250 * time.Millisecond): + } + } + + // If graceful exiting failed, just kill it + process.Kill() + + // Wait for the client to finish logging so we have a complete log + <-doneCh +} + +// Starts the underlying subprocess, communicating with it to negotiate +// a port for RPC connections, and returning the address to connect via RPC. +// +// This method is safe to call multiple times. Subsequent calls have no effect. +// Once a client has been started once, it cannot be started again, even if +// it was killed. +func (c *Client) Start() (addr net.Addr, err error) { + c.l.Lock() + defer c.l.Unlock() + + if c.address != nil { + return c.address, nil + } + + // If one of cmd or reattach isn't set, then it is an error. We wrap + // this in a {} for scoping reasons, and hopeful that the escape + // analysis will pop the stock here. + { + cmdSet := c.config.Cmd != nil + attachSet := c.config.Reattach != nil + if cmdSet == attachSet { + return nil, fmt.Errorf("Only one of Cmd or Reattach must be set") + } + } + + // Create the logging channel for when we kill + c.doneLogging = make(chan struct{}) + + if c.config.Reattach != nil { + // Verify the process still exists. If not, then it is an error + p, err := os.FindProcess(c.config.Reattach.Pid) + if err != nil { + return nil, err + } + + // Attempt to connect to the addr since on Unix systems FindProcess + // doesn't actually return an error if it can't find the process. 
+ conn, err := net.Dial( + c.config.Reattach.Addr.Network(), + c.config.Reattach.Addr.String()) + if err != nil { + p.Kill() + return nil, ErrProcessNotFound + } + conn.Close() + + // Goroutine to mark exit status + go func(pid int) { + // Wait for the process to die + pidWait(pid) + + // Log so we can see it + log.Printf("[DEBUG] plugin: reattached plugin process exited\n") + + // Mark it + c.l.Lock() + defer c.l.Unlock() + c.exited = true + + // Close the logging channel since that doesn't work on reattach + close(c.doneLogging) + }(p.Pid) + + // Set the address and process + c.address = c.config.Reattach.Addr + c.process = p + + return c.address, nil + } + + env := []string{ + fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), + fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), + fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), + } + + stdout_r, stdout_w := io.Pipe() + stderr_r, stderr_w := io.Pipe() + + cmd := c.config.Cmd + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Env = append(cmd.Env, env...) + cmd.Stdin = os.Stdin + cmd.Stderr = stderr_w + cmd.Stdout = stdout_w + + log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args) + err = cmd.Start() + if err != nil { + return + } + + // Set the process + c.process = cmd.Process + + // Make sure the command is properly cleaned up if there is an error + defer func() { + r := recover() + + if err != nil || r != nil { + cmd.Process.Kill() + } + + if r != nil { + panic(r) + } + }() + + // Start goroutine to wait for process to exit + exitCh := make(chan struct{}) + go func() { + // Make sure we close the write end of our stderr/stdout so + // that the readers send EOF properly. + defer stderr_w.Close() + defer stdout_w.Close() + + // Wait for the command to end. + cmd.Wait() + + // Log and make sure to flush the logs write away + log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path) + os.Stderr.Sync() + + // Mark that we exited + close(exitCh) + + // Set that we exited, which takes a lock + c.l.Lock() + defer c.l.Unlock() + c.exited = true + }() + + // Start goroutine that logs the stderr + go c.logStderr(stderr_r) + + // Start a goroutine that is going to be reading the lines + // out of stdout + linesCh := make(chan []byte) + go func() { + defer close(linesCh) + + buf := bufio.NewReader(stdout_r) + for { + line, err := buf.ReadBytes('\n') + if line != nil { + linesCh <- line + } + + if err == io.EOF { + return + } + } + }() + + // Make sure after we exit we read the lines from stdout forever + // so they don't block since it is an io.Pipe + defer func() { + go func() { + for _ = range linesCh { + } + }() + }() + + // Some channels for the next step + timeout := time.After(c.config.StartTimeout) + + // Start looking for the address + log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path) + select { + case <-timeout: + err = errors.New("timeout while waiting for plugin to start") + case <-exitCh: + err = errors.New("plugin exited before we could connect") + case lineBytes := <-linesCh: + // Trim the line and split by "|" in order to get the parts of + // the output. + line := strings.TrimSpace(string(lineBytes)) + parts := strings.SplitN(line, "|", 4) + if len(parts) < 4 { + err = fmt.Errorf( + "Unrecognized remote plugin message: %s\n\n"+ + "This usually means that the plugin is either invalid or simply\n"+ + "needs to be recompiled to support the latest protocol.", line) + return + } + + // Check the core protocol. Wrapped in a {} for scoping. 
+ { + var coreProtocol int64 + coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing core protocol version: %s", err) + return + } + + if int(coreProtocol) != CoreProtocolVersion { + err = fmt.Errorf("Incompatible core API version with plugin. "+ + "Plugin version: %s, Ours: %d\n\n"+ + "To fix this, the plugin usually only needs to be recompiled.\n"+ + "Please report this to the plugin author.", parts[0], CoreProtocolVersion) + return + } + } + + // Parse the protocol version + var protocol int64 + protocol, err = strconv.ParseInt(parts[1], 10, 0) + if err != nil { + err = fmt.Errorf("Error parsing protocol version: %s", err) + return + } + + // Test the API version + if uint(protocol) != c.config.ProtocolVersion { + err = fmt.Errorf("Incompatible API version with plugin. "+ + "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion) + return + } + + switch parts[2] { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", parts[3]) + case "unix": + addr, err = net.ResolveUnixAddr("unix", parts[3]) + default: + err = fmt.Errorf("Unknown address type: %s", parts[3]) + } + } + + c.address = addr + return +} + +// ReattachConfig returns the information that must be provided to NewClient +// to reattach to the plugin process that this client started. This is +// useful for plugins that detach from their parent process. +// +// If this returns nil then the process hasn't been started yet. Please +// call Start or Client before calling this. +func (c *Client) ReattachConfig() *ReattachConfig { + c.l.Lock() + defer c.l.Unlock() + + if c.address == nil { + return nil + } + + if c.config.Cmd != nil && c.config.Cmd.Process == nil { + return nil + } + + // If we connected via reattach, just return the information as-is + if c.config.Reattach != nil { + return c.config.Reattach + } + + return &ReattachConfig{ + Addr: c.address, + Pid: c.config.Cmd.Process.Pid, + } +} + +func (c *Client) logStderr(r io.Reader) { + bufR := bufio.NewReader(r) + for { + line, err := bufR.ReadString('\n') + if line != "" { + c.config.Stderr.Write([]byte(line)) + + line = strings.TrimRightFunc(line, unicode.IsSpace) + log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line) + } + + if err == io.EOF { + break + } + } + + // Flag that we've completed logging for others + close(c.doneLogging) +} diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go new file mode 100644 index 0000000000..d22c566ed5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/discover.go @@ -0,0 +1,28 @@ +package plugin + +import ( + "path/filepath" +) + +// Discover discovers plugins that are in a given directory. +// +// The directory doesn't need to be absolute. For example, "." will work fine. +// +// This currently assumes any file matching the glob is a plugin. +// In the future this may be smarter about checking that a file is +// executable and so on. 
+// +// TODO: test +func Discover(glob, dir string) ([]string, error) { + var err error + + // Make the directory absolute if it isn't already + if !filepath.IsAbs(dir) { + dir, err = filepath.Abs(dir) + if err != nil { + return nil, err + } + } + + return filepath.Glob(filepath.Join(dir, glob)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go new file mode 100644 index 0000000000..22a7baa6a0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/error.go @@ -0,0 +1,24 @@ +package plugin + +// This is a type that wraps error types so that they can be messaged +// across RPC channels. Since "error" is an interface, we can't always +// gob-encode the underlying structure. This is a valid error interface +// implementer that we will push across. +type BasicError struct { + Message string +} + +// NewBasicError is used to create a BasicError. +// +// err is allowed to be nil. +func NewBasicError(err error) *BasicError { + if err == nil { + return nil + } + + return &BasicError{err.Error()} +} + +func (e *BasicError) Error() string { + return e.Message +} diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go new file mode 100644 index 0000000000..01c45ad7c6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/mux_broker.go @@ -0,0 +1,204 @@ +package plugin + +import ( + "encoding/binary" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/yamux" +) + +// MuxBroker is responsible for brokering multiplexed connections by unique ID. +// +// It is used by plugins to multiplex multiple RPC connections and data +// streams on top of a single connection between the plugin process and the +// host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new multiplexed streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type MuxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *MuxBroker { + return &MuxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve an RPC server on that stream ID. This is used to easily serve +// complex arguments. +// +// The served interface is always registered to the "Plugin" name. 
+func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { + conn, err := m.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + + serve(conn, "Plugin", v) +} + +// Close closes the connection and all sub-connections. +func (m *MuxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of RPC calls. In practice +// we've never seen it happen. +func (m *MuxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of MuxBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *MuxBroker) Run() { + for { + stream, err := m.session.AcceptStream() + if err != nil { + // Once we receive an error, just exit + break + } + + // Read the stream ID from the stream + var id uint32 + if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { + stream.Close() + continue + } + + // Initialize the waiter + p := m.getStream(id) + select { + case p.ch <- stream: + default: + } + + // Wait for a timeout + go m.timeoutWait(id, p) + } +} + +func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &muxBrokerPending{ + ch: make(chan net.Conn, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + timeout := false + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + timeout = true + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) + + // If we timed out, then check if we have a channel in the buffer, + // and if so, close it. + if timeout { + select { + case s := <-p.ch: + s.Close() + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go new file mode 100644 index 0000000000..37c8fd653f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/plugin.go @@ -0,0 +1,25 @@ +// The plugin package exposes functions and helpers for communicating to +// plugins which are implemented as standalone binary applications. +// +// plugin.Client fully manages the lifecycle of executing the application, +// connecting to it, and returning the RPC client for dispensing plugins. +// +// plugin.Serve fully manages listeners to expose an RPC server from a binary +// that plugin.Client can connect to. 
+package plugin + +import ( + "net/rpc" +) + +// Plugin is the interface that is implemented to serve/connect to an +// inteface implementation. +type Plugin interface { + // Server should return the RPC server compatible struct to serve + // the methods that the Client calls over net/rpc. + Server(*MuxBroker) (interface{}, error) + + // Client returns an interface implementation for the plugin you're + // serving that communicates to the server end of the plugin. + Client(*MuxBroker, *rpc.Client) (interface{}, error) +} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go new file mode 100644 index 0000000000..88c999a580 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process.go @@ -0,0 +1,24 @@ +package plugin + +import ( + "time" +) + +// pidAlive checks whether a pid is alive. +func pidAlive(pid int) bool { + return _pidAlive(pid) +} + +// pidWait blocks for a process to exit. +func pidWait(pid int) error { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for range ticker.C { + if !pidAlive(pid) { + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go new file mode 100644 index 0000000000..70ba546bf6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package plugin + +import ( + "os" + "syscall" +) + +// _pidAlive tests whether a process is alive or not by sending it Signal 0, +// since Go otherwise has no way to test this. +func _pidAlive(pid int) bool { + proc, err := os.FindProcess(pid) + if err == nil { + err = proc.Signal(syscall.Signal(0)) + } + + return err == nil +} diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go new file mode 100644 index 0000000000..9f7b018090 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/process_windows.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "syscall" +) + +const ( + // Weird name but matches the MSDN docs + exit_STILL_ACTIVE = 259 + + processDesiredAccess = syscall.STANDARD_RIGHTS_READ | + syscall.PROCESS_QUERY_INFORMATION | + syscall.SYNCHRONIZE +) + +// _pidAlive tests whether a process is alive or not +func _pidAlive(pid int) bool { + h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) + if err != nil { + return false + } + + var ec uint32 + if e := syscall.GetExitCodeProcess(h, &ec); e != nil { + return false + } + + return ec == exit_STILL_ACTIVE +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go new file mode 100644 index 0000000000..29f9bf063e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -0,0 +1,123 @@ +package plugin + +import ( + "fmt" + "io" + "net" + "net/rpc" + + "github.com/hashicorp/yamux" +) + +// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. +type RPCClient struct { + broker *MuxBroker + control *rpc.Client + plugins map[string]Plugin + + // These are the streams used for the various stdout/err overrides + stdout, stderr net.Conn +} + +// NewRPCClient creates a client from an already-open connection-like value. +// Dial is typically used instead. 
+func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { + // Create the yamux client so we can multiplex + mux, err := yamux.Client(conn, nil) + if err != nil { + conn.Close() + return nil, err + } + + // Connect to the control stream. + control, err := mux.Open() + if err != nil { + mux.Close() + return nil, err + } + + // Connect stdout, stderr streams + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Open() + if err != nil { + mux.Close() + return nil, err + } + } + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Build the client using our broker and control channel. + return &RPCClient{ + broker: broker, + control: rpc.NewClient(control), + plugins: plugins, + stdout: stdstream[0], + stderr: stdstream[1], + }, nil +} + +// SyncStreams should be called to enable syncing of stdout, +// stderr with the plugin. +// +// This will return immediately and the syncing will continue to happen +// in the background. You do not need to launch this in a goroutine itself. +// +// This should never be called multiple times. +func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { + go copyStream("stdout", stdout, c.stdout) + go copyStream("stderr", stderr, c.stderr) + return nil +} + +// Close closes the connection. The client is no longer usable after this +// is called. +func (c *RPCClient) Close() error { + // Call the control channel and ask it to gracefully exit. If this + // errors, then we save it so that we always return an error but we + // want to try to close the other channels anyways. + var empty struct{} + returnErr := c.control.Call("Control.Quit", true, &empty) + + // Close the other streams we have + if err := c.control.Close(); err != nil { + return err + } + if err := c.stdout.Close(); err != nil { + return err + } + if err := c.stderr.Close(); err != nil { + return err + } + if err := c.broker.Close(); err != nil { + return err + } + + // Return back the error we got from Control.Quit. This is very important + // since we MUST return non-nil error if this fails so that Client.Kill + // will properly try a process.Kill. + return returnErr +} + +func (c *RPCClient) Dispense(name string) (interface{}, error) { + p, ok := c.plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + var id uint32 + if err := c.control.Call( + "Dispenser.Dispense", name, &id); err != nil { + return nil, err + } + + conn, err := c.broker.Dial(id) + if err != nil { + return nil, err + } + + return p.Client(c.broker, rpc.NewClient(conn)) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go new file mode 100644 index 0000000000..3984dc891b --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -0,0 +1,185 @@ +package plugin + +import ( + "errors" + "fmt" + "io" + "log" + "net" + "net/rpc" + "sync" + + "github.com/hashicorp/yamux" +) + +// RPCServer listens for network connections and then dispenses interface +// implementations over net/rpc. +// +// After setting the fields below, they shouldn't be read again directly +// from the structure which may be reading/writing them concurrently. +type RPCServer struct { + Plugins map[string]Plugin + + // Stdout, Stderr are what this server will use instead of the + // normal stdin/out/err. 
This is because due to the multi-process nature + // of our plugin system, we can't use the normal process values so we + // make our own custom one we pipe across. + Stdout io.Reader + Stderr io.Reader + + // DoneCh should be set to a non-nil channel that will be closed + // when the control requests the RPC server to end. + DoneCh chan<- struct{} + + lock sync.Mutex +} + +// Accept accepts connections on a listener and serves requests for +// each incoming connection. Accept blocks; the caller typically invokes +// it in a go statement. +func (s *RPCServer) Accept(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Printf("[ERR] plugin: plugin server: %s", err) + return + } + + go s.ServeConn(conn) + } +} + +// ServeConn runs a single connection. +// +// ServeConn blocks, serving the connection until the client hangs up. +func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) { + // First create the yamux server to wrap this connection + mux, err := yamux.Server(conn, nil) + if err != nil { + conn.Close() + log.Printf("[ERR] plugin: error creating yamux server: %s", err) + return + } + + // Accept the control connection + control, err := mux.Accept() + if err != nil { + mux.Close() + if err != io.EOF { + log.Printf("[ERR] plugin: error accepting control connection: %s", err) + } + + return + } + + // Connect the stdstreams (in, out, err) + stdstream := make([]net.Conn, 2) + for i, _ := range stdstream { + stdstream[i], err = mux.Accept() + if err != nil { + mux.Close() + log.Printf("[ERR] plugin: accepting stream %d: %s", i, err) + return + } + } + + // Copy std streams out to the proper place + go copyStream("stdout", stdstream[0], s.Stdout) + go copyStream("stderr", stdstream[1], s.Stderr) + + // Create the broker and start it up + broker := newMuxBroker(mux) + go broker.Run() + + // Use the control connection to build the dispenser and serve the + // connection. + server := rpc.NewServer() + server.RegisterName("Control", &controlServer{ + server: s, + }) + server.RegisterName("Dispenser", &dispenseServer{ + broker: broker, + plugins: s.Plugins, + }) + server.ServeConn(control) +} + +// done is called internally by the control server to trigger the +// doneCh to close which is listened to by the main process to cleanly +// exit. +func (s *RPCServer) done() { + s.lock.Lock() + defer s.lock.Unlock() + + if s.DoneCh != nil { + close(s.DoneCh) + s.DoneCh = nil + } +} + +// dispenseServer dispenses variousinterface implementations for Terraform. +type controlServer struct { + server *RPCServer +} + +func (c *controlServer) Quit( + null bool, response *struct{}) error { + // End the server + c.server.done() + + // Always return true + *response = struct{}{} + + return nil +} + +// dispenseServer dispenses variousinterface implementations for Terraform. +type dispenseServer struct { + broker *MuxBroker + plugins map[string]Plugin +} + +func (d *dispenseServer) Dispense( + name string, response *uint32) error { + // Find the function to create this implementation + p, ok := d.plugins[name] + if !ok { + return fmt.Errorf("unknown plugin type: %s", name) + } + + // Create the implementation first so we know if there is an error. + impl, err := p.Server(d.broker) + if err != nil { + // We turn the error into an errors error so that it works across RPC + return errors.New(err.Error()) + } + + // Reserve an ID for our implementation + id := d.broker.NextId() + *response = id + + // Run the rest in a goroutine since it can only happen once this RPC + // call returns. 
We wait for a connection for the plugin implementation + // and serve it. + go func() { + conn, err := d.broker.Accept(id) + if err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) + return + } + + serve(conn, "Plugin", impl) + }() + + return nil +} + +func serve(conn io.ReadWriteCloser, name string, v interface{}) { + server := rpc.NewServer() + if err := server.RegisterName(name, v); err != nil { + log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) + return + } + + server.ServeConn(conn) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go new file mode 100644 index 0000000000..b5c5270a7d --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -0,0 +1,222 @@ +package plugin + +import ( + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/signal" + "runtime" + "strconv" + "sync/atomic" +) + +// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. +// We will increment this whenever we change any protocol behavior. This +// will invalidate any prior plugins but will at least allow us to iterate +// on the core in a safe way. We will do our best to do this very +// infrequently. +const CoreProtocolVersion = 1 + +// HandshakeConfig is the configuration used by client and servers to +// handshake before starting a plugin connection. This is embedded by +// both ServeConfig and ClientConfig. +// +// In practice, the plugin host creates a HandshakeConfig that is exported +// and plugins then can easily consume it. +type HandshakeConfig struct { + // ProtocolVersion is the version that clients must match on to + // agree they can communicate. This should match the ProtocolVersion + // set on ClientConfig when using a plugin. + ProtocolVersion uint + + // MagicCookieKey and value are used as a very basic verification + // that a plugin is intended to be launched. This is not a security + // measure, just a UX feature. If the magic cookie doesn't match, + // we show human-friendly output. + MagicCookieKey string + MagicCookieValue string +} + +// ServeConfig configures what sorts of plugins are served. +type ServeConfig struct { + // HandshakeConfig is the configuration that must match clients. + HandshakeConfig + + // Plugins are the plugins that are served. + Plugins map[string]Plugin +} + +// Serve serves the plugins given by ServeConfig. +// +// Serve doesn't return until the plugin is done being executed. Any +// errors will be outputted to the log. +// +// This is the method that plugins should call in their main() functions. +func Serve(opts *ServeConfig) { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. Please notify the plugin author and report\n"+ + "this as a bug.\n") + os.Exit(1) + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + os.Exit(1) + } + + // Logging goes to the original stderr + log.SetOutput(os.Stderr) + + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. 
+ stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // Register a listener so we can accept a connection + listener, err := serverListener() + if err != nil { + log.Printf("[ERR] plugin: plugin init: %s", err) + return + } + defer listener.Close() + + // Create the channel to tell us when we're done + doneCh := make(chan struct{}) + + // Create the RPC server to dispense + server := &RPCServer{ + Plugins: opts.Plugins, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + // Output the address and service name to stdout so that core can bring it up. + log.Printf("[DEBUG] plugin: plugin address: %s %s\n", + listener.Addr().Network(), listener.Addr().String()) + fmt.Printf("%d|%d|%s|%s\n", + CoreProtocolVersion, + opts.ProtocolVersion, + listener.Addr().Network(), + listener.Addr().String()) + os.Stdout.Sync() + + // Eat the interrupts + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + var count int32 = 0 + for { + <-ch + newCount := atomic.AddInt32(&count, 1) + log.Printf( + "[DEBUG] plugin: received interrupt signal (count: %d). Ignoring.", + newCount) + } + }() + + // Set our new out, err + os.Stdout = stdout_w + os.Stderr = stderr_w + + // Serve + go server.Accept(listener) + + // Wait for the graceful exit + <-doneCh +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32) + if err != nil { + return nil, err + } + + maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32) + if err != nil { + return nil, err + } + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + // Wrap the listener in rmListener so that the Unix domain socket file + // is removed on close. + return &rmListener{ + Listener: l, + Path: path, + }, nil +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also removes a file as part of the close. We +// use this to cleanup the unix domain socket on close. 
+type rmListener struct { + net.Listener + Path string +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return os.Remove(l.Path) +} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go new file mode 100644 index 0000000000..033079ea0f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/server_mux.go @@ -0,0 +1,31 @@ +package plugin + +import ( + "fmt" + "os" +) + +// ServeMuxMap is the type that is used to configure ServeMux +type ServeMuxMap map[string]*ServeConfig + +// ServeMux is like Serve, but serves multiple types of plugins determined +// by the argument given on the command-line. +// +// This command doesn't return until the plugin is done being executed. Any +// errors are logged or output to stderr. +func ServeMux(m ServeMuxMap) { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, + "Invoked improperly. This is an internal command that shouldn't\n"+ + "be manually invoked.\n") + os.Exit(1) + } + + opts, ok := m[os.Args[1]] + if !ok { + fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) + os.Exit(1) + } + + Serve(opts) +} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go new file mode 100644 index 0000000000..1d547aaaab --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/stream.go @@ -0,0 +1,18 @@ +package plugin + +import ( + "io" + "log" +) + +func copyStream(name string, dst io.Writer, src io.Reader) { + if src == nil { + panic(name + ": src is nil") + } + if dst == nil { + panic(name + ": dst is nil") + } + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go new file mode 100644 index 0000000000..9086a1b45f --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -0,0 +1,76 @@ +package plugin + +import ( + "bytes" + "net" + "net/rpc" + "testing" +) + +// The testing file contains test helpers that you can use outside of +// this package for making it easier to test plugins themselves. + +// TestConn is a helper function for returning a client and server +// net.Conn connected to each other. +func TestConn(t *testing.T) (net.Conn, net.Conn) { + // Listen to any local port. This listener will be closed + // after a single connection is established. + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start a goroutine to accept our client connection + var serverConn net.Conn + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + defer l.Close() + var err error + serverConn, err = l.Accept() + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + // Connect to the server + clientConn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Wait for the server side to acknowledge it has connected + <-doneCh + + return clientConn, serverConn +} + +// TestRPCConn returns a rpc client and server connected to each other. 
+func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) { + clientConn, serverConn := TestConn(t) + + server := rpc.NewServer() + go server.ServeConn(serverConn) + + client := rpc.NewClient(clientConn) + return client, server +} + +// TestPluginRPCConn returns a plugin RPC client and server that are connected +// together and configured. +func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { + // Create two net.Conns we can use to shuttle our control connection + clientConn, serverConn := TestConn(t) + + // Start up the server + server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + go server.ServeConn(serverConn) + + // Connect the client to the server + client, err := NewRPCClient(clientConn, ps) + if err != nil { + t.Fatalf("err: %s", err) + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
"Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. 
You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. 
The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md new file mode 100644 index 0000000000..21fdda4ada --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/README.md @@ -0,0 +1,8 @@ +# uuid + +Generates UUID-format strings using purely high quality random bytes. + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). 
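+
+As a quick illustration of the small API vendored in `uuid.go` below (a
+sketch only; the import path is real, but this program is not part of this
+change):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/go-uuid"
+)
+
+func main() {
+	// Generate a random, UUID-formatted string from crypto/rand bytes.
+	id, err := uuid.GenerateUUID()
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(id) // prints a 36-character UUID string
+
+	// Round-trip: parse back into the 16 raw bytes, then re-format.
+	raw, err := uuid.ParseUUID(id)
+	if err != nil {
+		log.Fatal(err)
+	}
+	again, _ := uuid.FormatUUID(raw)
+	fmt.Println(again == id) // true
+}
+```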
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go new file mode 100644 index 0000000000..322b522c23 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -0,0 +1,57 @@ +package uuid + +import ( + "crypto/rand" + "encoding/hex" + "fmt" +) + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() (string, error) { + buf := make([]byte, 16) + if _, err := rand.Read(buf); err != nil { + return "", fmt.Errorf("failed to read random bytes: %v", err) + } + + return FormatUUID(buf) +} + +func FormatUUID(buf []byte) (string, error) { + if len(buf) != 16 { + return "", fmt.Errorf("wrong length byte slice (%d)", len(buf)) + } + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + buf[0:4], + buf[4:6], + buf[6:8], + buf[8:10], + buf[10:16]), nil +} + +func ParseUUID(uuid string) ([]byte, error) { + if len(uuid) != 36 { + return nil, fmt.Errorf("uuid string is wrong length") + } + + hyph := []byte("-") + + if uuid[8] != hyph[0] || + uuid[13] != hyph[0] || + uuid[18] != hyph[0] || + uuid[23] != hyph[0] { + return nil, fmt.Errorf("uuid is improperly formatted") + } + + hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36] + + ret, err := hex.DecodeString(hexStr) + if err != nil { + return nil, err + } + if len(ret) != 16 { + return nil, fmt.Errorf("decoded hex is the wrong length") + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 0000000000..6f3a15ce77 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,65 @@ +# Versioning Library for Go +[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 0000000000..8c73df0602 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,178 @@ +package version + +import ( + "fmt" + "regexp" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + check *Version + original string +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. 
+type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintFunc + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintFunc{ + "": constraintEqual, + "=": constraintEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "~>": constraintPessimistic, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + return &Constraint{ + f: constraintOperators[matches[1]], + check: check, + original: v, + }, nil +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. 
If the version
+	// being checked, at any point, does not have the same values in each index of the
+	// constraint's segments, then it cannot be valid against the constraint.
+	for i := 0; i < c.si-1; i++ {
+		if v.segments[i] != c.segments[i] {
+			return false
+		}
+	}
+
+	// Check the last stated segment of the constraint. If the version's segment at
+	// this index is less than the constraint's segment at this index, then it
+	// cannot be valid against the constraint
+	if c.segments[cs-1] > v.segments[cs-1] {
+		return false
+	}
+
+	// If nothing has rejected the version by now, it's valid
+	return true
+}
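Taken together with `version.go` (vendored next), the constraint logic above supports the pessimistic operator `~>`. A minimal sketch of typical usage — hedged, assuming only the exported `NewVersion`, `Must`, `Compare`, `NewConstraint`, and `Check` shown in this vendored copy:

```go
package main

import (
	"fmt"
	"log"

	version "github.com/hashicorp/go-version"
)

func main() {
	// Prerelease versions sort before their release: 1.4.0-beta < 1.4.0.
	beta := version.Must(version.NewVersion("1.4.0-beta"))
	rel := version.Must(version.NewVersion("1.4.0"))
	fmt.Println(beta.Compare(rel)) // -1

	// The pessimistic operator "~>" pins all but the last stated segment:
	// "~> 1.2.3" admits 1.2.x for x >= 3, but not 1.3.0.
	c, err := version.NewConstraint("~> 1.2.3")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Check(version.Must(version.NewVersion("1.2.9")))) // true
	fmt.Println(c.Check(version.Must(version.NewVersion("1.3.0")))) // false
}
```

The `~>` case exercises `constraintPessimistic` above: every segment before the last stated one must match exactly, and the last one may only grow.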
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
new file mode 100644
index 0000000000..dfe509caa0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -0,0 +1,322 @@
+package version
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled regular expression used to test the validity of a version.
+var versionRegexp *regexp.Regexp
+
+// The raw regular expression string used for testing the validity
+// of a version.
+const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
+	`(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`?`
+
+// Version represents a single version.
+type Version struct {
+	metadata string
+	pre      string
+	segments []int64
+	si       int
+}
+
+func init() {
+	versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
+}
+
+// NewVersion parses the given version and returns a new
+// Version.
+func NewVersion(v string) (*Version, error) {
+	matches := versionRegexp.FindStringSubmatch(v)
+	if matches == nil {
+		return nil, fmt.Errorf("Malformed version: %s", v)
+	}
+	segmentsStr := strings.Split(matches[1], ".")
+	segments := make([]int64, len(segmentsStr))
+	si := 0
+	for i, str := range segmentsStr {
+		val, err := strconv.ParseInt(str, 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error parsing version: %s", err)
+		}
+
+		segments[i] = int64(val)
+		si++
+	}
+
+	// Even though we could support more than three segments, if we
+	// got fewer than three, pad it with 0s. This is to cover the basic
+	// default use case of semver, which is MAJOR.MINOR.PATCH at the minimum
+	for i := len(segments); i < 3; i++ {
+		segments = append(segments, 0)
+	}
+
+	return &Version{
+		metadata: matches[7],
+		pre:      matches[4],
+		segments: segments,
+		si:       si,
+	}, nil
+}
+
+// Must is a helper that wraps a call to a function returning (*Version, error)
+// and panics if error is non-nil.
+func Must(v *Version, err error) *Version {
+	if err != nil {
+		panic(err)
+	}
+
+	return v
+}
+
+// Compare compares this version to another version. This
+// returns -1, 0, or 1 if this version is smaller, equal,
+// or larger than the other version, respectively.
+//
+// If you want boolean results, use the LessThan, Equal,
+// or GreaterThan methods.
+func (v *Version) Compare(other *Version) int {
+	// A quick, efficient equality check
+	if v.String() == other.String() {
+		return 0
+	}
+
+	segmentsSelf := v.Segments64()
+	segmentsOther := other.Segments64()
+
+	// If the segments are the same, we must compare on prerelease info
+	if reflect.DeepEqual(segmentsSelf, segmentsOther) {
+		preSelf := v.Prerelease()
+		preOther := other.Prerelease()
+		if preSelf == "" && preOther == "" {
+			return 0
+		}
+		if preSelf == "" {
+			return 1
+		}
+		if preOther == "" {
+			return -1
+		}
+
+		return comparePrereleases(preSelf, preOther)
+	}
+
+	// Get the highest specificity (hS), or if they're equal, just use segmentsSelf length
+	lenSelf := len(segmentsSelf)
+	lenOther := len(segmentsOther)
+	hS := lenSelf
+	if lenSelf < lenOther {
+		hS = lenOther
+	}
+	// Compare the segments
+	// Because a constraint could have more/less specificity than the version it's
+	// checking, we need to account for a lopsided or jagged comparison
+	for i := 0; i < hS; i++ {
+		if i > lenSelf-1 {
+			// This means Self had the lower specificity
+			// Check to see if the remaining segments in Other are all zeros
+			if !allZero(segmentsOther[i:]) {
+				// if not, it means that Other has to be greater than Self
+				return -1
+			}
+			break
+		} else if i > lenOther-1 {
+			// this means Other had the lower specificity
+			// Check to see if the remaining segments in Self are all zeros
+			if !allZero(segmentsSelf[i:]) {
+				// if not, it means that Self has to be greater than Other
+				return 1
+			}
+			break
+		}
+		lhs := segmentsSelf[i]
+		rhs := segmentsOther[i]
+		if lhs == rhs {
+			continue
+		} else if lhs < rhs {
+			return -1
+		}
+		// Otherwise, rhs was > lhs; they're not equal
+		return 1
+	}
+
+	// if we got this far, they're equal
+	return 0
+}
+
+func allZero(segs []int64) bool {
+	for _, s := range segs {
+		if s != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+	if preSelf == preOther {
+		return 0
+	}
+
+	selfNumeric := true
+	_, err := strconv.ParseInt(preSelf, 10, 64)
+	if err != nil {
+		selfNumeric = false
+	}
+
+	otherNumeric := true
+	_, err = strconv.ParseInt(preOther, 10, 64)
+	if err != nil {
+		otherNumeric = false
+	}
+
+	// if a part is empty, we use the other to decide
+	if preSelf == "" {
+		if otherNumeric {
+			return -1
+		}
+		return 1
+	}
+
+	if preOther == "" {
+		if selfNumeric {
+			return 1
+		}
+		return -1
+	}
+
+	if selfNumeric && !otherNumeric {
+		return -1
+	} else if !selfNumeric && otherNumeric {
+		return 1
+	} else if preSelf > preOther {
+		return 1
+	}
+
+	return -1
+}
+
+func comparePrereleases(v string, other string) int {
+	// the same pre-release!
+	if v == other {
+		return 0
+	}
+
+	// split both pre-releases to analyse their parts
+	selfPreReleaseMeta := strings.Split(v, ".")
+	otherPreReleaseMeta := strings.Split(other, ".")
+
+	selfPreReleaseLen := len(selfPreReleaseMeta)
+	otherPreReleaseLen := len(otherPreReleaseMeta)
+
+	biggestLen := otherPreReleaseLen
+	if selfPreReleaseLen > otherPreReleaseLen {
+		biggestLen = selfPreReleaseLen
+	}
+
+	// loop over the parts to find the first difference
+	for i := 0; i < biggestLen; i++ {
+		partSelfPre := ""
+		if i < selfPreReleaseLen {
+			partSelfPre = selfPreReleaseMeta[i]
+		}
+
+		partOtherPre := ""
+		if i < otherPreReleaseLen {
+			partOtherPre = otherPreReleaseMeta[i]
+		}
+
+		compare := comparePart(partSelfPre, partOtherPre)
+		// if the parts are equal, continue the loop
+		if compare != 0 {
+			return compare
+		}
+	}
+
+	return 0
+}
+
+// Equal tests if two versions are equal.
+func (v *Version) Equal(o *Version) bool {
+	return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+	return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+	return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+	segmentSlice := make([]int, len(v.segments))
+	for i, v := range v.segments {
+		segmentSlice[i] = int(v)
+	}
+	return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+	return v.segments
+}
+
+// String returns the full version string, including pre-release
+// and metadata information.
+func (v *Version) String() string {
+	var buf bytes.Buffer
+	fmtParts := make([]string, len(v.segments))
+	for i, s := range v.segments {
+		// FormatInt cannot fail here: the values in segments were
+		// pre-parsed as integers
+		str := strconv.FormatInt(s, 10)
+		fmtParts[i] = str
+	}
+	fmt.Fprint(&buf, strings.Join(fmtParts, "."))
+	if v.pre != "" {
+		fmt.Fprintf(&buf, "-%s", v.pre)
+	}
+	if v.metadata != "" {
+		fmt.Fprintf(&buf, "+%s", v.metadata)
+	}
+
+	return buf.String()
+}
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
new file mode 100644
index 0000000000..cc888d43e6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-version/version_collection.go
@@ -0,0 +1,17 @@
+package version
+
+// Collection is a type that implements the sort.Interface interface
+// so that versions can be sorted.
+type Collection []*Version
+
+func (v Collection) Len() int {
+	return len(v)
+}
+
+func (v Collection) Less(i, j int) bool {
+	return v[i].LessThan(v[j])
+}
+
+func (v Collection) Swap(i, j int) {
+	v[i], v[j] = v[j], v[i]
+}
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 0000000000..c33dcc7c92
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. 
“Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 0000000000..84fd743f5c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,18 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+	go fmt ./...
+
+test: generate
+	go get -t ./...
+	go test $(TEST) $(TESTARGS)
+
+generate:
+	go generate ./...
+
+updatedeps:
+	go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 0000000000..c8223326dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar languages.
+
+## Why?
+
+A common question on first seeing HCL is: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages, from full programming languages
+such as Ruby to complete data-structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments. 
With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also force
+people to learn at least some Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+  * Single-line comments start with `#` or `//`
+
+  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+    are not allowed. A multi-line comment (also known as a block comment)
+    terminates at the first `*/` found.
+
+  * Values are assigned with the syntax `key = value` (whitespace doesn't
+    matter). The value can be any primitive: a string, number, boolean,
+    object, or list.
+
+  * Strings are double-quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Multi-line strings start with `<<EOF` at the end of a line and end
+    with `EOF` on its own line ("heredoc syntax"). Any text between the
+    two `EOF`s is considered part of the string.
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -t ./...
+
+build_script:
+- cmd: go test -v ./...
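
Ahead of the decoder source itself, a minimal sketch of the API it exposes — hedged, assuming only `hcl.Decode` and the `hcl` struct tag as defined in the `decoder.go` vendored below; the config keys are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Config maps HCL keys onto struct fields via the "hcl" struct tag.
type Config struct {
	Region  string `hcl:"region"`
	Workers int    `hcl:"workers"`
}

func main() {
	input := `
# comments are allowed
region  = "us-south"
workers = 4
`
	var c Config
	if err := hcl.Decode(&c, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s / %d\n", c.Region, c.Workers) // us-south / 4
}
```

`Unmarshal` is the byte-slice equivalent of `Decode`, and `DecodeObject` accepts an already-parsed `ast.Node` for lower-level use.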
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 0000000000..0b39c1b952
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,724 @@
+package hcl
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// This is the tag to use with structures to have settings for HCL
+const tagName = "hcl"
+
+var (
+	// nodeType holds a reference to the type of ast.Node
+	nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+	root, err := parse(bs)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+	obj, err := Parse(in)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+	val := reflect.ValueOf(out)
+	if val.Kind() != reflect.Ptr {
+		return errors.New("result must be a pointer")
+	}
+
+	// If we have the file, we really decode the root node
+	if f, ok := n.(*ast.File); ok {
+		n = f.Node
+	}
+
+	var d decoder
+	return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+	stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+	k := result
+
+	// If we have an interface with a valid value, we use that
+	// for the check.
+	if result.Kind() == reflect.Interface {
+		elem := result.Elem()
+		if elem.IsValid() {
+			k = elem
+		}
+	}
+
+	// Push current onto stack unless it is an interface.
+	if k.Kind() != reflect.Interface {
+		d.stack = append(d.stack, k.Kind())
+
+		// Schedule a pop
+		defer func() {
+			d.stack = d.stack[:len(d.stack)-1]
+		}()
+	}
+
+	switch k.Kind() {
+	case reflect.Bool:
+		return d.decodeBool(name, node, result)
+	case reflect.Float64:
+		return d.decodeFloat(name, node, result)
+	case reflect.Int, reflect.Int32, reflect.Int64:
+		return d.decodeInt(name, node, result)
+	case reflect.Interface:
+		// When we see an interface, we make our own thing
+		return d.decodeInterface(name, node, result)
+	case reflect.Map:
+		return d.decodeMap(name, node, result)
+	case reflect.Ptr:
+		return d.decodePtr(name, node, result)
+	case reflect.Slice:
+		return d.decodeSlice(name, node, result)
+	case reflect.String:
+		return d.decodeString(name, node, result)
+	case reflect.Struct:
+		return d.decodeStruct(name, node, result)
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+		}
+	}
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		if n.Token.Type == token.BOOL {
+			v, err := strconv.ParseBool(n.Token.Text)
+			if err != nil {
+				return err
+			}
+
+			result.Set(reflect.ValueOf(v))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		if n.Token.Type == token.FLOAT {
+			v, err := strconv.ParseFloat(n.Token.Text, 64)
+			if err != nil {
+				return err
+			}
+
+			result.Set(reflect.ValueOf(v))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.NUMBER:
+			v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+			if err != nil {
+				return err
+			}
+
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
+			return nil
+		case token.STRING:
+			v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+			if err != nil {
+				return err
+			}
+
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+	// When we see an ast.Node, we retain the value to enable deferred decoding.
+	// Very useful in situations where we want to preserve ast.Node information
+	// like Pos
+	if result.Type() == nodeType && result.CanSet() {
+		result.Set(reflect.ValueOf(node))
+		return nil
+	}
+
+	var set reflect.Value
+	redecode := true
+
+	// For testing types, ObjectType should just be treated as a list. We
+	// set this to a temporary var because we want to pass in the real node.
+ testNode := node + if ot, ok := node.(*ast.ObjectType); ok { + testNode = ot.List + } + + switch n := testNode.(type) { + case *ast.ObjectList: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) + set = result + } + case *ast.ObjectType: + // If we're at the root or we're directly within a slice, then we + // decode objects into map[string]interface{}, otherwise we decode + // them into lists. + if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { + var temp map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeMap( + reflect.MapOf( + reflect.TypeOf(""), + tempVal.Type().Elem())) + + set = result + } else { + var temp []map[string]interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 1) + set = result + } + case *ast.ListType: + var temp []interface{} + tempVal := reflect.ValueOf(temp) + result := reflect.MakeSlice( + reflect.SliceOf(tempVal.Type().Elem()), 0, 0) + set = result + case *ast.LiteralType: + switch n.Token.Type { + case token.BOOL: + var result bool + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.FLOAT: + var result float64 + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.NUMBER: + var result int + set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) + case token.STRING, token.HEREDOC: + set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), + } + } + default: + return fmt.Errorf( + "%s: cannot decode into interface: %T", + name, node) + } + + // Set the result to what its supposed to be, then reset + // result so we don't reflect into this method anymore. + result.Set(set) + + if redecode { + // Revisit the node so that we can use the newly instantiated + // thing and populate it. 
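+		// (For a list such as `[1, 2]`, for example, the first pass
+		// above only allocates an empty []interface{}; this second
+		// decode call is what fills in the two elements.)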
+ if err := d.decode(name, node, result); err != nil { + return err + } + } + + return nil +} + +func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { + if item, ok := node.(*ast.ObjectItem); ok { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + n, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), + } + } + + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + + resultType := result.Type() + resultElemType := resultType.Elem() + resultKeyType := resultType.Key() + if resultKeyType.Kind() != reflect.String { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Make a map if it is nil + resultMap := result + if result.IsNil() { + resultMap = reflect.MakeMap( + reflect.MapOf(resultKeyType, resultElemType)) + } + + // Go through each element and decode it. + done := make(map[string]struct{}) + for _, item := range n.Items { + if item.Val == nil { + continue + } + + // github.com/hashicorp/terraform/issue/5740 + if len(item.Keys) == 0 { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: map must have string keys", name), + } + } + + // Get the key we're dealing with, which is the first item + keyStr := item.Keys[0].Token.Value().(string) + + // If we've already processed this key, then ignore it + if _, ok := done[keyStr]; ok { + continue + } + + // Determine the value. If we have more than one key, then we + // get the objectlist of only these keys. + itemVal := item.Val + if len(item.Keys) > 1 { + itemVal = n.Filter(keyStr) + done[keyStr] = struct{}{} + } + + // Make the field name + fieldName := fmt.Sprintf("%s.%s", name, keyStr) + + // Get the key/value as reflection values + key := reflect.ValueOf(keyStr) + val := reflect.Indirect(reflect.New(resultElemType)) + + // If we have a pre-existing value in the map, use that + oldVal := resultMap.MapIndex(key) + if oldVal.IsValid() { + val.Set(oldVal) + } + + // Decode! + if err := d.decode(fieldName, itemVal, val); err != nil { + return err + } + + // Set the value on the map + resultMap.SetMapIndex(key, val) + } + + // Set the final map if we can + set.Set(resultMap) + return nil +} + +func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
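+	//
+	// Hypothetical example: decoding `count = 3` into a struct field
+	// declared as `Count *int` allocates a new int, decodes 3 into it,
+	// and stores the resulting pointer.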
+ resultType := result.Type() + resultElemType := resultType.Elem() + val := reflect.New(resultElemType) + if err := d.decode(name, node, reflect.Indirect(val)); err != nil { + return err + } + + result.Set(val) + return nil +} + +func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { + // If we have an interface, then we can address the interface, + // but not the slice itself, so get the element but set the interface + set := result + if result.Kind() == reflect.Interface { + result = result.Elem() + } + // Create the slice if it isn't nil + resultType := result.Type() + resultElemType := resultType.Elem() + if result.IsNil() { + resultSliceType := reflect.SliceOf(resultElemType) + result = reflect.MakeSlice( + resultSliceType, 0, 0) + } + + // Figure out the items we'll be copying into the slice + var items []ast.Node + switch n := node.(type) { + case *ast.ObjectList: + items = make([]ast.Node, len(n.Items)) + for i, item := range n.Items { + items[i] = item + } + case *ast.ObjectType: + items = []ast.Node{n} + case *ast.ListType: + items = n.List + default: + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("unknown slice type: %T", node), + } + } + + for i, item := range items { + fieldName := fmt.Sprintf("%s[%d]", name, i) + + // Decode + val := reflect.Indirect(reflect.New(resultElemType)) + + // if item is an object that was decoded from ambiguous JSON and + // flattened, make sure it's expanded if it needs to decode into a + // defined structure. + item := expandObject(item, val) + + if err := d.decode(fieldName, item, val); err != nil { + return err + } + + // Append it onto the slice + result = reflect.Append(result, val) + } + + set.Set(result) + return nil +} + +// expandObject detects if an ambiguous JSON object was flattened to a List which +// should be decoded into a struct, and expands the ast to properly deocode. +func expandObject(node ast.Node, result reflect.Value) ast.Node { + item, ok := node.(*ast.ObjectItem) + if !ok { + return node + } + + elemType := result.Type() + + // our target type must be a struct + switch elemType.Kind() { + case reflect.Ptr: + switch elemType.Elem().Kind() { + case reflect.Struct: + //OK + default: + return node + } + case reflect.Struct: + //OK + default: + return node + } + + // A list value will have a key and field name. If it had more fields, + // it wouldn't have been flattened. 
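+	//
+	// Illustrative, hypothetical input: the JSON document
+	//
+	//	{"service": {"http": {"port": 80}}}
+	//
+	// may reach here flattened into a single item carrying the two keys
+	// "service" and "http"; the code below re-nests it so that it can
+	// still decode into a struct.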
+ if len(item.Keys) != 2 { + return node + } + + keyToken := item.Keys[0].Token + item.Keys = item.Keys[1:] + + // we need to un-flatten the ast enough to decode + newNode := &ast.ObjectItem{ + Keys: []*ast.ObjectKey{ + &ast.ObjectKey{ + Token: keyToken, + }, + }, + Val: &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{item}, + }, + }, + } + + return newNode +} + +func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { + switch n := node.(type) { + case *ast.LiteralType: + switch n.Token.Type { + case token.NUMBER: + result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) + return nil + case token.STRING, token.HEREDOC: + result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) + return nil + } + } + + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unknown type for string %T", name, node), + } +} + +func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { + var item *ast.ObjectItem + if it, ok := node.(*ast.ObjectItem); ok { + item = it + node = it.Val + } + + if ot, ok := node.(*ast.ObjectType); ok { + node = ot.List + } + + // Handle the special case where the object itself is a literal. Previously + // the yacc parser would always ensure top-level elements were arrays. The new + // parser does not make the same guarantees, thus we need to convert any + // top-level literal elements into a list. + if _, ok := node.(*ast.LiteralType); ok && item != nil { + node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} + } + + list, ok := node.(*ast.ObjectList) + if !ok { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), + } + } + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = result + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") + + // Ignore fields with tag name "-" + if tagParts[0] == "-" { + continue + } + + if fieldType.Anonymous { + fieldKind := fieldType.Type.Kind() + if fieldKind != reflect.Struct { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: unsupported type to struct: %s", + fieldType.Name, fieldKind), + } + } + + // We have an embedded field. We "squash" the fields down + // if specified in the tag. + squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + structs = append( + structs, result.FieldByName(fieldType.Name)) + continue + } + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + usedKeys := make(map[string]struct{}) + decodedFields := make([]string, 0, len(fields)) + decodedFieldsVal := make([]reflect.Value, 0) + unusedKeysVal := make([]reflect.Value, 0) + for fieldType, field := range fields { + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. 
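+		//
+		// For reference, the tag forms handled below look like this
+		// (an illustrative, hypothetical struct):
+		//
+		//	type Service struct {
+		//	    Name    string   `hcl:",key"`
+		//	    Ports   []int    `hcl:"ports"`
+		//	    Decoded []string `hcl:",decodedFields"`
+		//	}
+		//
+		// where ",key" receives the block label and ",decodedFields"
+		// receives the names of the fields that were set.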
+ if !field.CanSet() { + continue + } + + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(tagName) + tagParts := strings.SplitN(tagValue, ",", 2) + if len(tagParts) >= 2 { + switch tagParts[1] { + case "decodedFields": + decodedFieldsVal = append(decodedFieldsVal, field) + continue + case "key": + if item == nil { + return &parser.PosError{ + Pos: node.Pos(), + Err: fmt.Errorf("%s: %s asked for 'key', impossible", + name, fieldName), + } + } + + field.SetString(item.Keys[0].Token.Value().(string)) + continue + case "unusedKeys": + unusedKeysVal = append(unusedKeysVal, field) + continue + } + } + + if tagParts[0] != "" { + fieldName = tagParts[0] + } + + // Determine the element we'll use to decode. If it is a single + // match (only object with the field), then we decode it exactly. + // If it is a prefix match, then we decode the matches. + filter := list.Filter(fieldName) + + prefixMatches := filter.Children() + matches := filter.Elem() + if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { + continue + } + + // Track the used key + usedKeys[fieldName] = struct{}{} + + // Create the field name and decode. We range over the elements + // because we actually want the value. + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + if len(prefixMatches.Items) > 0 { + if err := d.decode(fieldName, prefixMatches, field); err != nil { + return err + } + } + for _, match := range matches.Items { + var decodeNode ast.Node = match.Val + if ot, ok := decodeNode.(*ast.ObjectType); ok { + decodeNode = &ast.ObjectList{Items: ot.List.Items} + } + + if err := d.decode(fieldName, decodeNode, field); err != nil { + return err + } + } + + decodedFields = append(decodedFields, fieldType.Name) + } + + if len(decodedFieldsVal) > 0 { + // Sort it so that it is deterministic + sort.Strings(decodedFields) + + for _, v := range decodedFieldsVal { + v.Set(reflect.ValueOf(decodedFields)) + } + } + + return nil +} + +// findNodeType returns the type of ast.Node +func findNodeType() reflect.Type { + var nodeContainer struct { + Node ast.Node + } + value := reflect.ValueOf(nodeContainer).FieldByName("Node") + return value.Type() +} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go new file mode 100644 index 0000000000..575a20b50b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl.go @@ -0,0 +1,11 @@ +// Package hcl decodes HCL into usable Go structures. +// +// hcl input can come in either pure HCL format or JSON format. +// It can be parsed into an AST, and then decoded into a structure, +// or it can be decoded directly from a string into a structure. +// +// If you choose to parse HCL into a raw AST, the benefit is that you +// can write custom visitor implementations to implement custom +// semantic checks. By default, HCL does not perform any semantic +// checks. +package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go new file mode 100644 index 0000000000..6e5ef654bb --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -0,0 +1,219 @@ +// Package ast declares the types used to represent syntax trees for HCL +// (HashiCorp Configuration Language) +package ast + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/token" +) + +// Node is an element in the abstract syntax tree. 
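+//
+// As an illustrative sketch, the HCL source
+//
+//	foo "bar" {
+//	    baz = 1
+//	}
+//
+// parses into an ObjectList holding one ObjectItem whose Keys are
+// "foo" and "bar" and whose Val is an ObjectType containing `baz = 1`.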
+type Node interface { + node() + Pos() token.Pos +} + +func (File) node() {} +func (ObjectList) node() {} +func (ObjectKey) node() {} +func (ObjectItem) node() {} +func (Comment) node() {} +func (CommentGroup) node() {} +func (ObjectType) node() {} +func (LiteralType) node() {} +func (ListType) node() {} + +// File represents a single HCL file +type File struct { + Node Node // usually a *ObjectList + Comments []*CommentGroup // list of all comments in the source +} + +func (f *File) Pos() token.Pos { + return f.Node.Pos() +} + +// ObjectList represents a list of ObjectItems. An HCL file itself is an +// ObjectList. +type ObjectList struct { + Items []*ObjectItem +} + +func (o *ObjectList) Add(item *ObjectItem) { + o.Items = append(o.Items, item) +} + +// Filter filters out the objects with the given key list as a prefix. +// +// The returned list of objects contain ObjectItems where the keys have +// this prefix already stripped off. This might result in objects with +// zero-length key lists if they have no children. +// +// If no matches are found, an empty ObjectList (non-nil) is returned. +func (o *ObjectList) Filter(keys ...string) *ObjectList { + var result ObjectList + for _, item := range o.Items { + // If there aren't enough keys, then ignore this + if len(item.Keys) < len(keys) { + continue + } + + match := true + for i, key := range item.Keys[:len(keys)] { + key := key.Token.Value().(string) + if key != keys[i] && !strings.EqualFold(key, keys[i]) { + match = false + break + } + } + if !match { + continue + } + + // Strip off the prefix from the children + newItem := *item + newItem.Keys = newItem.Keys[len(keys):] + result.Add(&newItem) + } + + return &result +} + +// Children returns further nested objects (key length > 0) within this +// ObjectList. This should be used with Filter to get at child items. +func (o *ObjectList) Children() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) > 0 { + result.Add(item) + } + } + + return &result +} + +// Elem returns items in the list that are direct element assignments +// (key length == 0). This should be used with Filter to get at elements. +func (o *ObjectList) Elem() *ObjectList { + var result ObjectList + for _, item := range o.Items { + if len(item.Keys) == 0 { + result.Add(item) + } + } + + return &result +} + +func (o *ObjectList) Pos() token.Pos { + // always returns the uninitiliazed position + return o.Items[0].Pos() +} + +// ObjectItem represents a HCL Object Item. An item is represented with a key +// (or keys). It can be an assignment or an object (both normal and nested) +type ObjectItem struct { + // keys is only one length long if it's of type assignment. If it's a + // nested object it can be larger than one. In that case "assign" is + // invalid as there is no assignments for a nested object. + Keys []*ObjectKey + + // assign contains the position of "=", if any + Assign token.Pos + + // val is the item itself. It can be an object,list, number, bool or a + // string. If key length is larger than one, val can be only of type + // Object. + Val Node + + LeadComment *CommentGroup // associated lead comment + LineComment *CommentGroup // associated line comment +} + +func (o *ObjectItem) Pos() token.Pos { + // I'm not entirely sure what causes this, but removing this causes + // a test failure. We should investigate at some point. + if len(o.Keys) == 0 { + return token.Pos{} + } + + return o.Keys[0].Pos() +} + +// ObjectKeys are either an identifier or of type string. 
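+// In `foo "bar" { ... }`, for instance, `foo` is an IDENT key and
+// `"bar"` is a STRING key.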
+type ObjectKey struct { + Token token.Token +} + +func (o *ObjectKey) Pos() token.Pos { + return o.Token.Pos +} + +// LiteralType represents a literal of basic type. Valid types are: +// token.NUMBER, token.FLOAT, token.BOOL and token.STRING +type LiteralType struct { + Token token.Token + + // comment types, only used when in a list + LeadComment *CommentGroup + LineComment *CommentGroup +} + +func (l *LiteralType) Pos() token.Pos { + return l.Token.Pos +} + +// ListStatement represents a HCL List type +type ListType struct { + Lbrack token.Pos // position of "[" + Rbrack token.Pos // position of "]" + List []Node // the elements in lexical order +} + +func (l *ListType) Pos() token.Pos { + return l.Lbrack +} + +func (l *ListType) Add(node Node) { + l.List = append(l.List, node) +} + +// ObjectType represents a HCL Object Type +type ObjectType struct { + Lbrace token.Pos // position of "{" + Rbrace token.Pos // position of "}" + List *ObjectList // the nodes in lexical order +} + +func (o *ObjectType) Pos() token.Pos { + return o.Lbrace +} + +// Comment node represents a single //, # style or /*- style commment +type Comment struct { + Start token.Pos // position of / or # + Text string +} + +func (c *Comment) Pos() token.Pos { + return c.Start +} + +// CommentGroup node represents a sequence of comments with no other tokens and +// no empty lines between. +type CommentGroup struct { + List []*Comment // len(List) > 0 +} + +func (c *CommentGroup) Pos() token.Pos { + return c.List[0].Pos() +} + +//------------------------------------------------------------------- +// GoStringer +//------------------------------------------------------------------- + +func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } +func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go new file mode 100644 index 0000000000..ba07ad42b0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go @@ -0,0 +1,52 @@ +package ast + +import "fmt" + +// WalkFunc describes a function to be called for each node during a Walk. The +// returned node can be used to rewrite the AST. Walking stops the returned +// bool is false. +type WalkFunc func(Node) (Node, bool) + +// Walk traverses an AST in depth-first order: It starts by calling fn(node); +// node must not be nil. If fn returns true, Walk invokes fn recursively for +// each of the non-nil children of node, followed by a call of fn(nil). The +// returned node of fn can be used to rewrite the passed node to fn. +func Walk(node Node, fn WalkFunc) Node { + rewritten, ok := fn(node) + if !ok { + return rewritten + } + + switch n := node.(type) { + case *File: + n.Node = Walk(n.Node, fn) + case *ObjectList: + for i, item := range n.Items { + n.Items[i] = Walk(item, fn).(*ObjectItem) + } + case *ObjectKey: + // nothing to do + case *ObjectItem: + for i, k := range n.Keys { + n.Keys[i] = Walk(k, fn).(*ObjectKey) + } + + if n.Val != nil { + n.Val = Walk(n.Val, fn) + } + case *LiteralType: + // nothing to do + case *ListType: + for i, l := range n.List { + n.List[i] = Walk(l, fn) + } + case *ObjectType: + n.List = Walk(n.List, fn).(*ObjectList) + default: + // should we panic here? 
+ fmt.Printf("unknown type: %T\n", n) + } + + fn(nil) + return rewritten +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go new file mode 100644 index 0000000000..5c99381dfb --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go @@ -0,0 +1,17 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/token" +) + +// PosError is a parse error that contains a position. +type PosError struct { + Pos token.Pos + Err error +} + +func (e *PosError) Error() string { + return fmt.Sprintf("At %s: %s", e.Pos, e.Err) +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go new file mode 100644 index 0000000000..b4881806e7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -0,0 +1,520 @@ +// Package parser implements a parser for HCL (HashiCorp Configuration +// Language) +package parser + +import ( + "bytes" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/scanner" + "github.com/hashicorp/hcl/hcl/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + comments []*ast.CommentGroup + leadComment *ast.CommentGroup // last lead comment + lineComment *ast.CommentGroup // last line comment + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + // normalize all line endings + // since the scanner and output only work with "\n" line endings, we may + // end up with dangling "\r" characters in the parsed data. + src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) + + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = &PosError{Pos: pos, Err: errors.New(msg)} + } + + f.Node, err = p.objectList(false) + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + f.Comments = p.comments + return f, nil +} + +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter" obj" tells this whether to we are within an object (braces: +// '{', '}') or just at the top level. If we're within an object, we end +// at an RBRACE. +func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // object lists can be optionally comma-delimited e.g. 
when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } + } + return node, nil +} + +func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { + endline = p.tok.Pos.Line + + // count the endline if it's multiline comment, ie starting with /* + if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { + // don't use range here - no need to decode Unicode code points + for i := 0; i < len(p.tok.Text); i++ { + if p.tok.Text[i] == '\n' { + endline++ + } + } + } + + comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} + p.tok = p.sc.Scan() + return +} + +func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { + var list []*ast.Comment + endline = p.tok.Pos.Line + + for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { + var comment *ast.Comment + comment, endline = p.consumeComment() + list = append(list, comment) + } + + // add comment group to the comments list + comments = &ast.CommentGroup{List: list} + p.comments = append(p.comments, comments) + + return +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if len(keys) > 0 && err == errEofToken { + // We ignore eof token here since it is an error if we didn't + // receive a value (but we did receive a key) for the item. + err = nil + } + if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { + // This is a strange boolean statement, but what it means is: + // We have keys with no value, and we're likely in an object + // (since RBrace ends an object). For this, we set err to nil so + // we continue and get the error below of having the wrong value + // type. + err = nil + + // Reset the token type so we don't think it completed fine. See + // objectType which uses p.tok.Type to check if we're done with + // the object. + p.tok.Type = token.EOF + } + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + if p.leadComment != nil { + o.LeadComment = p.leadComment + p.leadComment = nil + } + + switch p.tok.Type { + case token.ASSIGN: + o.Assign = p.tok.Pos + o.Val, err = p.object() + if err != nil { + return nil, err + } + case token.LBRACE: + o.Val, err = p.objectType() + if err != nil { + return nil, err + } + default: + keyStr := make([]string, 0, len(keys)) + for _, k := range keys { + keyStr = append(keyStr, k.Token.Text) + } + + return nil, fmt.Errorf( + "key '%s' expected start of object ('{') or assignment ('=')", + strings.Join(keyStr, " ")) + } + + // do a look-ahead for line comment + p.scan() + if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { + o.LineComment = p.lineComment + p.lineComment = nil + } + p.unscan() + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + // It is very important to also return the keys here as well as + // the error. This is because we need to be able to tell if we + // did parse keys prior to finding the EOF, or if we just found + // a bare EOF. + return keys, errEofToken + case token.ASSIGN: + // assignment or object only, but not nested objects. 
this is not + // allowed: `foo bar = {}` + if keyCount > 1 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), + } + } + + if keyCount == 0 { + return nil, &PosError{ + Pos: p.tok.Pos, + Err: errors.New("no object keys found!"), + } + } + + return keys, nil + case token.LBRACE: + var err error + + // If we have no keys, then it is a syntax error. i.e. {{}} is not + // allowed. + if len(keys) == 0 { + err = &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), + } + } + + // object + return keys, err + case token.IDENT, token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{Token: p.tok}) + case token.ILLEGAL: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } + default: + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), + } + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) object() (ast.Node, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.COMMENT: + // implement comment + case token.EOF: + return nil, errEofToken + } + + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("Unknown token: %+v", tok), + } +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{ + Lbrace: p.tok.Pos, + } + + l, err := p.objectList(true) + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. 
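+	// (For example, an unclosed block such as `foo { bar = 1` reaches
+	// this point without an objectList error and is then rejected by
+	// the RBRACE check just below.)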
+ if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type) + } + + o.List = l + o.Rbrace = p.tok.Pos // advanced via parseObjectList + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{ + Lbrack: p.tok.Pos, + } + + needComma := false + for { + tok := p.scan() + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), + } + } + } + switch tok.Type { + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + node, err := p.literalType() + if err != nil { + return nil, err + } + + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + + l.Add(node) + needComma = true + case token.COMMA: + // get next list item or we are at the end + // do a look-ahead for line comment + p.scan() + if p.lineComment != nil && len(l.List) > 0 { + lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) + if ok { + lit.LineComment = p.lineComment + l.List[len(l.List)-1] = lit + p.lineComment = nil + } + } + p.unscan() + + needComma = false + continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true + case token.LBRACK: + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) + case token.RBRACK: + // finished + l.Rbrack = p.tok.Pos + return l, nil + default: + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), + } + } + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok, + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. In the process, it collects any +// comment groups encountered, and remembers the last lead and line comments. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + // Otherwise read the next token from the scanner and Save it to the buffer + // in case we unscan later. + prev := p.tok + p.tok = p.sc.Scan() + + if p.tok.Type == token.COMMENT { + var comment *ast.CommentGroup + var endline int + + // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", + // p.tok.Pos.Line, prev.Pos.Line, endline) + if p.tok.Pos.Line == prev.Pos.Line { + // The comment is on same line as the previous token; it + // cannot be a lead comment but may be a line comment. 
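+			//
+			// For example:
+			//
+			//	foo = 1 // becomes a line comment for foo
+			//	// becomes a lead comment for bar
+			//	bar = 2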
+ comment, endline = p.consumeCommentGroup(0) + if p.tok.Pos.Line != endline { + // The next token is on a different line, thus + // the last comment group is a line comment. + p.lineComment = comment + } + } + + // consume successor comments, if any + endline = -1 + for p.tok.Type == token.COMMENT { + comment, endline = p.consumeCommentGroup(1) + } + + if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { + switch p.tok.Type { + case token.RBRACE, token.RBRACK: + // Do not count for these cases + default: + // The next token is following on the line immediately after the + // comment group, thus the last comment group is a lead comment. + p.leadComment = comment + } + } + + } + + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go new file mode 100644 index 0000000000..69662367f0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -0,0 +1,651 @@ +// Package scanner implements a scanner for HCL (HashiCorp Configuration +// Language) source text. +package scanner + +import ( + "bytes" + "fmt" + "os" + "regexp" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/hcl/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. 
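+	//
+	// A minimal, hypothetical use of this scanner (an illustrative
+	// sketch):
+	//
+	//	s := scanner.New([]byte(`foo = "bar"`))
+	//	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
+	//	    fmt.Println(tok.Type, tok.Text)
+	//	}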
+ b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // If we see a null character with data left, then that is an error + if ch == '\x00' && s.buf.Len() > 0 { + s.err("unexpected null character (0x00)") + return eof + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. +func (s *Scanner) Scan() token.Token { + ch := s.next() + + // skip white space + for isWhitespace(ch) { + ch = s.next() + } + + var tok token.Type + + // token text markings + s.tokStart = s.srcPos.Offset - s.lastCharLen + + // token position, initial next() is moving the offset by one(size of rune + // actually), though we are interested with the starting point + s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen + if s.srcPos.Column > 0 { + // common case: last character was not a '\n' + s.tokPos.Line = s.srcPos.Line + s.tokPos.Column = s.srcPos.Column + } else { + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + s.tokPos.Line = s.srcPos.Line - 1 + s.tokPos.Column = s.lastLineLen + } + + switch { + case isLetter(ch): + tok = token.IDENT + lit := s.scanIdentifier() + if lit == "true" || lit == "false" { + tok = token.BOOL + } + case isDecimal(ch): + tok = s.scanNumber(ch) + default: + switch ch { + case eof: + tok = token.EOF + case '"': + tok = token.STRING + s.scanString() + case '#', '/': + tok = token.COMMENT + s.scanComment(ch) + case '.': + tok = token.PERIOD + ch = s.peek() + if isDecimal(ch) { + tok = token.FLOAT + ch = s.scanMantissa(ch) + ch = s.scanExponent(ch) + } + case '<': + tok = token.HEREDOC + s.scanHeredoc() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + tok = token.RBRACE + case ',': + tok = token.COMMA + case '=': + tok = token.ASSIGN + case '+': + tok = token.ADD + case '-': + if isDecimal(s.peek()) { + ch := s.next() + tok = s.scanNumber(ch) + } else { + tok = token.SUB + } + default: + s.err("illegal char") + } + } + + // finish token ending + s.tokEnd = s.srcPos.Offset + + // create token literal + var tokenText string + if s.tokStart >= 0 { + tokenText = 
string(s.src[s.tokStart:s.tokEnd]) + } + s.tokStart = s.tokEnd // ensure idempotency of tokenText() call + + return token.Token{ + Type: tok, + Pos: s.tokPos, + Text: tokenText, + } +} + +func (s *Scanner) scanComment(ch rune) { + // single line comments + if ch == '#' || (ch == '/' && s.peek() != '*') { + if ch == '/' && s.peek() != '/' { + s.err("expected '/' for comment") + return + } + + ch = s.next() + for ch != '\n' && ch >= 0 && ch != eof { + ch = s.next() + } + if ch != eof && ch >= 0 { + s.unread() + } + return + } + + // be sure we get the character after /* This allows us to find comment's + // that are not erminated + if ch == '/' { + s.next() + ch = s.next() // read character after "/*" + } + + // look for /* - style comments + for { + if ch < 0 || ch == eof { + s.err("comment not terminated") + break + } + + ch0 := ch + ch = s.next() + if ch0 == '*' && ch == '/' { + break + } + } +} + +// scanNumber scans a HCL number definition starting with the given rune +func (s *Scanner) scanNumber(ch rune) token.Type { + if ch == '0' { + // check for hexadecimal, octal or float + ch = s.next() + if ch == 'x' || ch == 'X' { + // hexadecimal + ch = s.next() + found := false + for isHexadecimal(ch) { + ch = s.next() + found = true + } + + if !found { + s.err("illegal hexadecimal number") + } + + if ch != eof { + s.unread() + } + + return token.NUMBER + } + + // now it's either something like: 0421(octal) or 0.1231(float) + illegalOctal := false + for isDecimal(ch) { + ch = s.next() + if ch == '8' || ch == '9' { + // this is just a possibility. For example 0159 is illegal, but + // 0159.23 is valid. So we mark a possible illegal octal. If + // the next character is not a period, we'll print the error. + illegalOctal = true + } + } + + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if illegalOctal { + s.err("illegal octal number") + } + + if ch != eof { + s.unread() + } + return token.NUMBER + } + + s.scanMantissa(ch) + ch = s.next() // seek forward + if ch == 'e' || ch == 'E' { + ch = s.scanExponent(ch) + return token.FLOAT + } + + if ch == '.' { + ch = s.scanFraction(ch) + if ch == 'e' || ch == 'E' { + ch = s.next() + ch = s.scanExponent(ch) + } + return token.FLOAT + } + + if ch != eof { + s.unread() + } + return token.NUMBER +} + +// scanMantissa scans the mantissa begining from the rune. It returns the next +// non decimal rune. It's used to determine wheter it's a fraction or exponent. +func (s *Scanner) scanMantissa(ch rune) rune { + scanned := false + for isDecimal(ch) { + ch = s.next() + scanned = true + } + + if scanned && ch != eof { + s.unread() + } + return ch +} + +// scanFraction scans the fraction after the '.' rune +func (s *Scanner) scanFraction(ch rune) rune { + if ch == '.' { + ch = s.peek() // we peek just to see if we can move forward + ch = s.scanMantissa(ch) + } + return ch +} + +// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' +// rune. 
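+// For example, the exponent parts of `1e10`, `0.5e-3`, and `2E+8` are
+// all consumed here after the mantissa has been scanned.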
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+	// Scan the second '<' in example: '<<EOF'
+	if s.next() != '<' {
+		s.err("heredoc expected second '<', didn't see it")
+		return
+	}
+
+	// Get the original offset so we can read just the heredoc ident
+	offs := s.srcPos.Offset
+
+	// Scan the identifier
+	ch := s.next()
+
+	// Indented heredoc syntax
+	if ch == '-' {
+		ch = s.next()
+	}
+
+	for isLetter(ch) || isDigit(ch) {
+		ch = s.next()
+	}
+
+	// If we reached an EOF then that is not good
+	if ch == eof {
+		s.err("heredoc not terminated")
+		return
+	}
+
+	// Ignore the '\r' in Windows line endings
+	if ch == '\r' {
+		if s.peek() == '\n' {
+			ch = s.next()
+		}
+	}
+
+	// If we didn't reach a newline then that is also not good
+	if ch != '\n' {
+		s.err("invalid characters in heredoc anchor")
+		return
+	}
+
+	// Read the identifier
+	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+	if len(identBytes) == 0 {
+		s.err("zero-length heredoc anchor")
+		return
+	}
+
+	var identRegexp *regexp.Regexp
+	if identBytes[0] == '-' {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
+	} else {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
+	}
+
+	// Read the actual string value
+	lineStart := s.srcPos.Offset
+	for {
+		ch := s.next()
+
+		// Found the anchor token? Check it here.
+		if ch == '\n' {
+			// Great, check if the line is the anchor
+			if s.srcPos.Offset-s.lastCharLen-lineStart >= len(identBytes) &&
+				identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+				break
+			}
+
+			// Not an anchor match, record the start of a new line
+			lineStart = s.srcPos.Offset
+		}
+
+		if ch == eof {
+			s.err("heredoc not terminated")
+			return
+		}
+	}
+
+	return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' && braces == 0 {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for awhile
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example, an
+// octal escape such as \184 results in the call scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	start := n
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		if ch == eof {
+			// If we see an EOF, we halt any more scanning of digits
+			// immediately.
+			break
+		}
+
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	if n != start {
+		// we scanned all digits, put the last non digit char back,
+		// only if we read anything at all
+		s.unread()
+	}
+
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got identifier, put back latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) { + pos.Offset = s.srcPos.Offset - s.lastCharLen + switch { + case s.srcPos.Column > 0: + // common case: last character was not a '\n' + pos.Line = s.srcPos.Line + pos.Column = s.srcPos.Column + case s.lastLineLen > 0: + // last character was a '\n' + // (we cannot be at the beginning of the source + // since we have called next() at least once) + pos.Line = s.srcPos.Line - 1 + pos.Column = s.lastLineLen + default: + // at the beginning of the source + pos.Line = 1 + pos.Column = 1 + } + return +} + +// err prints the error of any scanning to s.Error function. If the function is +// not defined, by default it prints them to os.Stderr +func (s *Scanner) err(msg string) { + s.ErrorCount++ + pos := s.recentPosition() + + if s.Error != nil { + s.Error(pos, msg) + return + } + + fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) +} + +// isHexadecimal returns true if the given rune is a letter +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) +} + +// isDigit returns true if the given rune is a decimal digit +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +// isDecimal returns true if the given rune is a decimal number +func isDecimal(ch rune) bool { + return '0' <= ch && ch <= '9' +} + +// isHexadecimal returns true if the given rune is an hexadecimal number +func isHexadecimal(ch rune) bool { + return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' +} + +// isWhitespace returns true if the rune is a space, tab, newline or carriage return +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// digitVal returns the integer value of a given octal,decimal or hexadecimal rune +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go new file mode 100644 index 0000000000..5f981eaa2f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -0,0 +1,241 @@ +package strconv + +import ( + "errors" + "unicode/utf8" +) + +// ErrSyntax indicates that a value does not have the right syntax for the target type. +var ErrSyntax = errors.New("invalid syntax") + +// Unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +func Unquote(s string) (t string, err error) { + n := len(s) + if n < 2 { + return "", ErrSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", ErrSyntax + } + s = s[1 : n-1] + + if quote != '"' { + return "", ErrSyntax + } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } + + // Is it trivial? Avoid allocation. 
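+	//
+	// Illustrative examples of the behaviour implemented below:
+	//
+	//	Unquote(`"hello"`)       // "hello", nil
+	//	Unquote(`"a\nb"`)        // "a" and "b" joined by a real newline, nil
+	//	Unquote(`"${var.port}"`) // "${var.port}" kept verbatim, nil
+	//	Unquote(`hello`)         // "", ErrSyntax (not quoted)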
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { + switch quote { + case '"': + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + // If we're starting a '${}' then let it through un-unquoted. + // Specifically: we don't unquote any characters within the `${}` + // section. + if s[0] == '$' && len(s) > 1 && s[1] == '{' { + buf = append(buf, '$', '{') + s = s[2:] + + // Continue reading until we find the closing brace, copying as-is + braces := 1 + for len(s) > 0 && braces > 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError { + return "", ErrSyntax + } + + s = s[size:] + + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + + switch r { + case '{': + braces++ + case '}': + braces-- + } + } + if braces != 0 { + return "", ErrSyntax + } + if len(s) == 0 { + // If there's no string left, we're done! + break + } else { + // If there's more left, we need to pop back up to the top of the loop + // in case there's another interpolation in this string. + continue + } + } + + if s[0] == '\n' { + return "", ErrSyntax + } + + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", ErrSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} + +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"'): + err = ErrSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = ErrSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = ErrSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = ErrSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = ErrSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = ErrSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = ErrSyntax + return 
+ } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = ErrSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"': + if c != quote { + err = ErrSyntax + return + } + value = rune(c) + default: + err = ErrSyntax + return + } + tail = s + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go new file mode 100644 index 0000000000..59c1bb72d4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go @@ -0,0 +1,46 @@ +package token + +import "fmt" + +// Pos describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +type Pos struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (p *Pos) IsValid() bool { return p.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +func (p Pos) String() string { + s := p.Filename + if p.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", p.Line, p.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Before reports whether the position p is before u. +func (p Pos) Before(u Pos) bool { + return u.Offset > p.Offset || u.Line > p.Line +} + +// After reports whether the position p is after u. +func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go new file mode 100644 index 0000000000..e37c0664ec --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go @@ -0,0 +1,219 @@ +// Package token defines constants representing the lexical tokens for HCL +// (HashiCorp Configuration Language) +package token + +import ( + "fmt" + "strconv" + "strings" + + hclstrconv "github.com/hashicorp/hcl/hcl/strconv" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string + JSON bool +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + COMMENT + + identifier_beg + IDENT // literals + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + HEREDOC // < 0 { + // Pop the current item + n := len(frontier) + item := frontier[n-1] + frontier = frontier[:n-1] + + switch v := item.Val.(type) { + case *ast.ObjectType: + items, frontier = flattenObjectType(v, item, items, frontier) + case *ast.ListType: + items, frontier = flattenListType(v, item, items, frontier) + default: + items = append(items, item) + } + } + + // Reverse the list since the frontier model runs things backwards + for i := len(items)/2 - 1; i >= 0; i-- { + opp := len(items) - 1 - i + items[i], items[opp] = items[opp], items[i] + } + + // Done! 
Set the original items + list.Items = items + return n, true + }) +} + +func flattenListType( + ot *ast.ListType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List { + if _, ok := subitem.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, elem := range ot.List { + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: item.Keys, + Assign: item.Assign, + Val: elem, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} + +func flattenObjectType( + ot *ast.ObjectType, + item *ast.ObjectItem, + items []*ast.ObjectItem, + frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list has no items we do not have to flatten anything + if ot.List.Items == nil { + items = append(items, item) + return items, frontier + } + + // All the elements of this object must also be objects! + for _, subitem := range ot.List.Items { + if _, ok := subitem.Val.(*ast.ObjectType); !ok { + items = append(items, item) + return items, frontier + } + } + + // Great! We have a match go through all the items and flatten + for _, subitem := range ot.List.Items { + // Copy the new key + keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) + copy(keys, item.Keys) + copy(keys[len(item.Keys):], subitem.Keys) + + // Add it to the frontier so that we can recurse + frontier = append(frontier, &ast.ObjectItem{ + Keys: keys, + Assign: item.Assign, + Val: subitem.Val, + LeadComment: item.LeadComment, + LineComment: item.LineComment, + }) + } + + return items, frontier +} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go new file mode 100644 index 0000000000..125a5f0729 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -0,0 +1,313 @@ +package parser + +import ( + "errors" + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl/json/scanner" + "github.com/hashicorp/hcl/json/token" +) + +type Parser struct { + sc *scanner.Scanner + + // Last read token + tok token.Token + commaPrev token.Token + + enableTrace bool + indent int + n int // buffer size (max = 1) +} + +func newParser(src []byte) *Parser { + return &Parser{ + sc: scanner.New(src), + } +} + +// Parse returns the fully parsed source and returns the abstract syntax tree. +func Parse(src []byte) (*ast.File, error) { + p := newParser(src) + return p.Parse() +} + +var errEofToken = errors.New("EOF token found") + +// Parse returns the fully parsed source and returns the abstract syntax tree. 
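+// As an illustrative sketch, the JSON input
+//
+//	{"name": "web", "count": 3}
+//
+// yields the same AST shape as the HCL
+//
+//	name = "web"
+//	count = 3
+//
+// which is what allows the shared decoder to handle both formats.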
+func (p *Parser) Parse() (*ast.File, error) { + f := &ast.File{} + var err, scerr error + p.sc.Error = func(pos token.Pos, msg string) { + scerr = fmt.Errorf("%s: %s", pos, msg) + } + + // The root must be an object in JSON + object, err := p.object() + if scerr != nil { + return nil, scerr + } + if err != nil { + return nil, err + } + + // We make our final node an object list so it is more HCL compatible + f.Node = object.List + + // Flatten it, which finds patterns and turns them into more HCL-like + // AST trees. + flattenObjects(f.Node) + + return f, nil +} + +func (p *Parser) objectList() (*ast.ObjectList, error) { + defer un(trace(p, "ParseObjectList")) + node := &ast.ObjectList{} + + for { + n, err := p.objectItem() + if err == errEofToken { + break // we are finished + } + + // we don't return a nil node, because might want to use already + // collected items. + if err != nil { + return node, err + } + + node.Add(n) + + // Check for a followup comma. If it isn't a comma, then we're done + if tok := p.scan(); tok.Type != token.COMMA { + break + } + } + + return node, nil +} + +// objectItem parses a single object item +func (p *Parser) objectItem() (*ast.ObjectItem, error) { + defer un(trace(p, "ParseObjectItem")) + + keys, err := p.objectKey() + if err != nil { + return nil, err + } + + o := &ast.ObjectItem{ + Keys: keys, + } + + switch p.tok.Type { + case token.COLON: + pos := p.tok.Pos + o.Assign = hcltoken.Pos{ + Filename: pos.Filename, + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + o.Val, err = p.objectValue() + if err != nil { + return nil, err + } + } + + return o, nil +} + +// objectKey parses an object key and returns a ObjectKey AST +func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { + keyCount := 0 + keys := make([]*ast.ObjectKey, 0) + + for { + tok := p.scan() + switch tok.Type { + case token.EOF: + return nil, errEofToken + case token.STRING: + keyCount++ + keys = append(keys, &ast.ObjectKey{ + Token: p.tok.HCLToken(), + }) + case token.COLON: + // If we have a zero keycount it means that we never got + // an object key, i.e. `{ :`. This is a syntax error. + if keyCount == 0 { + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + + // Done + return keys, nil + case token.ILLEGAL: + return nil, errors.New("illegal") + default: + return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) + } + } +} + +// object parses any type of object, such as number, bool, string, object or +// list. +func (p *Parser) objectValue() (ast.Node, error) { + defer un(trace(p, "ParseObjectValue")) + tok := p.scan() + + switch tok.Type { + case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: + return p.literalType() + case token.LBRACE: + return p.objectType() + case token.LBRACK: + return p.listType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) +} + +// object parses any type of object, such as number, bool, string, object or +// list. 
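+// Note that the JSON root handled here must be an actual object: any
+// leading token other than LBRACE is rejected below (EOF included). For
+// example (illustrative), parsing `[1, 2]` at the top level fails, while
+// `{"a": [1, 2]}` succeeds, the list being handled later by objectValue
+// and listType.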
+func (p *Parser) object() (*ast.ObjectType, error) { + defer un(trace(p, "ParseType")) + tok := p.scan() + + switch tok.Type { + case token.LBRACE: + return p.objectType() + case token.EOF: + return nil, errEofToken + } + + return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) +} + +// objectType parses an object type and returns a ObjectType AST +func (p *Parser) objectType() (*ast.ObjectType, error) { + defer un(trace(p, "ParseObjectType")) + + // we assume that the currently scanned token is a LBRACE + o := &ast.ObjectType{} + + l, err := p.objectList() + + // if we hit RBRACE, we are good to go (means we parsed all Items), if it's + // not a RBRACE, it's an syntax error and we just return it. + if err != nil && p.tok.Type != token.RBRACE { + return nil, err + } + + o.List = l + return o, nil +} + +// listType parses a list type and returns a ListType AST +func (p *Parser) listType() (*ast.ListType, error) { + defer un(trace(p, "ParseListType")) + + // we assume that the currently scanned token is a LBRACK + l := &ast.ListType{} + + for { + tok := p.scan() + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING: + node, err := p.literalType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.COMMA: + continue + case token.LBRACE: + node, err := p.objectType() + if err != nil { + return nil, err + } + + l.Add(node) + case token.BOOL: + // TODO(arslan) should we support? not supported by HCL yet + case token.LBRACK: + // TODO(arslan) should we support nested lists? Even though it's + // written in README of HCL, it's not a part of the grammar + // (not defined in parse.y) + case token.RBRACK: + // finished + return l, nil + default: + return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) + } + + } +} + +// literalType parses a literal type and returns a LiteralType AST +func (p *Parser) literalType() (*ast.LiteralType, error) { + defer un(trace(p, "ParseLiteral")) + + return &ast.LiteralType{ + Token: p.tok.HCLToken(), + }, nil +} + +// scan returns the next token from the underlying scanner. If a token has +// been unscanned then read that instead. +func (p *Parser) scan() token.Token { + // If we have a token on the buffer, then return it. + if p.n != 0 { + p.n = 0 + return p.tok + } + + p.tok = p.sc.Scan() + return p.tok +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { + p.n = 1 +} + +// ---------------------------------------------------------------------------- +// Parsing support + +func (p *Parser) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) + + i := 2 * p.indent + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) 
+} + +func trace(p *Parser, msg string) *Parser { + p.printTrace(msg, "(") + p.indent++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *Parser) { + p.indent-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go new file mode 100644 index 0000000000..dd5c72bb3d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -0,0 +1,451 @@ +package scanner + +import ( + "bytes" + "fmt" + "os" + "unicode" + "unicode/utf8" + + "github.com/hashicorp/hcl/json/token" +) + +// eof represents a marker rune for the end of the reader. +const eof = rune(0) + +// Scanner defines a lexical scanner +type Scanner struct { + buf *bytes.Buffer // Source buffer for advancing and scanning + src []byte // Source buffer for immutable access + + // Source Position + srcPos token.Pos // current position + prevPos token.Pos // previous position, used for peek() method + + lastCharLen int // length of last character in bytes + lastLineLen int // length of last line in characters (for correct column reporting) + + tokStart int // token text start position + tokEnd int // token text end position + + // Error is called for each error encountered. If no Error + // function is set, the error is reported to os.Stderr. + Error func(pos token.Pos, msg string) + + // ErrorCount is incremented by one for each error encountered. + ErrorCount int + + // tokPos is the start position of most recently scanned token; set by + // Scan. The Filename field is always left untouched by the Scanner. If + // an error is reported (via Error) and Position is invalid, the scanner is + // not inside a token. + tokPos token.Pos +} + +// New creates and initializes a new instance of Scanner using src as +// its source content. +func New(src []byte) *Scanner { + // even though we accept a src, we read from a io.Reader compatible type + // (*bytes.Buffer). So in the future we might easily change it to streaming + // read. + b := bytes.NewBuffer(src) + s := &Scanner{ + buf: b, + src: src, + } + + // srcPosition always starts with 1 + s.srcPos.Line = 1 + return s +} + +// next reads the next rune from the bufferred reader. Returns the rune(0) if +// an error occurs (or io.EOF is returned). +func (s *Scanner) next() rune { + ch, size, err := s.buf.ReadRune() + if err != nil { + // advance for error reporting + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + return eof + } + + if ch == utf8.RuneError && size == 1 { + s.srcPos.Column++ + s.srcPos.Offset += size + s.lastCharLen = size + s.err("illegal UTF-8 encoding") + return ch + } + + // remember last position + s.prevPos = s.srcPos + + s.srcPos.Column++ + s.lastCharLen = size + s.srcPos.Offset += size + + if ch == '\n' { + s.srcPos.Line++ + s.lastLineLen = s.srcPos.Column + s.srcPos.Column = 0 + } + + // debug + // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) + return ch +} + +// unread unreads the previous read Rune and updates the source position +func (s *Scanner) unread() { + if err := s.buf.UnreadRune(); err != nil { + panic(err) // this is user fault, we should catch it + } + s.srcPos = s.prevPos // put back last position +} + +// peek returns the next rune without advancing the reader. +func (s *Scanner) peek() rune { + peek, _, err := s.buf.ReadRune() + if err != nil { + return eof + } + + s.buf.UnreadRune() + return peek +} + +// Scan scans the next token and returns the token. 
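+//
+// A typical driver loop, from a client package (illustrative sketch):
+//
+//	s := scanner.New(src)
+//	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
+//		fmt.Printf("%s %s %q\n", tok.Pos, tok.Type, tok.Text)
+//	}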
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() already moved the offset by one
+	// rune, but we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		} else if lit == "null" {
+			tok = token.NULL
+		} else {
+			s.err("illegal char")
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case ':':
+			tok = token.COLON
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				s.err("illegal char")
+			}
+		default:
+			s.err("illegal char: " + string(ch))
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	zero := ch == '0'
+	pos := s.srcPos
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+
+	// If the number is longer than one digit and starts with zero, error
+	if zero && pos != s.srcPos {
+		s.err("numbers cannot start with 0")
+	}
+
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the given rune. It returns
+// the next non-decimal rune. It's used to determine whether it's a fraction
+// or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
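+//
+// As an illustrative walk-through, scanning "12.5e-3" consumes "12" in
+// scanMantissa, ".5" in scanFraction, and "e-3" here, so scanNumber
+// reports a single FLOAT token.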
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if ch == '\n' || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans up to n digits of the given base. For example, an octal
+// escape such as \123 results in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	// we scanned all digits, put the last non-digit char back
+	s.unread()
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got the identifier, put back the latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err prints the error of any scanning to the s.Error function.
If the function is
+// not defined, by default it prints them to os.Stderr
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal number
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal number
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 0000000000..59c1bb72d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (p Pos) String() string {
+	s := p.Filename
+	if p.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+	return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
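+// Note that, as with Before, the comparison uses Offset and Line only;
+// Column never participates. An illustrative example:
+//
+//	a := Pos{Line: 1, Column: 9, Offset: 8}
+//	b := Pos{Line: 2, Column: 1, Offset: 10}
+//	b.After(a) // true
+//	a.After(b) // false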
+func (p Pos) After(u Pos) bool { + return u.Offset < p.Offset || u.Line < p.Line +} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go new file mode 100644 index 0000000000..95a0c3eee6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/json/token/token.go @@ -0,0 +1,118 @@ +package token + +import ( + "fmt" + "strconv" + + hcltoken "github.com/hashicorp/hcl/hcl/token" +) + +// Token defines a single HCL token which can be obtained via the Scanner +type Token struct { + Type Type + Pos Pos + Text string +} + +// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) +type Type int + +const ( + // Special tokens + ILLEGAL Type = iota + EOF + + identifier_beg + literal_beg + NUMBER // 12345 + FLOAT // 123.45 + BOOL // true,false + STRING // "abc" + NULL // null + literal_end + identifier_end + + operator_beg + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + COLON // : + + RBRACK // ] + RBRACE // } + + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + + NUMBER: "NUMBER", + FLOAT: "FLOAT", + BOOL: "BOOL", + STRING: "STRING", + NULL: "NULL", + + LBRACK: "LBRACK", + LBRACE: "LBRACE", + COMMA: "COMMA", + PERIOD: "PERIOD", + COLON: "COLON", + + RBRACK: "RBRACK", + RBRACE: "RBRACE", +} + +// String returns the string corresponding to the token tok. +func (t Type) String() string { + s := "" + if 0 <= t && t < Type(len(tokens)) { + s = tokens[t] + } + if s == "" { + s = "token(" + strconv.Itoa(int(t)) + ")" + } + return s +} + +// IsIdentifier returns true for tokens corresponding to identifiers and basic +// type literals; it returns false otherwise. +func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } + +// IsLiteral returns true for tokens corresponding to basic type literals; it +// returns false otherwise. +func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } + +// String returns the token's literal text. Note that this is only +// applicable for certain token types, such as token.IDENT, +// token.STRING, etc.. +func (t Token) String() string { + return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) +} + +// HCLToken converts this token to an HCL token. +// +// The token type must be a literal type or this will panic. +func (t Token) HCLToken() hcltoken.Token { + switch t.Type { + case BOOL: + return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} + case FLOAT: + return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} + case NULL: + return hcltoken.Token{Type: hcltoken.STRING, Text: ""} + case NUMBER: + return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} + case STRING: + return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} + default: + panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) + } +} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go new file mode 100644 index 0000000000..d9993c2928 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/lex.go @@ -0,0 +1,38 @@ +package hcl + +import ( + "unicode" + "unicode/utf8" +) + +type lexModeValue byte + +const ( + lexModeUnknown lexModeValue = iota + lexModeHcl + lexModeJson +) + +// lexMode returns whether we're going to be parsing in JSON +// mode or HCL mode. 
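+//
+// The heuristic is simply the first non-space rune: '{' selects JSON,
+// anything else selects HCL. Illustrative examples:
+//
+//	lexMode([]byte(`  {"a": 1}`)) // lexModeJson
+//	lexMode([]byte(`a = 1`))      // lexModeHcl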
+func lexMode(v []byte) lexModeValue { + var ( + r rune + w int + offset int + ) + + for { + r, w = utf8.DecodeRune(v[offset:]) + offset += w + if unicode.IsSpace(r) { + continue + } + if r == '{' { + return lexModeJson + } + break + } + + return lexModeHcl +} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go new file mode 100644 index 0000000000..1fca53c4ce --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/parse.go @@ -0,0 +1,39 @@ +package hcl + +import ( + "fmt" + + "github.com/hashicorp/hcl/hcl/ast" + hclParser "github.com/hashicorp/hcl/hcl/parser" + jsonParser "github.com/hashicorp/hcl/json/parser" +) + +// ParseBytes accepts as input byte slice and returns ast tree. +// +// Input can be either JSON or HCL +func ParseBytes(in []byte) (*ast.File, error) { + return parse(in) +} + +// ParseString accepts input as a string and returns ast tree. +func ParseString(input string) (*ast.File, error) { + return parse([]byte(input)) +} + +func parse(in []byte) (*ast.File, error) { + switch lexMode(in) { + case lexModeHcl: + return hclParser.Parse(in) + case lexModeJson: + return jsonParser.Parse(in) + } + + return nil, fmt.Errorf("unknown config format") +} + +// Parse parses the given input and returns the root object. +// +// The input format can be either HCL or JSON. +func Parse(input string) (*ast.File, error) { + return parse([]byte(input)) +} diff --git a/vendor/github.com/hashicorp/hil/LICENSE b/vendor/github.com/hashicorp/hil/LICENSE new file mode 100644 index 0000000000..82b4de97c7 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md
new file mode 100644
index 0000000000..186ed2518c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/README.md
@@ -0,0 +1,102 @@
+# HIL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil)
+
+HIL (HashiCorp Interpolation Language) is a lightweight embedded language used
+primarily for configuration interpolation. The goal of HIL is to make a simple
+language for interpolations in the various configurations of HashiCorp tools.
+
+HIL is built to interpolate any string, but is in use by HashiCorp primarily
+with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any
+way for use with HIL.
+
+HIL isn't meant to be a general purpose language. It was built for basic
+configuration interpolations. Therefore, you can't currently write functions,
+have conditionals, set intermediary variables, etc. within HIL itself. It is
+possible some of these may be added later but the right use case must exist.
+
+## Why?
+
+Many of our tools have support for something similar to templates, but
+within the configuration itself. The most prominent requirement was in
+[Terraform](https://github.com/hashicorp/terraform) where we wanted the
+configuration to be able to reference values from elsewhere in the
+configuration. Example:
+
+    foo = "hi ${var.world}"
+
+We originally used a full templating language for this, but found it
+was too heavyweight. Additionally, many full languages required bindings
+to C (and thus the usage of cgo) which we try to avoid to make cross-compilation
+easier. We then moved to very basic regular expression based
+string replacement, but found the need for basic arithmetic and function
+calls resulting in overly complex regular expressions.
+
+Ultimately, we wrote our own mini-language within Terraform itself. As
+we built other projects such as [Nomad](https://nomadproject.io) and
+[Otto](https://ottoproject.io), the need for basic interpolations arose
+again.
+
+Thus HIL was born. It is extracted from Terraform, cleaned up, and
+better tested for general purpose use.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+Code begins within `${` and `}`. Outside of this, text is treated
+literally. For example, `foo` is a valid HIL program that is just the
+string "foo", but `foo ${bar}` is an HIL program that is the string "foo "
+concatenated with the value of `bar`. For the remainder of the syntax
+docs, we'll assume you're within `${}`.
+
+  * Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example
+    identifiers: `foo`, `var.foo`, `foo-bar`.
+
+  * Strings are double quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Numbers are assumed to be base 10. If you prefix a number with 0x,
+    it is treated as a hexadecimal. If it is prefixed with 0, it is
+    treated as an octal. Numbers can be in scientific notation: "1e10".
+
+  * Unary `-` can be used for negative numbers.
Example: `-10` or `-0.2` + + * Boolean values: `true`, `false` + + * The following arithmetic operations are allowed: +, -, *, /, %. + + * Function calls are in the form of `name(arg1, arg2, ...)`. Example: + `add(1, 5)`. Arguments can be any valid HIL expression, example: + `add(1, var.foo)` or even nested function calls: + `add(1, get("some value"))`. + + * Within strings, further interpolations can be opened with `${}`. + Example: `"Hello ${nested}"`. A full example including the + original `${}` (remember this list assumes were inside of one + already) could be: `foo ${func("hello ${var.foo}")}`. + +## Language Changes + +We've used this mini-language in Terraform for years. For backwards compatibility +reasons, we're unlikely to make an incompatible change to the language but +we're not currently making that promise, either. + +The internal API of this project may very well change as we evolve it +to work with more of our projects. We recommend using some sort of dependency +management solution with this package. + +## Future Changes + +The following changes are already planned to be made at some point: + + * Richer types: lists, maps, etc. + + * Convert to a more standard Go parser structure similar to HCL. This + will improve our error messaging as well as allow us to have automatic + formatting. + + * Allow interpolations to result in more types than just a string. While + within the interpolation basic types are honored, the result is always + a string. diff --git a/vendor/github.com/hashicorp/hil/appveyor.yml b/vendor/github.com/hashicorp/hil/appveyor.yml new file mode 100644 index 0000000000..feaf7a34e2 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/appveyor.yml @@ -0,0 +1,18 @@ +version: "build-{branch}-{build}" +image: Visual Studio 2015 +clone_folder: c:\gopath\src\github.com\hashicorp\hil +environment: + GOPATH: c:\gopath +init: + - git config --global core.autocrlf true +install: +- cmd: >- + echo %Path% + + go version + + go env + + go get -d -v -t ./... +build_script: +- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/vendor/github.com/hashicorp/hil/ast/arithmetic.go new file mode 100644 index 0000000000..94dc24f89f --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/arithmetic.go @@ -0,0 +1,43 @@ +package ast + +import ( + "bytes" + "fmt" +) + +// Arithmetic represents a node where the result is arithmetic of +// two or more operands in the order given. +type Arithmetic struct { + Op ArithmeticOp + Exprs []Node + Posx Pos +} + +func (n *Arithmetic) Accept(v Visitor) Node { + for i, expr := range n.Exprs { + n.Exprs[i] = expr.Accept(v) + } + + return v(n) +} + +func (n *Arithmetic) Pos() Pos { + return n.Posx +} + +func (n *Arithmetic) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +func (n *Arithmetic) String() string { + var b bytes.Buffer + for _, expr := range n.Exprs { + b.WriteString(fmt.Sprintf("%s", expr)) + } + + return b.String() +} + +func (n *Arithmetic) Type(Scope) (Type, error) { + return TypeInt, nil +} diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go new file mode 100644 index 0000000000..18880c6047 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go @@ -0,0 +1,24 @@ +package ast + +// ArithmeticOp is the operation to use for the math. 
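+//
+// In the const block below, ArithmeticOpInvalid pins the zero value and
+// the iota in the next spec resumes at 1, so ArithmeticOpAdd == 1,
+// ArithmeticOpSub == 2, and so on in declaration order.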
+type ArithmeticOp int + +const ( + ArithmeticOpInvalid ArithmeticOp = 0 + + ArithmeticOpAdd ArithmeticOp = iota + ArithmeticOpSub + ArithmeticOpMul + ArithmeticOpDiv + ArithmeticOpMod + + ArithmeticOpLogicalAnd + ArithmeticOpLogicalOr + + ArithmeticOpEqual + ArithmeticOpNotEqual + ArithmeticOpLessThan + ArithmeticOpLessThanOrEqual + ArithmeticOpGreaterThan + ArithmeticOpGreaterThanOrEqual +) diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go new file mode 100644 index 0000000000..c6350f8bba --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/ast.go @@ -0,0 +1,99 @@ +package ast + +import ( + "fmt" +) + +// Node is the interface that all AST nodes must implement. +type Node interface { + // Accept is called to dispatch to the visitors. It must return the + // resulting Node (which might be different in an AST transform). + Accept(Visitor) Node + + // Pos returns the position of this node in some source. + Pos() Pos + + // Type returns the type of this node for the given context. + Type(Scope) (Type, error) +} + +// Pos is the starting position of an AST node +type Pos struct { + Column, Line int // Column/Line number, starting at 1 + Filename string // Optional source filename, if known +} + +func (p Pos) String() string { + if p.Filename == "" { + return fmt.Sprintf("%d:%d", p.Line, p.Column) + } else { + return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column) + } +} + +// InitPos is an initiaial position value. This should be used as +// the starting position (presets the column and line to 1). +var InitPos = Pos{Column: 1, Line: 1} + +// Visitors are just implementations of this function. +// +// The function must return the Node to replace this node with. "nil" is +// _not_ a valid return value. If there is no replacement, the original node +// should be returned. We build this replacement directly into the visitor +// pattern since AST transformations are a common and useful tool and +// building it into the AST itself makes it required for future Node +// implementations and very easy to do. +// +// Note that this isn't a true implementation of the visitor pattern, which +// generally requires proper type dispatch on the function. However, +// implementing this basic visitor pattern style is still very useful even +// if you have to type switch. +type Visitor func(Node) Node + +//go:generate stringer -type=Type + +// Type is the type of any value. +type Type uint32 + +const ( + TypeInvalid Type = 0 + TypeAny Type = 1 << iota + TypeBool + TypeString + TypeInt + TypeFloat + TypeList + TypeMap + + // This is a special type used by Terraform to mark "unknown" values. + // It is impossible for this type to be introduced into your HIL programs + // unless you explicitly set a variable to this value. In that case, + // any operation including the variable will return "TypeUnknown" as the + // type. 
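+	//
+	// Each type above occupies a distinct bit: TypeAny is 1<<1 == 2,
+	// TypeBool 1<<2 == 4, and so on, which is what the generated Stringer
+	// in type_string.go switches on. TypeUnknown continues the series as
+	// 1<<8 == 256.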
+ TypeUnknown +) + +func (t Type) Printable() string { + switch t { + case TypeInvalid: + return "invalid type" + case TypeAny: + return "any type" + case TypeBool: + return "type bool" + case TypeString: + return "type string" + case TypeInt: + return "type int" + case TypeFloat: + return "type float" + case TypeList: + return "type list" + case TypeMap: + return "type map" + case TypeUnknown: + return "type unknown" + default: + return "unknown type" + } +} diff --git a/vendor/github.com/hashicorp/hil/ast/call.go b/vendor/github.com/hashicorp/hil/ast/call.go new file mode 100644 index 0000000000..0557011022 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/call.go @@ -0,0 +1,47 @@ +package ast + +import ( + "fmt" + "strings" +) + +// Call represents a function call. +type Call struct { + Func string + Args []Node + Posx Pos +} + +func (n *Call) Accept(v Visitor) Node { + for i, a := range n.Args { + n.Args[i] = a.Accept(v) + } + + return v(n) +} + +func (n *Call) Pos() Pos { + return n.Posx +} + +func (n *Call) String() string { + args := make([]string, len(n.Args)) + for i, arg := range n.Args { + args[i] = fmt.Sprintf("%s", arg) + } + + return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", ")) +} + +func (n *Call) Type(s Scope) (Type, error) { + f, ok := s.LookupFunc(n.Func) + if !ok { + return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func) + } + + return f.ReturnType, nil +} + +func (n *Call) GoString() string { + return fmt.Sprintf("*%#v", *n) +} diff --git a/vendor/github.com/hashicorp/hil/ast/conditional.go b/vendor/github.com/hashicorp/hil/ast/conditional.go new file mode 100644 index 0000000000..be48f89d46 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/conditional.go @@ -0,0 +1,36 @@ +package ast + +import ( + "fmt" +) + +type Conditional struct { + CondExpr Node + TrueExpr Node + FalseExpr Node + Posx Pos +} + +// Accept passes the given visitor to the child nodes in this order: +// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor. +func (n *Conditional) Accept(v Visitor) Node { + n.CondExpr = n.CondExpr.Accept(v) + n.TrueExpr = n.TrueExpr.Accept(v) + n.FalseExpr = n.FalseExpr.Accept(v) + + return v(n) +} + +func (n *Conditional) Pos() Pos { + return n.Posx +} + +func (n *Conditional) Type(Scope) (Type, error) { + // This is not actually a useful value; the type checker ignores + // this function when analyzing conditionals, just as with Arithmetic. 
+ return TypeInt, nil +} + +func (n *Conditional) GoString() string { + return fmt.Sprintf("*%#v", *n) +} diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go new file mode 100644 index 0000000000..860c25fd24 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/index.go @@ -0,0 +1,76 @@ +package ast + +import ( + "fmt" + "strings" +) + +// Index represents an indexing operation into another data structure +type Index struct { + Target Node + Key Node + Posx Pos +} + +func (n *Index) Accept(v Visitor) Node { + n.Target = n.Target.Accept(v) + n.Key = n.Key.Accept(v) + return v(n) +} + +func (n *Index) Pos() Pos { + return n.Posx +} + +func (n *Index) String() string { + return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key) +} + +func (n *Index) Type(s Scope) (Type, error) { + variableAccess, ok := n.Target.(*VariableAccess) + if !ok { + return TypeInvalid, fmt.Errorf("target is not a variable") + } + + variable, ok := s.LookupVar(variableAccess.Name) + if !ok { + return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name) + } + + switch variable.Type { + case TypeList: + return n.typeList(variable, variableAccess.Name) + case TypeMap: + return n.typeMap(variable, variableAccess.Name) + default: + return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type) + } +} + +func (n *Index) typeList(variable Variable, variableName string) (Type, error) { + // We assume type checking has already determined that this is a list + list := variable.Value.([]Variable) + + return VariableListElementTypesAreHomogenous(variableName, list) +} + +func (n *Index) typeMap(variable Variable, variableName string) (Type, error) { + // We assume type checking has already determined that this is a map + vmap := variable.Value.(map[string]Variable) + + return VariableMapValueTypesAreHomogenous(variableName, vmap) +} + +func reportTypes(typesFound map[Type]struct{}) string { + stringTypes := make([]string, len(typesFound)) + i := 0 + for k, _ := range typesFound { + stringTypes[0] = k.String() + i++ + } + return strings.Join(stringTypes, ", ") +} + +func (n *Index) GoString() string { + return fmt.Sprintf("*%#v", *n) +} diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go new file mode 100644 index 0000000000..da6014fee2 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/literal.go @@ -0,0 +1,88 @@ +package ast + +import ( + "fmt" + "reflect" +) + +// LiteralNode represents a single literal value, such as "foo" or +// 42 or 3.14159. Based on the Type, the Value can be safely cast. +type LiteralNode struct { + Value interface{} + Typex Type + Posx Pos +} + +// NewLiteralNode returns a new literal node representing the given +// literal Go value, which must correspond to one of the primitive types +// supported by HIL. Lists and maps cannot currently be constructed via +// this function. +// +// If an inappropriately-typed value is provided, this function will +// return an error. The main intended use of this function is to produce +// "synthetic" literals from constants in code, where the value type is +// well known at compile time. To easily store these in global variables, +// see also MustNewLiteralNode. 
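+//
+// Illustrative examples:
+//
+//	n, err := NewLiteralNode("foo", InitPos) // TypeString literal
+//	m := MustNewLiteralNode(42, InitPos)     // TypeInt; panics on bad input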
+func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) { + goType := reflect.TypeOf(value) + var hilType Type + + switch goType.Kind() { + case reflect.Bool: + hilType = TypeBool + case reflect.Int: + hilType = TypeInt + case reflect.Float64: + hilType = TypeFloat + case reflect.String: + hilType = TypeString + default: + return nil, fmt.Errorf("unsupported literal node type: %T", value) + } + + return &LiteralNode{ + Value: value, + Typex: hilType, + Posx: pos, + }, nil +} + +// MustNewLiteralNode wraps NewLiteralNode and panics if an error is +// returned, thus allowing valid literal nodes to be easily assigned to +// global variables. +func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode { + node, err := NewLiteralNode(value, pos) + if err != nil { + panic(err) + } + return node +} + +func (n *LiteralNode) Accept(v Visitor) Node { + return v(n) +} + +func (n *LiteralNode) Pos() Pos { + return n.Posx +} + +func (n *LiteralNode) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +func (n *LiteralNode) String() string { + return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value) +} + +func (n *LiteralNode) Type(Scope) (Type, error) { + return n.Typex, nil +} + +// IsUnknown returns true either if the node's value is itself unknown +// of if it is a collection containing any unknown elements, deeply. +func (n *LiteralNode) IsUnknown() bool { + return IsUnknown(Variable{ + Type: n.Typex, + Value: n.Value, + }) +} diff --git a/vendor/github.com/hashicorp/hil/ast/output.go b/vendor/github.com/hashicorp/hil/ast/output.go new file mode 100644 index 0000000000..1e27f970b3 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/output.go @@ -0,0 +1,78 @@ +package ast + +import ( + "bytes" + "fmt" +) + +// Output represents the root node of all interpolation evaluations. If the +// output only has one expression which is either a TypeList or TypeMap, the +// Output can be type-asserted to []interface{} or map[string]interface{} +// respectively. Otherwise the Output evaluates as a string, and concatenates +// the evaluation of each expression. 
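+//
+// For example (illustrative, with `upper` standing in for any function
+// registered in the Scope): evaluating `foo ${upper("bar")}` yields an
+// Output of two expressions whose results, "foo " and "BAR", concatenate
+// into the single string "foo BAR".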
+type Output struct { + Exprs []Node + Posx Pos +} + +func (n *Output) Accept(v Visitor) Node { + for i, expr := range n.Exprs { + n.Exprs[i] = expr.Accept(v) + } + + return v(n) +} + +func (n *Output) Pos() Pos { + return n.Posx +} + +func (n *Output) GoString() string { + return fmt.Sprintf("*%#v", *n) +} + +func (n *Output) String() string { + var b bytes.Buffer + for _, expr := range n.Exprs { + b.WriteString(fmt.Sprintf("%s", expr)) + } + + return b.String() +} + +func (n *Output) Type(s Scope) (Type, error) { + // Special case no expressions for backward compatibility + if len(n.Exprs) == 0 { + return TypeString, nil + } + + // Special case a single expression of types list or map + if len(n.Exprs) == 1 { + exprType, err := n.Exprs[0].Type(s) + if err != nil { + return TypeInvalid, err + } + switch exprType { + case TypeList: + return TypeList, nil + case TypeMap: + return TypeMap, nil + } + } + + // Otherwise ensure all our expressions are strings + for index, expr := range n.Exprs { + exprType, err := expr.Type(s) + if err != nil { + return TypeInvalid, err + } + // We only look for things we know we can't coerce with an implicit conversion func + if exprType == TypeList || exprType == TypeMap { + return TypeInvalid, fmt.Errorf( + "multi-expression HIL outputs may only have string inputs: %d is type %s", + index, exprType) + } + } + + return TypeString, nil +} diff --git a/vendor/github.com/hashicorp/hil/ast/scope.go b/vendor/github.com/hashicorp/hil/ast/scope.go new file mode 100644 index 0000000000..7a975d9993 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/scope.go @@ -0,0 +1,90 @@ +package ast + +import ( + "fmt" + "reflect" +) + +// Scope is the interface used to look up variables and functions while +// evaluating. How these functions/variables are defined are up to the caller. +type Scope interface { + LookupFunc(string) (Function, bool) + LookupVar(string) (Variable, bool) +} + +// Variable is a variable value for execution given as input to the engine. +// It records the value of a variables along with their type. +type Variable struct { + Value interface{} + Type Type +} + +// NewVariable creates a new Variable for the given value. This will +// attempt to infer the correct type. If it can't, an error will be returned. +func NewVariable(v interface{}) (result Variable, err error) { + switch v := reflect.ValueOf(v); v.Kind() { + case reflect.String: + result.Type = TypeString + default: + err = fmt.Errorf("Unknown type: %s", v.Kind()) + } + + result.Value = v + return +} + +// String implements Stringer on Variable, displaying the type and value +// of the Variable. +func (v Variable) String() string { + return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value) +} + +// Function defines a function that can be executed by the engine. +// The type checker will validate that the proper types will be called +// to the callback. +type Function struct { + // ArgTypes is the list of types in argument order. These are the + // required arguments. + // + // ReturnType is the type of the returned value. The Callback MUST + // return this type. + ArgTypes []Type + ReturnType Type + + // Variadic, if true, says that this function is variadic, meaning + // it takes a variable number of arguments. In this case, the + // VariadicType must be set. + Variadic bool + VariadicType Type + + // Callback is the function called for a function. The argument + // types are guaranteed to match the spec above by the type checker. 
+
+// BasicScope is a simple scope that looks up variables and functions
+// using a map.
+type BasicScope struct {
+ FuncMap map[string]Function
+ VarMap map[string]Variable
+}
+
+func (s *BasicScope) LookupFunc(n string) (Function, bool) {
+ if s == nil {
+ return Function{}, false
+ }
+
+ v, ok := s.FuncMap[n]
+ return v, ok
+}
+
+func (s *BasicScope) LookupVar(n string) (Variable, bool) {
+ if s == nil {
+ return Variable{}, false
+ }
+
+ v, ok := s.VarMap[n]
+ return v, ok
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/stack.go b/vendor/github.com/hashicorp/hil/ast/stack.go
new file mode 100644
index 0000000000..bd2bc15786
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/stack.go
@@ -0,0 +1,25 @@
+package ast
+
+// Stack is a stack of Node.
+type Stack struct {
+ stack []Node
+}
+
+func (s *Stack) Len() int {
+ return len(s.stack)
+}
+
+func (s *Stack) Push(n Node) {
+ s.stack = append(s.stack, n)
+}
+
+func (s *Stack) Pop() Node {
+ x := s.stack[len(s.stack)-1]
+ s.stack[len(s.stack)-1] = nil
+ s.stack = s.stack[:len(s.stack)-1]
+ return x
+}
+
+func (s *Stack) Reset() {
+ s.stack = nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go
new file mode 100644
index 0000000000..1f51a98dd5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/type_string.go
@@ -0,0 +1,54 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT
+
+package ast
+
+import "fmt"
+
+const (
+ _Type_name_0 = "TypeInvalid"
+ _Type_name_1 = "TypeAny"
+ _Type_name_2 = "TypeBool"
+ _Type_name_3 = "TypeString"
+ _Type_name_4 = "TypeInt"
+ _Type_name_5 = "TypeFloat"
+ _Type_name_6 = "TypeList"
+ _Type_name_7 = "TypeMap"
+ _Type_name_8 = "TypeUnknown"
+)
+
+var (
+ _Type_index_0 = [...]uint8{0, 11}
+ _Type_index_1 = [...]uint8{0, 7}
+ _Type_index_2 = [...]uint8{0, 8}
+ _Type_index_3 = [...]uint8{0, 10}
+ _Type_index_4 = [...]uint8{0, 7}
+ _Type_index_5 = [...]uint8{0, 9}
+ _Type_index_6 = [...]uint8{0, 8}
+ _Type_index_7 = [...]uint8{0, 7}
+ _Type_index_8 = [...]uint8{0, 11}
+)
+
+func (i Type) String() string {
+ switch {
+ case i == 0:
+ return _Type_name_0
+ case i == 2:
+ return _Type_name_1
+ case i == 4:
+ return _Type_name_2
+ case i == 8:
+ return _Type_name_3
+ case i == 16:
+ return _Type_name_4
+ case i == 32:
+ return _Type_name_5
+ case i == 64:
+ return _Type_name_6
+ case i == 128:
+ return _Type_name_7
+ case i == 256:
+ return _Type_name_8
+ default:
+ return fmt.Sprintf("Type(%d)", i)
+ }
+}
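Before moving on to unknown.go below, a small hedged sketch of the deep "unknown" check it provides; `hil.UnknownValue` is the sentinel defined later in this diff (convert.go):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	list := ast.Variable{
		Type: ast.TypeList,
		Value: []ast.Variable{
			{Type: ast.TypeString, Value: "known"},
			{Type: ast.TypeUnknown, Value: hil.UnknownValue},
		},
	}
	// IsUnknown recurses into the list and finds the unknown element.
	fmt.Println(ast.IsUnknown(list)) // true
}
```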
diff --git a/vendor/github.com/hashicorp/hil/ast/unknown.go b/vendor/github.com/hashicorp/hil/ast/unknown.go
new file mode 100644
index 0000000000..d6ddaecc78
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/unknown.go
@@ -0,0 +1,30 @@
+package ast
+
+// IsUnknown reports whether a variable is unknown or contains any value
+// that is unknown. This will recurse into lists and maps and so on.
+func IsUnknown(v Variable) bool {
+ // If it is unknown itself, return true
+ if v.Type == TypeUnknown {
+ return true
+ }
+
+ // If it is a container type, check the values
+ switch v.Type {
+ case TypeList:
+ for _, el := range v.Value.([]Variable) {
+ if IsUnknown(el) {
+ return true
+ }
+ }
+ case TypeMap:
+ for _, el := range v.Value.(map[string]Variable) {
+ if IsUnknown(el) {
+ return true
+ }
+ }
+ default:
+ }
+
+ // Not a container type, or it survived the above checks
+ return false
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/variable_access.go b/vendor/github.com/hashicorp/hil/ast/variable_access.go
new file mode 100644
index 0000000000..4c1362d753
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/variable_access.go
@@ -0,0 +1,36 @@
+package ast
+
+import (
+ "fmt"
+)
+
+// VariableAccess represents a variable access.
+type VariableAccess struct {
+ Name string
+ Posx Pos
+}
+
+func (n *VariableAccess) Accept(v Visitor) Node {
+ return v(n)
+}
+
+func (n *VariableAccess) Pos() Pos {
+ return n.Posx
+}
+
+func (n *VariableAccess) GoString() string {
+ return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *VariableAccess) String() string {
+ return fmt.Sprintf("Variable(%s)", n.Name)
+}
+
+func (n *VariableAccess) Type(s Scope) (Type, error) {
+ v, ok := s.LookupVar(n.Name)
+ if !ok {
+ return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name)
+ }
+
+ return v.Type, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
new file mode 100644
index 0000000000..06bd18de2a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
@@ -0,0 +1,63 @@
+package ast
+
+import "fmt"
+
+func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) {
+ if len(list) == 0 {
+ return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
+ }
+
+ elemType := TypeUnknown
+ for _, v := range list {
+ if v.Type == TypeUnknown {
+ continue
+ }
+
+ if elemType == TypeUnknown {
+ elemType = v.Type
+ continue
+ }
+
+ if v.Type != elemType {
+ return TypeInvalid, fmt.Errorf(
+ "list %q does not have homogenous types. found %s and then %s",
+ variableName,
+ elemType, v.Type,
+ )
+ }
+
+ elemType = v.Type
+ }
+
+ return elemType, nil
+}
+
+func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) {
+ if len(vmap) == 0 {
+ return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName)
+ }
+
+ elemType := TypeUnknown
+ for _, v := range vmap {
+ if v.Type == TypeUnknown {
+ continue
+ }
+
+ if elemType == TypeUnknown {
+ elemType = v.Type
+ continue
+ }
+
+ if v.Type != elemType {
+ return TypeInvalid, fmt.Errorf(
+ "map %q does not have homogenous types. 
found %s and then %s", + variableName, + elemType, v.Type, + ) + } + + elemType = v.Type + } + + return elemType, nil +} diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go new file mode 100644 index 0000000000..909c788a2c --- /dev/null +++ b/vendor/github.com/hashicorp/hil/builtins.go @@ -0,0 +1,331 @@ +package hil + +import ( + "errors" + "strconv" + + "github.com/hashicorp/hil/ast" +) + +// NOTE: All builtins are tested in engine_test.go + +func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope { + if scope == nil { + scope = new(ast.BasicScope) + } + if scope.FuncMap == nil { + scope.FuncMap = make(map[string]ast.Function) + } + + // Implicit conversions + scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString() + scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt() + scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString() + scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat() + scope.FuncMap["__builtin_IntToString"] = builtinIntToString() + scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt() + scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat() + scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool() + + // Math operations + scope.FuncMap["__builtin_IntMath"] = builtinIntMath() + scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath() + scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare() + scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare() + scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare() + scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare() + scope.FuncMap["__builtin_Logical"] = builtinLogical() + return scope +} + +func builtinFloatMath() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt}, + Variadic: true, + VariadicType: ast.TypeFloat, + ReturnType: ast.TypeFloat, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + result := args[1].(float64) + for _, raw := range args[2:] { + arg := raw.(float64) + switch op { + case ast.ArithmeticOpAdd: + result += arg + case ast.ArithmeticOpSub: + result -= arg + case ast.ArithmeticOpMul: + result *= arg + case ast.ArithmeticOpDiv: + result /= arg + } + } + + return result, nil + }, + } +} + +func builtinIntMath() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt}, + Variadic: true, + VariadicType: ast.TypeInt, + ReturnType: ast.TypeInt, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + result := args[1].(int) + for _, raw := range args[2:] { + arg := raw.(int) + switch op { + case ast.ArithmeticOpAdd: + result += arg + case ast.ArithmeticOpSub: + result -= arg + case ast.ArithmeticOpMul: + result *= arg + case ast.ArithmeticOpDiv: + if arg == 0 { + return nil, errors.New("divide by zero") + } + + result /= arg + case ast.ArithmeticOpMod: + if arg == 0 { + return nil, errors.New("divide by zero") + } + + result = result % arg + } + } + + return result, nil + }, + } +} + +func builtinBoolCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(bool) + rhs := args[2].(bool) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + default: + return nil, 
errors.New("invalid comparison operation") + } + }, + } +} + +func builtinFloatCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(float64) + rhs := args[2].(float64) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + case ast.ArithmeticOpLessThan: + return lhs < rhs, nil + case ast.ArithmeticOpLessThanOrEqual: + return lhs <= rhs, nil + case ast.ArithmeticOpGreaterThan: + return lhs > rhs, nil + case ast.ArithmeticOpGreaterThanOrEqual: + return lhs >= rhs, nil + default: + return nil, errors.New("invalid comparison operation") + } + }, + } +} + +func builtinIntCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(int) + rhs := args[2].(int) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + case ast.ArithmeticOpLessThan: + return lhs < rhs, nil + case ast.ArithmeticOpLessThanOrEqual: + return lhs <= rhs, nil + case ast.ArithmeticOpGreaterThan: + return lhs > rhs, nil + case ast.ArithmeticOpGreaterThanOrEqual: + return lhs >= rhs, nil + default: + return nil, errors.New("invalid comparison operation") + } + }, + } +} + +func builtinStringCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(string) + rhs := args[2].(string) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + default: + return nil, errors.New("invalid comparison operation") + } + }, + } +} + +func builtinLogical() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt}, + Variadic: true, + VariadicType: ast.TypeBool, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + result := args[1].(bool) + for _, raw := range args[2:] { + arg := raw.(bool) + switch op { + case ast.ArithmeticOpLogicalOr: + result = result || arg + case ast.ArithmeticOpLogicalAnd: + result = result && arg + default: + return nil, errors.New("invalid logical operator") + } + } + + return result, nil + }, + } +} + +func builtinFloatToInt() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeFloat}, + ReturnType: ast.TypeInt, + Callback: func(args []interface{}) (interface{}, error) { + return int(args[0].(float64)), nil + }, + } +} + +func builtinFloatToString() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeFloat}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + return strconv.FormatFloat( + args[0].(float64), 'g', -1, 64), nil + }, + } +} + +func builtinIntToFloat() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt}, + ReturnType: ast.TypeFloat, + Callback: func(args []interface{}) (interface{}, error) { + return float64(args[0].(int)), nil + }, + } +} + +func 
builtinIntToString() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return strconv.FormatInt(int64(args[0].(int)), 10), nil
+ },
+ }
+}
+
+func builtinStringToInt() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeInt,
+ Callback: func(args []interface{}) (interface{}, error) {
+ v, err := strconv.ParseInt(args[0].(string), 0, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ return int(v), nil
+ },
+ }
+}
+
+func builtinStringToFloat() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ v, err := strconv.ParseFloat(args[0].(string), 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ },
+ }
+}
+
+func builtinBoolToString() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeBool},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return strconv.FormatBool(args[0].(bool)), nil
+ },
+ }
+}
+
+func builtinStringToBool() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ v, err := strconv.ParseBool(args[0].(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return v, nil
+ },
+ }
+}
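Taken together with the implicit-conversion table registered in eval.go later in this diff, these builtins let expressions mix types. A hedged end-to-end sketch (the `count` variable name is invented for illustration); `"${count + "1"}"` exercises `__builtin_StringToInt` on the string operand and `__builtin_IntToString` on the final result:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	tree, err := hil.Parse(`${count + "1"}`)
	if err != nil {
		log.Fatal(err)
	}

	result, err := hil.Eval(tree, &hil.EvalConfig{
		GlobalScope: &ast.BasicScope{
			VarMap: map[string]ast.Variable{
				"count": {Type: ast.TypeInt, Value: 5},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result.Value) // "6"
}
```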
diff --git a/vendor/github.com/hashicorp/hil/check_identifier.go b/vendor/github.com/hashicorp/hil/check_identifier.go
new file mode 100644
index 0000000000..474f50588e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/check_identifier.go
@@ -0,0 +1,88 @@
+package hil
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// IdentifierCheck is a SemanticChecker that checks that all identifiers
+// resolve properly and that the right number of arguments are passed
+// to functions.
+type IdentifierCheck struct {
+ Scope ast.Scope
+
+ err error
+ lock sync.Mutex
+}
+
+func (c *IdentifierCheck) Visit(root ast.Node) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ defer c.reset()
+ root.Accept(c.visit)
+ return c.err
+}
+
+func (c *IdentifierCheck) visit(raw ast.Node) ast.Node {
+ if c.err != nil {
+ return raw
+ }
+
+ switch n := raw.(type) {
+ case *ast.Call:
+ c.visitCall(n)
+ case *ast.VariableAccess:
+ c.visitVariableAccess(n)
+ case *ast.Output:
+ // Ignore
+ case *ast.LiteralNode:
+ // Ignore
+ default:
+ // Ignore
+ }
+
+ // We never do replacement with this visitor
+ return raw
+}
+
+func (c *IdentifierCheck) visitCall(n *ast.Call) {
+ // Look up the function in the map
+ function, ok := c.Scope.LookupFunc(n.Func)
+ if !ok {
+ c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func))
+ return
+ }
+
+ // Break up the args into what is variadic and what is required
+ args := n.Args
+ if function.Variadic && len(args) > len(function.ArgTypes) {
+ args = n.Args[:len(function.ArgTypes)]
+ }
+
+ // Verify the number of arguments
+ if len(args) != len(function.ArgTypes) {
+ c.createErr(n, fmt.Sprintf(
+ "%s: expected %d arguments, got %d",
+ n.Func, len(function.ArgTypes), len(n.Args)))
+ return
+ }
+}
+
+func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) {
+ // Look up the variable in the map
+ if _, ok := c.Scope.LookupVar(n.Name); !ok {
+ c.createErr(n, fmt.Sprintf(
+ "unknown variable accessed: %s", n.Name))
+ return
+ }
+}
+
+func (c *IdentifierCheck) createErr(n ast.Node, str string) {
+ c.err = fmt.Errorf("%s: %s", n.Pos(), str)
+}
+
+func (c *IdentifierCheck) reset() {
+ c.err = nil
+}
diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go
new file mode 100644
index 0000000000..f16da39185
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/check_types.go
@@ -0,0 +1,668 @@
+package hil
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// TypeCheck implements ast.Visitor for type checking an AST tree.
+// It requires some configuration to look up the type of nodes.
+//
+// Optionally, instead of returning a type error it can insert implicit
+// type conversions for specific types, as specified by the Implicit
+// field. Note that this is organizationally a little odd to put into
+// this structure, but we'd rather do that than duplicate the type
+// checking logic multiple times.
+type TypeCheck struct {
+ Scope ast.Scope
+
+ // Implicit is a map of implicit type conversions that we can do,
+ // and that shouldn't error. The key of the first map is the from type,
+ // the key of the second map is the to type, and the final string
+ // value is the function to call (which must be registered in the Scope).
+ Implicit map[ast.Type]map[ast.Type]string
+
+ // Stack of types. This shouldn't be used directly except by implementations
+ // of TypeCheckNode.
+ Stack []ast.Type
+
+ err error
+ lock sync.Mutex
+}
+
+// TypeCheckNode is the interface that must be implemented by any
+// ast.Node that wants to support type-checking. If the type checker
+// encounters a node that doesn't implement this, it will error.
+type TypeCheckNode interface {
+ TypeCheck(*TypeCheck) (ast.Node, error)
+}
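A sketch of how a hypothetical custom node could plug into this checker by satisfying TypeCheckNode; `nowNode` is invented for illustration and would also need to implement ast.Node (Accept, Pos, and so on) elsewhere:

```go
package example

import (
	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

// nowNode is an invented custom expression node.
type nowNode struct{ Posx ast.Pos }

// TypeCheck lets TypeCheck.visit handle nowNode via its default case:
// push the node's result type onto the checker's stack and return the
// node unchanged.
func (n *nowNode) TypeCheck(v *hil.TypeCheck) (ast.Node, error) {
	v.StackPush(ast.TypeString)
	return n, nil
}
```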
+
+func (v *TypeCheck) Visit(root ast.Node) error {
+ v.lock.Lock()
+ defer v.lock.Unlock()
+ defer v.reset()
+ root.Accept(v.visit)
+
+ // If the resulting type is unknown, then just let the whole thing go.
+ if v.err == errExitUnknown {
+ v.err = nil
+ }
+
+ return v.err
+}
+
+func (v *TypeCheck) visit(raw ast.Node) ast.Node {
+ if v.err != nil {
+ return raw
+ }
+
+ var result ast.Node
+ var err error
+ switch n := raw.(type) {
+ case *ast.Arithmetic:
+ tc := &typeCheckArithmetic{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Call:
+ tc := &typeCheckCall{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Conditional:
+ tc := &typeCheckConditional{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Index:
+ tc := &typeCheckIndex{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.Output:
+ tc := &typeCheckOutput{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.LiteralNode:
+ tc := &typeCheckLiteral{n}
+ result, err = tc.TypeCheck(v)
+ case *ast.VariableAccess:
+ tc := &typeCheckVariableAccess{n}
+ result, err = tc.TypeCheck(v)
+ default:
+ tc, ok := raw.(TypeCheckNode)
+ if !ok {
+ err = fmt.Errorf("unknown node for type check: %#v", raw)
+ break
+ }
+
+ result, err = tc.TypeCheck(v)
+ }
+
+ if err != nil {
+ pos := raw.Pos()
+ v.err = fmt.Errorf("At column %d, line %d: %s",
+ pos.Column, pos.Line, err)
+ }
+
+ return result
+}
+
+type typeCheckArithmetic struct {
+ n *ast.Arithmetic
+}
+
+func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) {
+ // The arguments are on the stack in reverse order, so pop them off.
+ exprs := make([]ast.Type, len(tc.n.Exprs))
+ for i, _ := range tc.n.Exprs {
+ exprs[len(tc.n.Exprs)-1-i] = v.StackPop()
+ }
+
+ // If any operand is unknown then our result is automatically unknown
+ for _, ty := range exprs {
+ if ty == ast.TypeUnknown {
+ v.StackPush(ast.TypeUnknown)
+ return tc.n, nil
+ }
+ }
+
+ switch tc.n.Op {
+ case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr:
+ return tc.checkLogical(v, exprs)
+ case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual,
+ ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan,
+ ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual:
+ return tc.checkComparison(v, exprs)
+ default:
+ return tc.checkNumeric(v, exprs)
+ }
+
+}
+
+func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
+ // Determine the resulting type we want. We do this by going over
+ // every expression until we find one with a type we recognize.
+ // We do this because the first expr might be a string ("var.foo")
+ // and we need to know which type to implicitly convert to.
+ mathFunc := "__builtin_IntMath"
+ mathType := ast.TypeInt
+ for _, v := range exprs {
+ // We assume int math but if we find ANY float, the entire
+ // expression turns into floating point math.
+ if v == ast.TypeFloat {
+ mathFunc = "__builtin_FloatMath"
+ mathType = v
+ break
+ }
+ }
+
+ // Verify the args
+ for i, arg := range exprs {
+ if arg != mathType {
+ cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i])
+ if cn != nil {
+ tc.n.Exprs[i] = cn
+ continue
+ }
+
+ return nil, fmt.Errorf(
+ "operand %d should be %s, got %s",
+ i+1, mathType, arg)
+ }
+ }
+
+ // Modulo doesn't work for floats
+ if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod {
+ return nil, fmt.Errorf("modulo cannot be used with floats")
+ }
+
+ // Return type
+ v.StackPush(mathType)
+
+ // Replace our node with a call to the proper function. This isn't
+ // type checked but we already verified types.
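+ // The generated call carries the operator as its first argument (an
+ // int-typed literal holding the ast.ArithmeticOp), followed by the
+ // operand expressions in their original order; builtinIntMath and
+ // builtinFloatMath unpack their arguments using the same layout.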
+ args := make([]ast.Node, len(tc.n.Exprs)+1) + args[0] = &ast.LiteralNode{ + Value: tc.n.Op, + Typex: ast.TypeInt, + Posx: tc.n.Pos(), + } + copy(args[1:], tc.n.Exprs) + return &ast.Call{ + Func: mathFunc, + Args: args, + Posx: tc.n.Pos(), + }, nil +} + +func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { + if len(exprs) != 2 { + // This should never happen, because the parser never produces + // nodes that violate this. + return nil, fmt.Errorf( + "comparison operators must have exactly two operands", + ) + } + + // The first operand always dictates the type for a comparison. + compareFunc := "" + compareType := exprs[0] + switch compareType { + case ast.TypeBool: + compareFunc = "__builtin_BoolCompare" + case ast.TypeFloat: + compareFunc = "__builtin_FloatCompare" + case ast.TypeInt: + compareFunc = "__builtin_IntCompare" + case ast.TypeString: + compareFunc = "__builtin_StringCompare" + default: + return nil, fmt.Errorf( + "comparison operators apply only to bool, float, int, and string", + ) + } + + // For non-equality comparisons, we will do implicit conversions to + // integer types if possible. In this case, we need to go through and + // determine the type of comparison we're doing to enable the implicit + // conversion. + if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual { + compareFunc = "__builtin_IntCompare" + compareType = ast.TypeInt + for _, expr := range exprs { + if expr == ast.TypeFloat { + compareFunc = "__builtin_FloatCompare" + compareType = ast.TypeFloat + break + } + } + } + + // Verify (and possibly, convert) the args + for i, arg := range exprs { + if arg != compareType { + cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i]) + if cn != nil { + tc.n.Exprs[i] = cn + continue + } + + return nil, fmt.Errorf( + "operand %d should be %s, got %s", + i+1, compareType, arg, + ) + } + } + + // Only ints and floats can have the <, >, <= and >= operators applied + switch tc.n.Op { + case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual: + // anything goes + default: + switch compareType { + case ast.TypeFloat, ast.TypeInt: + // fine + default: + return nil, fmt.Errorf( + "<, >, <= and >= may apply only to int and float values", + ) + } + } + + // Comparison operators always return bool + v.StackPush(ast.TypeBool) + + // Replace our node with a call to the proper function. This isn't + // type checked but we already verified types. 
+ args := make([]ast.Node, len(tc.n.Exprs)+1) + args[0] = &ast.LiteralNode{ + Value: tc.n.Op, + Typex: ast.TypeInt, + Posx: tc.n.Pos(), + } + copy(args[1:], tc.n.Exprs) + return &ast.Call{ + Func: compareFunc, + Args: args, + Posx: tc.n.Pos(), + }, nil +} + +func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { + for i, t := range exprs { + if t != ast.TypeBool { + cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i]) + if cn == nil { + return nil, fmt.Errorf( + "logical operators require boolean operands, not %s", + t, + ) + } + tc.n.Exprs[i] = cn + } + } + + // Return type is always boolean + v.StackPush(ast.TypeBool) + + // Arithmetic nodes are replaced with a call to a built-in function + args := make([]ast.Node, len(tc.n.Exprs)+1) + args[0] = &ast.LiteralNode{ + Value: tc.n.Op, + Typex: ast.TypeInt, + Posx: tc.n.Pos(), + } + copy(args[1:], tc.n.Exprs) + return &ast.Call{ + Func: "__builtin_Logical", + Args: args, + Posx: tc.n.Pos(), + }, nil +} + +type typeCheckCall struct { + n *ast.Call +} + +func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { + // Look up the function in the map + function, ok := v.Scope.LookupFunc(tc.n.Func) + if !ok { + return nil, fmt.Errorf("unknown function called: %s", tc.n.Func) + } + + // The arguments are on the stack in reverse order, so pop them off. + args := make([]ast.Type, len(tc.n.Args)) + for i, _ := range tc.n.Args { + args[len(tc.n.Args)-1-i] = v.StackPop() + } + + // Verify the args + for i, expected := range function.ArgTypes { + if expected == ast.TypeAny { + continue + } + + if args[i] == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + + if args[i] != expected { + cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i]) + if cn != nil { + tc.n.Args[i] = cn + continue + } + + return nil, fmt.Errorf( + "%s: argument %d should be %s, got %s", + tc.n.Func, i+1, expected.Printable(), args[i].Printable()) + } + } + + // If we're variadic, then verify the types there + if function.Variadic && function.VariadicType != ast.TypeAny { + args = args[len(function.ArgTypes):] + for i, t := range args { + if t == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + + if t != function.VariadicType { + realI := i + len(function.ArgTypes) + cn := v.ImplicitConversion( + t, function.VariadicType, tc.n.Args[realI]) + if cn != nil { + tc.n.Args[realI] = cn + continue + } + + return nil, fmt.Errorf( + "%s: argument %d should be %s, got %s", + tc.n.Func, realI, + function.VariadicType.Printable(), t.Printable()) + } + } + } + + // Return type + v.StackPush(function.ReturnType) + + return tc.n, nil +} + +type typeCheckConditional struct { + n *ast.Conditional +} + +func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) { + // On the stack we have the types of the condition, true and false + // expressions, but they are in reverse order. 
+ falseType := v.StackPop() + trueType := v.StackPop() + condType := v.StackPop() + + if condType == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + + if condType != ast.TypeBool { + cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr) + if cn == nil { + return nil, fmt.Errorf( + "condition must be type bool, not %s", condType.Printable(), + ) + } + tc.n.CondExpr = cn + } + + // The types of the true and false expression must match + if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown { + + // Since passing around stringified versions of other types is + // common, we pragmatically allow the false expression to dictate + // the result type when the true expression is a string. + if trueType == ast.TypeString { + cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr) + if cn == nil { + return nil, fmt.Errorf( + "true and false expression types must match; have %s and %s", + trueType.Printable(), falseType.Printable(), + ) + } + tc.n.TrueExpr = cn + trueType = falseType + } else { + cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr) + if cn == nil { + return nil, fmt.Errorf( + "true and false expression types must match; have %s and %s", + trueType.Printable(), falseType.Printable(), + ) + } + tc.n.FalseExpr = cn + falseType = trueType + } + } + + // Currently list and map types cannot be used, because we cannot + // generally assert that their element types are consistent. + // Such support might be added later, either by improving the type + // system or restricting usage to only variable and literal expressions, + // but for now this is simply prohibited because it doesn't seem to + // be a common enough case to be worth the complexity. + switch trueType { + case ast.TypeList: + return nil, fmt.Errorf( + "conditional operator cannot be used with list values", + ) + case ast.TypeMap: + return nil, fmt.Errorf( + "conditional operator cannot be used with map values", + ) + } + + // Result type (guaranteed to also match falseType due to the above) + if trueType == ast.TypeUnknown { + // falseType may also be unknown, but that's okay because two + // unknowns means our result is unknown anyway. 
+ v.StackPush(falseType) + } else { + v.StackPush(trueType) + } + + return tc.n, nil +} + +type typeCheckOutput struct { + n *ast.Output +} + +func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { + n := tc.n + types := make([]ast.Type, len(n.Exprs)) + for i, _ := range n.Exprs { + types[len(n.Exprs)-1-i] = v.StackPop() + } + + for _, ty := range types { + if ty == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + } + + // If there is only one argument and it is a list, we evaluate to a list + if len(types) == 1 { + switch t := types[0]; t { + case ast.TypeList: + fallthrough + case ast.TypeMap: + v.StackPush(t) + return n, nil + } + } + + // Otherwise, all concat args must be strings, so validate that + resultType := ast.TypeString + for i, t := range types { + + if t == ast.TypeUnknown { + resultType = ast.TypeUnknown + continue + } + + if t != ast.TypeString { + cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i]) + if cn != nil { + n.Exprs[i] = cn + continue + } + + return nil, fmt.Errorf( + "output of an HIL expression must be a string, or a single list (argument %d is %s)", i+1, t) + } + } + + // This always results in type string, unless there are unknowns + v.StackPush(resultType) + + return n, nil +} + +type typeCheckLiteral struct { + n *ast.LiteralNode +} + +func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) { + v.StackPush(tc.n.Typex) + return tc.n, nil +} + +type typeCheckVariableAccess struct { + n *ast.VariableAccess +} + +func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) { + // Look up the variable in the map + variable, ok := v.Scope.LookupVar(tc.n.Name) + if !ok { + return nil, fmt.Errorf( + "unknown variable accessed: %s", tc.n.Name) + } + + // Add the type to the stack + v.StackPush(variable.Type) + + return tc.n, nil +} + +type typeCheckIndex struct { + n *ast.Index +} + +func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) { + keyType := v.StackPop() + targetType := v.StackPop() + + if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + + // Ensure we have a VariableAccess as the target + varAccessNode, ok := tc.n.Target.(*ast.VariableAccess) + if !ok { + return nil, fmt.Errorf( + "target of an index must be a VariableAccess node, was %T", tc.n.Target) + } + + // Get the variable + variable, ok := v.Scope.LookupVar(varAccessNode.Name) + if !ok { + return nil, fmt.Errorf( + "unknown variable accessed: %s", varAccessNode.Name) + } + + switch targetType { + case ast.TypeList: + if keyType != ast.TypeInt { + tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key) + if tc.n.Key == nil { + return nil, fmt.Errorf( + "key of an index must be an int, was %s", keyType) + } + } + + valType, err := ast.VariableListElementTypesAreHomogenous( + varAccessNode.Name, variable.Value.([]ast.Variable)) + if err != nil { + return tc.n, err + } + + v.StackPush(valType) + return tc.n, nil + case ast.TypeMap: + if keyType != ast.TypeString { + tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key) + if tc.n.Key == nil { + return nil, fmt.Errorf( + "key of an index must be a string, was %s", keyType) + } + } + + valType, err := ast.VariableMapValueTypesAreHomogenous( + varAccessNode.Name, variable.Value.(map[string]ast.Variable)) + if err != nil { + return tc.n, err + } + + v.StackPush(valType) + return tc.n, nil + default: + return nil, fmt.Errorf("invalid index operation into non-indexable type: %s", 
variable.Type) + } +} + +func (v *TypeCheck) ImplicitConversion( + actual ast.Type, expected ast.Type, n ast.Node) ast.Node { + if v.Implicit == nil { + return nil + } + + fromMap, ok := v.Implicit[actual] + if !ok { + return nil + } + + toFunc, ok := fromMap[expected] + if !ok { + return nil + } + + return &ast.Call{ + Func: toFunc, + Args: []ast.Node{n}, + Posx: n.Pos(), + } +} + +func (v *TypeCheck) reset() { + v.Stack = nil + v.err = nil +} + +func (v *TypeCheck) StackPush(t ast.Type) { + v.Stack = append(v.Stack, t) +} + +func (v *TypeCheck) StackPop() ast.Type { + var x ast.Type + x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1] + return x +} + +func (v *TypeCheck) StackPeek() ast.Type { + if len(v.Stack) == 0 { + return ast.TypeInvalid + } + + return v.Stack[len(v.Stack)-1] +} diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go new file mode 100644 index 0000000000..f2024d01c2 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/convert.go @@ -0,0 +1,159 @@ +package hil + +import ( + "fmt" + "reflect" + + "github.com/hashicorp/hil/ast" + "github.com/mitchellh/mapstructure" +) + +// UnknownValue is a sentinel value that can be used to denote +// that a value of a variable (or map element, list element, etc.) +// is unknown. This will always have the type ast.TypeUnknown. +const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +var hilMapstructureDecodeHookSlice []interface{} +var hilMapstructureDecodeHookStringSlice []string +var hilMapstructureDecodeHookMap map[string]interface{} + +// hilMapstructureWeakDecode behaves in the same way as mapstructure.WeakDecode +// but has a DecodeHook which defeats the backward compatibility mode of mapstructure +// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This +// allows us to use WeakDecode (desirable), but not fail on empty lists. 
+func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error { + config := &mapstructure.DecoderConfig{ + DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) { + sliceType := reflect.TypeOf(hilMapstructureDecodeHookSlice) + stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice) + mapType := reflect.TypeOf(hilMapstructureDecodeHookMap) + + if (source == sliceType || source == stringSliceType) && target == mapType { + return nil, fmt.Errorf("Cannot convert %s into a %s", source, target) + } + + return val, nil + }, + WeaklyTypedInput: true, + Result: rawVal, + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +func InterfaceToVariable(input interface{}) (ast.Variable, error) { + if inputVariable, ok := input.(ast.Variable); ok { + return inputVariable, nil + } + + var stringVal string + if err := hilMapstructureWeakDecode(input, &stringVal); err == nil { + // Special case the unknown value to turn into "unknown" + if stringVal == UnknownValue { + return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil + } + + // Otherwise return the string value + return ast.Variable{ + Type: ast.TypeString, + Value: stringVal, + }, nil + } + + var mapVal map[string]interface{} + if err := hilMapstructureWeakDecode(input, &mapVal); err == nil { + elements := make(map[string]ast.Variable) + for i, element := range mapVal { + varElement, err := InterfaceToVariable(element) + if err != nil { + return ast.Variable{}, err + } + elements[i] = varElement + } + + return ast.Variable{ + Type: ast.TypeMap, + Value: elements, + }, nil + } + + var sliceVal []interface{} + if err := hilMapstructureWeakDecode(input, &sliceVal); err == nil { + elements := make([]ast.Variable, len(sliceVal)) + for i, element := range sliceVal { + varElement, err := InterfaceToVariable(element) + if err != nil { + return ast.Variable{}, err + } + elements[i] = varElement + } + + return ast.Variable{ + Type: ast.TypeList, + Value: elements, + }, nil + } + + return ast.Variable{}, fmt.Errorf("value for conversion must be a string, interface{} or map[string]interface: got %T", input) +} + +func VariableToInterface(input ast.Variable) (interface{}, error) { + if input.Type == ast.TypeString { + if inputStr, ok := input.Value.(string); ok { + return inputStr, nil + } else { + return nil, fmt.Errorf("ast.Variable with type string has value which is not a string") + } + } + + if input.Type == ast.TypeList { + inputList, ok := input.Value.([]ast.Variable) + if !ok { + return nil, fmt.Errorf("ast.Variable with type list has value which is not a []ast.Variable") + } + + result := make([]interface{}, 0) + if len(inputList) == 0 { + return result, nil + } + + for _, element := range inputList { + if convertedElement, err := VariableToInterface(element); err == nil { + result = append(result, convertedElement) + } else { + return nil, err + } + } + + return result, nil + } + + if input.Type == ast.TypeMap { + inputMap, ok := input.Value.(map[string]ast.Variable) + if !ok { + return nil, fmt.Errorf("ast.Variable with type map has value which is not a map[string]ast.Variable") + } + + result := make(map[string]interface{}, 0) + if len(inputMap) == 0 { + return result, nil + } + + for key, value := range inputMap { + if convertedValue, err := VariableToInterface(value); err == nil { + result[key] = convertedValue + } else { + return nil, err + } + } + + return result, nil + } + + return nil, 
fmt.Errorf("unknown input type: %s", input.Type)
+}
diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go
new file mode 100644
index 0000000000..27820769e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/eval.go
@@ -0,0 +1,472 @@
+package hil
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/hil/ast"
+)
+
+// EvalConfig is the configuration for evaluating.
+type EvalConfig struct {
+ // GlobalScope is the global scope of execution for evaluation.
+ GlobalScope *ast.BasicScope
+
+ // SemanticChecks is a list of additional semantic checks that will be run
+ // on the tree prior to evaluating it. The type checker, identifier checker,
+ // etc. will be run before these automatically.
+ SemanticChecks []SemanticChecker
+}
+
+// SemanticChecker is the type that must be implemented to do a
+// semantic check on an AST tree. This will be called with the root node.
+type SemanticChecker func(ast.Node) error
+
+// EvaluationResult is a struct returned from the hil.Eval function,
+// representing the result of an interpolation. Results are returned in their
+// "natural" Go structure rather than in terms of the HIL AST. For the types
+// currently implemented, this means that the Value field can be interpreted as
+// the following Go types:
+// TypeInvalid: undefined
+// TypeString: string
+// TypeList: []interface{}
+// TypeMap: map[string]interface{}
+// TypeBool: bool
+type EvaluationResult struct {
+ Type EvalType
+ Value interface{}
+}
+
+// InvalidResult is a structure representing the result of a HIL interpolation
+// which has invalid syntax, missing variables, or some other type of error.
+// The error is described out of band in the accompanying error return value.
+var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil}
+
+// errExitUnknown is an internal error that when returned means the result
+// is an unknown value. We use this for early exit.
+var errExitUnknown = errors.New("unknown value")
+
+func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
+ output, outputType, err := internalEval(root, config)
+ if err != nil {
+ return InvalidResult, err
+ }
+
+ // If the result contains any nested unknowns then the result as a whole
+ // is unknown, so that callers only have to deal with "entirely known"
+ // or "entirely unknown" as outcomes.
+ if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) {
+ outputType = ast.TypeUnknown
+ output = UnknownValue
+ }
+
+ switch outputType {
+ case ast.TypeList:
+ val, err := VariableToInterface(ast.Variable{
+ Type: ast.TypeList,
+ Value: output,
+ })
+ return EvaluationResult{
+ Type: TypeList,
+ Value: val,
+ }, err
+ case ast.TypeMap:
+ val, err := VariableToInterface(ast.Variable{
+ Type: ast.TypeMap,
+ Value: output,
+ })
+ return EvaluationResult{
+ Type: TypeMap,
+ Value: val,
+ }, err
+ case ast.TypeString:
+ return EvaluationResult{
+ Type: TypeString,
+ Value: output,
+ }, nil
+ case ast.TypeBool:
+ return EvaluationResult{
+ Type: TypeBool,
+ Value: output,
+ }, nil
+ case ast.TypeUnknown:
+ return EvaluationResult{
+ Type: TypeUnknown,
+ Value: UnknownValue,
+ }, nil
+ default:
+ return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType)
+ }
+}
+
+// internalEval evaluates the given AST tree and returns its output value, the
+// type of the output, and any error that occurred.
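+//
+// internalEval backs the exported Eval: it registers the built-in
+// functions and implicit type conversions, runs the identifier and type
+// checks (plus any user-supplied SemanticChecks), and then walks the
+// checked tree with an evalVisitor.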
+func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) { + // Copy the scope so we can add our builtins + if config == nil { + config = new(EvalConfig) + } + scope := registerBuiltins(config.GlobalScope) + implicitMap := map[ast.Type]map[ast.Type]string{ + ast.TypeFloat: { + ast.TypeInt: "__builtin_FloatToInt", + ast.TypeString: "__builtin_FloatToString", + }, + ast.TypeInt: { + ast.TypeFloat: "__builtin_IntToFloat", + ast.TypeString: "__builtin_IntToString", + }, + ast.TypeString: { + ast.TypeInt: "__builtin_StringToInt", + ast.TypeFloat: "__builtin_StringToFloat", + ast.TypeBool: "__builtin_StringToBool", + }, + ast.TypeBool: { + ast.TypeString: "__builtin_BoolToString", + }, + } + + // Build our own semantic checks that we always run + tv := &TypeCheck{Scope: scope, Implicit: implicitMap} + ic := &IdentifierCheck{Scope: scope} + + // Build up the semantic checks for execution + checks := make( + []SemanticChecker, + len(config.SemanticChecks), + len(config.SemanticChecks)+2) + copy(checks, config.SemanticChecks) + checks = append(checks, ic.Visit) + checks = append(checks, tv.Visit) + + // Run the semantic checks + for _, check := range checks { + if err := check(root); err != nil { + return nil, ast.TypeInvalid, err + } + } + + // Execute + v := &evalVisitor{Scope: scope} + return v.Visit(root) +} + +// EvalNode is the interface that must be implemented by any ast.Node +// to support evaluation. This will be called in visitor pattern order. +// The result of each call to Eval is automatically pushed onto the +// stack as a LiteralNode. Pop elements off the stack to get child +// values. +type EvalNode interface { + Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) +} + +type evalVisitor struct { + Scope ast.Scope + Stack ast.Stack + + err error + lock sync.Mutex +} + +func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) { + // Run the actual visitor pattern + root.Accept(v.visit) + + // Get our result and clear out everything else + var result *ast.LiteralNode + if v.Stack.Len() > 0 { + result = v.Stack.Pop().(*ast.LiteralNode) + } else { + result = new(ast.LiteralNode) + } + resultErr := v.err + if resultErr == errExitUnknown { + // This means the return value is unknown and we used the error + // as an early exit mechanism. Reset since the value on the stack + // should be the unknown value. + resultErr = nil + } + + // Clear everything else so we aren't just dangling + v.Stack.Reset() + v.err = nil + + t, err := result.Type(v.Scope) + if err != nil { + return nil, ast.TypeInvalid, err + } + + return result.Value, t, resultErr +} + +func (v *evalVisitor) visit(raw ast.Node) ast.Node { + if v.err != nil { + return raw + } + + en, err := evalNode(raw) + if err != nil { + v.err = err + return raw + } + + out, outType, err := en.Eval(v.Scope, &v.Stack) + if err != nil { + v.err = err + return raw + } + + v.Stack.Push(&ast.LiteralNode{ + Value: out, + Typex: outType, + }) + + if outType == ast.TypeUnknown { + // Halt immediately + v.err = errExitUnknown + return raw + } + + return raw +} + +// evalNode is a private function that returns an EvalNode for built-in +// types as well as any other EvalNode implementations. 
+func evalNode(raw ast.Node) (EvalNode, error) {
+ switch n := raw.(type) {
+ case *ast.Index:
+ return &evalIndex{n}, nil
+ case *ast.Call:
+ return &evalCall{n}, nil
+ case *ast.Conditional:
+ return &evalConditional{n}, nil
+ case *ast.Output:
+ return &evalOutput{n}, nil
+ case *ast.LiteralNode:
+ return &evalLiteralNode{n}, nil
+ case *ast.VariableAccess:
+ return &evalVariableAccess{n}, nil
+ default:
+ en, ok := n.(EvalNode)
+ if !ok {
+ return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw)
+ }
+
+ return en, nil
+ }
+}
+
+type evalCall struct{ *ast.Call }
+
+func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ // Look up the function in the map
+ function, ok := s.LookupFunc(v.Func)
+ if !ok {
+ return nil, ast.TypeInvalid, fmt.Errorf(
+ "unknown function called: %s", v.Func)
+ }
+
+ // The arguments are on the stack in reverse order, so pop them off.
+ args := make([]interface{}, len(v.Args))
+ for i, _ := range v.Args {
+ node := stack.Pop().(*ast.LiteralNode)
+ if node.IsUnknown() {
+ // If any arguments are unknown then the result is automatically unknown
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+ args[len(v.Args)-1-i] = node.Value
+ }
+
+ // Call the function
+ result, err := function.Callback(args)
+ if err != nil {
+ return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err)
+ }
+
+ return result, function.ReturnType, nil
+}
+
+type evalConditional struct{ *ast.Conditional }
+
+func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ // On the stack we have literal nodes representing the resulting values
+ // of the condition, true and false expressions, but they are in reverse
+ // order.
+ falseLit := stack.Pop().(*ast.LiteralNode)
+ trueLit := stack.Pop().(*ast.LiteralNode)
+ condLit := stack.Pop().(*ast.LiteralNode)
+
+ if condLit.IsUnknown() {
+ // If our conditional is unknown then our result is also unknown
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+
+ if condLit.Value.(bool) {
+ return trueLit.Value, trueLit.Typex, nil
+ } else {
+ return falseLit.Value, falseLit.Typex, nil
+ }
+}
+
+type evalIndex struct{ *ast.Index }
+
+func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+ key := stack.Pop().(*ast.LiteralNode)
+ target := stack.Pop().(*ast.LiteralNode)
+
+ variableName := v.Index.Target.(*ast.VariableAccess).Name
+
+ if key.IsUnknown() {
+ // If our key is unknown then our result is also unknown
+ return UnknownValue, ast.TypeUnknown, nil
+ }
+
+ // For target, we'll accept collections containing unknown values but
+ // we still need to catch when the collection itself is unknown, shallowly.
+ if target.Typex == ast.TypeUnknown { + return UnknownValue, ast.TypeUnknown, nil + } + + switch target.Typex { + case ast.TypeList: + return v.evalListIndex(variableName, target.Value, key.Value) + case ast.TypeMap: + return v.evalMapIndex(variableName, target.Value, key.Value) + default: + return nil, ast.TypeInvalid, fmt.Errorf( + "target %q for indexing must be ast.TypeList or ast.TypeMap, is %s", + variableName, target.Typex) + } +} + +func (v *evalIndex) evalListIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { + // We assume type checking was already done and we can assume that target + // is a list and key is an int + list, ok := target.([]ast.Variable) + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "cannot cast target to []Variable, is: %T", target) + } + + keyInt, ok := key.(int) + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "cannot cast key to int, is: %T", key) + } + + if len(list) == 0 { + return nil, ast.TypeInvalid, fmt.Errorf("list is empty") + } + + if keyInt < 0 || len(list) < keyInt+1 { + return nil, ast.TypeInvalid, fmt.Errorf( + "index %d out of range for list %s (max %d)", + keyInt, variableName, len(list)) + } + + returnVal := list[keyInt].Value + returnType := list[keyInt].Type + return returnVal, returnType, nil +} + +func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) { + // We assume type checking was already done and we can assume that target + // is a map and key is a string + vmap, ok := target.(map[string]ast.Variable) + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "cannot cast target to map[string]Variable, is: %T", target) + } + + keyString, ok := key.(string) + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "cannot cast key to string, is: %T", key) + } + + if len(vmap) == 0 { + return nil, ast.TypeInvalid, fmt.Errorf("map is empty") + } + + value, ok := vmap[keyString] + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "key %q does not exist in map %s", keyString, variableName) + } + + return value.Value, value.Type, nil +} + +type evalOutput struct{ *ast.Output } + +func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { + // The expressions should all be on the stack in reverse + // order. So pop them off, reverse their order, and concatenate. + nodes := make([]*ast.LiteralNode, 0, len(v.Exprs)) + haveUnknown := false + for range v.Exprs { + n := stack.Pop().(*ast.LiteralNode) + nodes = append(nodes, n) + + // If we have any unknowns then the whole result is unknown + // (we must deal with this first, because the type checker can + // skip type conversions in the presence of unknowns, and thus + // any of our other nodes may be incorrectly typed.) 
+ if n.IsUnknown() { + haveUnknown = true + } + } + + if haveUnknown { + return UnknownValue, ast.TypeUnknown, nil + } + + // Special case the single list and map + if len(nodes) == 1 { + switch t := nodes[0].Typex; t { + case ast.TypeList: + fallthrough + case ast.TypeMap: + fallthrough + case ast.TypeUnknown: + return nodes[0].Value, t, nil + } + } + + // Otherwise concatenate the strings + var buf bytes.Buffer + for i := len(nodes) - 1; i >= 0; i-- { + if nodes[i].Typex != ast.TypeString { + return nil, ast.TypeInvalid, fmt.Errorf( + "invalid output with %s value at index %d: %#v", + nodes[i].Typex, + i, + nodes[i].Value, + ) + } + buf.WriteString(nodes[i].Value.(string)) + } + + return buf.String(), ast.TypeString, nil +} + +type evalLiteralNode struct{ *ast.LiteralNode } + +func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) { + return v.Value, v.Typex, nil +} + +type evalVariableAccess struct{ *ast.VariableAccess } + +func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) { + // Look up the variable in the map + variable, ok := scope.LookupVar(v.Name) + if !ok { + return nil, ast.TypeInvalid, fmt.Errorf( + "unknown variable accessed: %s", v.Name) + } + + return variable.Value, variable.Type, nil +} diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go new file mode 100644 index 0000000000..6946ecd23f --- /dev/null +++ b/vendor/github.com/hashicorp/hil/eval_type.go @@ -0,0 +1,16 @@ +package hil + +//go:generate stringer -type=EvalType eval_type.go + +// EvalType represents the type of the output returned from a HIL +// evaluation. +type EvalType uint32 + +const ( + TypeInvalid EvalType = 0 + TypeString EvalType = 1 << iota + TypeBool + TypeList + TypeMap + TypeUnknown +) diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go new file mode 100644 index 0000000000..b107ddd451 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/evaltype_string.go @@ -0,0 +1,42 @@ +// Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT + +package hil + +import "fmt" + +const ( + _EvalType_name_0 = "TypeInvalid" + _EvalType_name_1 = "TypeString" + _EvalType_name_2 = "TypeBool" + _EvalType_name_3 = "TypeList" + _EvalType_name_4 = "TypeMap" + _EvalType_name_5 = "TypeUnknown" +) + +var ( + _EvalType_index_0 = [...]uint8{0, 11} + _EvalType_index_1 = [...]uint8{0, 10} + _EvalType_index_2 = [...]uint8{0, 8} + _EvalType_index_3 = [...]uint8{0, 8} + _EvalType_index_4 = [...]uint8{0, 7} + _EvalType_index_5 = [...]uint8{0, 11} +) + +func (i EvalType) String() string { + switch { + case i == 0: + return _EvalType_name_0 + case i == 2: + return _EvalType_name_1 + case i == 4: + return _EvalType_name_2 + case i == 8: + return _EvalType_name_3 + case i == 16: + return _EvalType_name_4 + case i == 32: + return _EvalType_name_5 + default: + return fmt.Sprintf("EvalType(%d)", i) + } +} diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go new file mode 100644 index 0000000000..ecbe1fdbfa --- /dev/null +++ b/vendor/github.com/hashicorp/hil/parse.go @@ -0,0 +1,29 @@ +package hil + +import ( + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/parser" + "github.com/hashicorp/hil/scanner" +) + +// Parse parses the given program and returns an executable AST tree. 
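+//
+// The input is the raw template text, which may mix plain literals with
+// ${ ... } interpolation sequences.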
+// +// Syntax errors are returned with error having the dynamic type +// *parser.ParseError, which gives the caller access to the source position +// where the error was found, which allows (for example) combining it with +// a known source filename to add context to the error message. +func Parse(v string) (ast.Node, error) { + return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1}) +} + +// ParseWithPosition is like Parse except that it overrides the source +// row and column position of the first character in the string, which should +// be 1-based. +// +// This can be used when HIL is embedded in another language and the outer +// parser knows the row and column where the HIL expression started within +// the overall source file. +func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) { + ch := scanner.Scan(v, pos) + return parser.Parse(ch) +} diff --git a/vendor/github.com/hashicorp/hil/parser/binary_op.go b/vendor/github.com/hashicorp/hil/parser/binary_op.go new file mode 100644 index 0000000000..2e013e01d6 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/parser/binary_op.go @@ -0,0 +1,45 @@ +package parser + +import ( + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/scanner" +) + +var binaryOps []map[scanner.TokenType]ast.ArithmeticOp + +func init() { + // This operation table maps from the operator's scanner token type + // to the AST arithmetic operation. All expressions produced from + // binary operators are *ast.Arithmetic nodes. + // + // Binary operator groups are listed in order of precedence, with + // the *lowest* precedence first. Operators within the same group + // have left-to-right associativity. + binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{ + { + scanner.OR: ast.ArithmeticOpLogicalOr, + }, + { + scanner.AND: ast.ArithmeticOpLogicalAnd, + }, + { + scanner.EQUAL: ast.ArithmeticOpEqual, + scanner.NOTEQUAL: ast.ArithmeticOpNotEqual, + }, + { + scanner.GT: ast.ArithmeticOpGreaterThan, + scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual, + scanner.LT: ast.ArithmeticOpLessThan, + scanner.LTE: ast.ArithmeticOpLessThanOrEqual, + }, + { + scanner.PLUS: ast.ArithmeticOpAdd, + scanner.MINUS: ast.ArithmeticOpSub, + }, + { + scanner.STAR: ast.ArithmeticOpMul, + scanner.SLASH: ast.ArithmeticOpDiv, + scanner.PERCENT: ast.ArithmeticOpMod, + }, + } +} diff --git a/vendor/github.com/hashicorp/hil/parser/error.go b/vendor/github.com/hashicorp/hil/parser/error.go new file mode 100644 index 0000000000..bacd696457 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/parser/error.go @@ -0,0 +1,38 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/scanner" +) + +type ParseError struct { + Message string + Pos ast.Pos +} + +func Errorf(pos ast.Pos, format string, args ...interface{}) error { + return &ParseError{ + Message: fmt.Sprintf(format, args...), + Pos: pos, + } +} + +// TokenErrorf is a convenient wrapper around Errorf that uses the +// position of the given token. +func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error { + return Errorf(token.Pos, format, args...) 
+} + +func ExpectationError(wanted string, got *scanner.Token) error { + return TokenErrorf(got, "expected %s but found %s", wanted, got) +} + +func (e *ParseError) Error() string { + return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message) +} + +func (e *ParseError) String() string { + return e.Error() +} diff --git a/vendor/github.com/hashicorp/hil/parser/fuzz.go b/vendor/github.com/hashicorp/hil/parser/fuzz.go new file mode 100644 index 0000000000..de954f3836 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/parser/fuzz.go @@ -0,0 +1,28 @@ +// +build gofuzz + +package parser + +import ( + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/scanner" +) + +// This is a fuzz testing function designed to be used with go-fuzz: +// https://github.com/dvyukov/go-fuzz +// +// It's not included in a normal build due to the gofuzz build tag above. +// +// There are some input files that you can use as a seed corpus for go-fuzz +// in the directory ./fuzz-corpus . + +func Fuzz(data []byte) int { + str := string(data) + + ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1}) + _, err := Parse(ch) + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/hashicorp/hil/parser/parser.go b/vendor/github.com/hashicorp/hil/parser/parser.go new file mode 100644 index 0000000000..376f1c49da --- /dev/null +++ b/vendor/github.com/hashicorp/hil/parser/parser.go @@ -0,0 +1,522 @@ +package parser + +import ( + "strconv" + "unicode/utf8" + + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/scanner" +) + +func Parse(ch <-chan *scanner.Token) (ast.Node, error) { + peeker := scanner.NewPeeker(ch) + parser := &parser{peeker} + output, err := parser.ParseTopLevel() + peeker.Close() + return output, err +} + +type parser struct { + peeker *scanner.Peeker +} + +func (p *parser) ParseTopLevel() (ast.Node, error) { + return p.parseInterpolationSeq(false) +} + +func (p *parser) ParseQuoted() (ast.Node, error) { + return p.parseInterpolationSeq(true) +} + +// parseInterpolationSeq parses either the top-level sequence of literals +// and interpolation expressions or a similar sequence within a quoted +// string inside an interpolation expression. The latter case is requested +// by setting 'quoted' to true. +func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) { + literalType := scanner.LITERAL + endType := scanner.EOF + if quoted { + // exceptions for quoted sequences + literalType = scanner.STRING + endType = scanner.CQUOTE + } + + startPos := p.peeker.Peek().Pos + + if quoted { + tok := p.peeker.Read() + if tok.Type != scanner.OQUOTE { + return nil, ExpectationError("open quote", tok) + } + } + + var exprs []ast.Node + for { + tok := p.peeker.Read() + + if tok.Type == endType { + break + } + + switch tok.Type { + case literalType: + val, err := p.parseStringToken(tok) + if err != nil { + return nil, err + } + exprs = append(exprs, &ast.LiteralNode{ + Value: val, + Typex: ast.TypeString, + Posx: tok.Pos, + }) + case scanner.BEGIN: + expr, err := p.ParseInterpolation() + if err != nil { + return nil, err + } + exprs = append(exprs, expr) + default: + return nil, ExpectationError(`"${"`, tok) + } + } + + if len(exprs) == 0 { + // If we have no parts at all then the input must've + // been an empty string. 
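+		// (That literal is then hoisted by the single-expression special
+		// case below, so e.g. Parse("") yields a bare empty-string
+		// *ast.LiteralNode rather than an empty *ast.Output.)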
+ exprs = append(exprs, &ast.LiteralNode{ + Value: "", + Typex: ast.TypeString, + Posx: startPos, + }) + } + + // As a special case, if our "Output" contains only one expression + // and it's a literal string then we'll hoist it up to be our + // direct return value, so callers can easily recognize a string + // that has no interpolations at all. + if len(exprs) == 1 { + if lit, ok := exprs[0].(*ast.LiteralNode); ok { + if lit.Typex == ast.TypeString { + return lit, nil + } + } + } + + return &ast.Output{ + Exprs: exprs, + Posx: startPos, + }, nil +} + +// parseStringToken takes a token of either LITERAL or STRING type and +// returns the interpreted string, after processing any relevant +// escape sequences. +func (p *parser) parseStringToken(tok *scanner.Token) (string, error) { + var backslashes bool + switch tok.Type { + case scanner.LITERAL: + backslashes = false + case scanner.STRING: + backslashes = true + default: + panic("unsupported string token type") + } + + raw := []byte(tok.Content) + buf := make([]byte, 0, len(raw)) + + for i := 0; i < len(raw); i++ { + b := raw[i] + more := len(raw) > (i + 1) + + if b == '$' { + if more && raw[i+1] == '$' { + // skip over the second dollar sign + i++ + } + } else if backslashes && b == '\\' { + if !more { + return "", Errorf( + ast.Pos{ + Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), + Line: tok.Pos.Line, + }, + `unfinished backslash escape sequence`, + ) + } + escapeType := raw[i+1] + switch escapeType { + case '\\': + // skip over the second slash + i++ + case 'n': + b = '\n' + i++ + case '"': + b = '"' + i++ + default: + return "", Errorf( + ast.Pos{ + Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), + Line: tok.Pos.Line, + }, + `invalid backslash escape sequence`, + ) + } + } + + buf = append(buf, b) + } + + return string(buf), nil +} + +func (p *parser) ParseInterpolation() (ast.Node, error) { + // By the time we're called, we're already "inside" the ${ sequence + // because the caller consumed the ${ token. + + expr, err := p.ParseExpression() + if err != nil { + return nil, err + } + + err = p.requireTokenType(scanner.END, `"}"`) + if err != nil { + return nil, err + } + + return expr, nil +} + +func (p *parser) ParseExpression() (ast.Node, error) { + return p.parseTernaryCond() +} + +func (p *parser) parseTernaryCond() (ast.Node, error) { + // The ternary condition operator (.. ? .. : ..) behaves somewhat + // like a binary operator except that the "operator" is itself + // an expression enclosed in two punctuation characters. + // The middle expression is parsed as if the ? and : symbols + // were parentheses. The "rhs" (the "false expression") is then + // treated right-associatively so it behaves similarly to the + // middle in terms of precedence. 
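+	// For example, "${a ? b : c ? d : e}" parses as "${a ? b : (c ? d : e)}",
+	// because the false expression is parsed via ParseExpression and so
+	// recurses back into this rule.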
+ + startPos := p.peeker.Peek().Pos + + var cond, trueExpr, falseExpr ast.Node + var err error + + cond, err = p.parseBinaryOps(binaryOps) + if err != nil { + return nil, err + } + + next := p.peeker.Peek() + if next.Type != scanner.QUESTION { + return cond, nil + } + + p.peeker.Read() // eat question mark + + trueExpr, err = p.ParseExpression() + if err != nil { + return nil, err + } + + colon := p.peeker.Read() + if colon.Type != scanner.COLON { + return nil, ExpectationError(":", colon) + } + + falseExpr, err = p.ParseExpression() + if err != nil { + return nil, err + } + + return &ast.Conditional{ + CondExpr: cond, + TrueExpr: trueExpr, + FalseExpr: falseExpr, + Posx: startPos, + }, nil +} + +// parseBinaryOps calls itself recursively to work through all of the +// operator precedence groups, and then eventually calls ParseExpressionTerm +// for each operand. +func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) { + if len(ops) == 0 { + // We've run out of operators, so now we'll just try to parse a term. + return p.ParseExpressionTerm() + } + + thisLevel := ops[0] + remaining := ops[1:] + + startPos := p.peeker.Peek().Pos + + var lhs, rhs ast.Node + operator := ast.ArithmeticOpInvalid + var err error + + // parse a term that might be the first operand of a binary + // expression or it might just be a standalone term, but + // we won't know until we've parsed it and can look ahead + // to see if there's an operator token. + lhs, err = p.parseBinaryOps(remaining) + if err != nil { + return nil, err + } + + // We'll keep eating up arithmetic operators until we run + // out, so that operators with the same precedence will combine in a + // left-associative manner: + // a+b+c => (a+b)+c, not a+(b+c) + // + // Should we later want to have right-associative operators, a way + // to achieve that would be to call back up to ParseExpression here + // instead of iteratively parsing only the remaining operators. + for { + next := p.peeker.Peek() + var newOperator ast.ArithmeticOp + var ok bool + if newOperator, ok = thisLevel[next.Type]; !ok { + break + } + + // Are we extending an expression started on + // the previous iteration? 
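+		// e.g. for "a+b+c": the first iteration records the + and reads b
+		// as rhs; the second folds a and b into (a+b) here before reading
+		// c as the new rhs.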
+ if operator != ast.ArithmeticOpInvalid { + lhs = &ast.Arithmetic{ + Op: operator, + Exprs: []ast.Node{lhs, rhs}, + Posx: startPos, + } + } + + operator = newOperator + p.peeker.Read() // eat operator token + rhs, err = p.parseBinaryOps(remaining) + if err != nil { + return nil, err + } + } + + if operator != ast.ArithmeticOpInvalid { + return &ast.Arithmetic{ + Op: operator, + Exprs: []ast.Node{lhs, rhs}, + Posx: startPos, + }, nil + } else { + return lhs, nil + } +} + +func (p *parser) ParseExpressionTerm() (ast.Node, error) { + + next := p.peeker.Peek() + + switch next.Type { + + case scanner.OPAREN: + p.peeker.Read() + expr, err := p.ParseExpression() + if err != nil { + return nil, err + } + err = p.requireTokenType(scanner.CPAREN, `")"`) + return expr, err + + case scanner.OQUOTE: + return p.ParseQuoted() + + case scanner.INTEGER: + tok := p.peeker.Read() + val, err := strconv.Atoi(tok.Content) + if err != nil { + return nil, TokenErrorf(tok, "invalid integer: %s", err) + } + return &ast.LiteralNode{ + Value: val, + Typex: ast.TypeInt, + Posx: tok.Pos, + }, nil + + case scanner.FLOAT: + tok := p.peeker.Read() + val, err := strconv.ParseFloat(tok.Content, 64) + if err != nil { + return nil, TokenErrorf(tok, "invalid float: %s", err) + } + return &ast.LiteralNode{ + Value: val, + Typex: ast.TypeFloat, + Posx: tok.Pos, + }, nil + + case scanner.BOOL: + tok := p.peeker.Read() + // the scanner guarantees that tok.Content is either "true" or "false" + var val bool + if tok.Content[0] == 't' { + val = true + } else { + val = false + } + return &ast.LiteralNode{ + Value: val, + Typex: ast.TypeBool, + Posx: tok.Pos, + }, nil + + case scanner.MINUS: + opTok := p.peeker.Read() + // important to use ParseExpressionTerm rather than ParseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + // e.g. -46+5 should parse as (0-46)+5, not 0-(46+5) + operand, err := p.ParseExpressionTerm() + if err != nil { + return nil, err + } + // The AST currently represents negative numbers as + // a binary subtraction of the number from zero. + return &ast.Arithmetic{ + Op: ast.ArithmeticOpSub, + Exprs: []ast.Node{ + &ast.LiteralNode{ + Value: 0, + Typex: ast.TypeInt, + Posx: opTok.Pos, + }, + operand, + }, + Posx: opTok.Pos, + }, nil + + case scanner.BANG: + opTok := p.peeker.Read() + // important to use ParseExpressionTerm rather than ParseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + operand, err := p.ParseExpressionTerm() + if err != nil { + return nil, err + } + // The AST currently represents binary negation as an equality + // test with "false". + return &ast.Arithmetic{ + Op: ast.ArithmeticOpEqual, + Exprs: []ast.Node{ + &ast.LiteralNode{ + Value: false, + Typex: ast.TypeBool, + Posx: opTok.Pos, + }, + operand, + }, + Posx: opTok.Pos, + }, nil + + case scanner.IDENTIFIER: + return p.ParseScopeInteraction() + + default: + return nil, ExpectationError("expression", next) + } +} + +// ParseScopeInteraction parses the expression types that interact +// with the evaluation scope: variable access, function calls, and +// indexing. +// +// Indexing should actually be a distinct operator in its own right, +// so that e.g. it can be applied to the result of a function call, +// but for now we're preserving the behavior of the older yacc-based +// parser. 
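+//
+// For example, "${lookup(var.map, var.key)}" produces an *ast.Call with
+// Func "lookup" and two arguments, while "${var.list[0]}" produces an
+// *ast.Index whose Target is the *ast.VariableAccess for "var.list".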
+func (p *parser) ParseScopeInteraction() (ast.Node, error) {
+	first := p.peeker.Read()
+	startPos := first.Pos
+	if first.Type != scanner.IDENTIFIER {
+		return nil, ExpectationError("identifier", first)
+	}
+
+	next := p.peeker.Peek()
+	if next.Type == scanner.OPAREN {
+		// function call
+		funcName := first.Content
+		p.peeker.Read() // eat paren
+		var args []ast.Node
+
+		for {
+			if p.peeker.Peek().Type == scanner.CPAREN {
+				break
+			}
+
+			arg, err := p.ParseExpression()
+			if err != nil {
+				return nil, err
+			}
+
+			args = append(args, arg)
+
+			if p.peeker.Peek().Type == scanner.COMMA {
+				p.peeker.Read() // eat comma
+				continue
+			} else {
+				break
+			}
+		}
+
+		err := p.requireTokenType(scanner.CPAREN, `")"`)
+		if err != nil {
+			return nil, err
+		}
+
+		return &ast.Call{
+			Func: funcName,
+			Args: args,
+			Posx: startPos,
+		}, nil
+	}
+
+	varNode := &ast.VariableAccess{
+		Name: first.Content,
+		Posx: startPos,
+	}
+
+	if p.peeker.Peek().Type == scanner.OBRACKET {
+		// index operator
+		startPos := p.peeker.Read().Pos // eat bracket
+		indexExpr, err := p.ParseExpression()
+		if err != nil {
+			return nil, err
+		}
+		err = p.requireTokenType(scanner.CBRACKET, `"]"`)
+		if err != nil {
+			return nil, err
+		}
+		return &ast.Index{
+			Target: varNode,
+			Key:    indexExpr,
+			Posx:   startPos,
+		}, nil
+	}
+
+	return varNode, nil
+}
+
+// requireTokenType consumes the next token and returns an error if its
+// type does not match the given type. nil is returned if the type matches.
+//
+// This is a helper around peeker.Read() for situations where the parser just
+// wants to assert that a particular token type must be present.
+func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error {
+	token := p.peeker.Read()
+	if token.Type != wantType {
+		return ExpectationError(wantName, token)
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/peeker.go b/vendor/github.com/hashicorp/hil/scanner/peeker.go
new file mode 100644
index 0000000000..4de372831f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/peeker.go
@@ -0,0 +1,55 @@
+package scanner
+
+// Peeker is a utility that wraps a token channel returned by Scan and
+// provides an interface that allows a caller (e.g. the parser) to
+// work with the token stream in a mode that allows one token of lookahead,
+// and provides utilities for more convenient processing of the stream.
+type Peeker struct {
+	ch     <-chan *Token
+	peeked *Token
+}
+
+func NewPeeker(ch <-chan *Token) *Peeker {
+	return &Peeker{
+		ch: ch,
+	}
+}
+
+// Peek returns the next token in the stream without consuming it. A
+// subsequent call to Read will return the same token.
+func (p *Peeker) Peek() *Token {
+	if p.peeked == nil {
+		p.peeked = <-p.ch
+	}
+	return p.peeked
+}
+
+// Read consumes the next token in the stream and returns it.
+func (p *Peeker) Read() *Token {
+	token := p.Peek()
+
+	// As a special case, we will produce the EOF token forever once
+	// it is reached.
+	if token.Type != EOF {
+		p.peeked = nil
+	}
+
+	return token
+}
+
+// Close ensures that the token stream has been exhausted, to prevent
+// the goroutine in the underlying scanner from leaking.
+//
+// It's not necessary to call this if the caller reads the token stream
+// to EOF, since that implicitly closes the scanner.
+func (p *Peeker) Close() {
+	for _ = range p.ch {
+		// discard
+	}
+	// Install a synthetic EOF token in 'peeked' in case someone
+	// erroneously calls Peek() or Read() after we've closed.
+	p.peeked = &Token{
+		Type:    EOF,
+		Content: "",
+	}
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go
new file mode 100644
index 0000000000..bab86c67a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/scanner.go
@@ -0,0 +1,550 @@
+package scanner
+
+import (
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hil/ast"
+)
+
+// Scan returns a channel that receives Tokens from the given input string.
+//
+// The scanner's job is just to partition the string into meaningful parts.
+// It doesn't do any transformation of the raw input string, so the caller
+// must deal with any further interpretation required, such as parsing INTEGER
+// tokens into real ints, or dealing with escape sequences in LITERAL or
+// STRING tokens.
+//
+// Strings in the returned tokens are slices from the original string.
+//
+// startPos should be set to ast.InitPos unless the caller knows that
+// this interpolation string is part of a larger file and knows the position
+// of the first character in that larger file.
+func Scan(s string, startPos ast.Pos) <-chan *Token {
+	ch := make(chan *Token)
+	go scan(s, ch, startPos)
+	return ch
+}
+
+func scan(s string, ch chan<- *Token, pos ast.Pos) {
+	// 'remain' starts off as the whole string but we gradually
+	// slice off the front of it as we work our way through.
+	remain := s
+
+	// nesting keeps track of how many ${ .. } sequences we are
+	// inside, so we can recognize the minor differences in syntax
+	// between outer string literals (LITERAL tokens) and quoted
+	// string literals (STRING tokens).
+	nesting := 0
+
+	// We're going to flip back and forth between parsing literals/strings
+	// and parsing interpolation sequences ${ .. } until we reach EOF or
+	// some INVALID token.
+All:
+	for {
+		startPos := pos
+		// Literal string processing first, since the beginning of
+		// a string is always outside of an interpolation sequence.
+		literalVal, terminator := scanLiteral(remain, pos, nesting > 0)
+
+		if len(literalVal) > 0 {
+			litType := LITERAL
+			if nesting > 0 {
+				litType = STRING
+			}
+			ch <- &Token{
+				Type:    litType,
+				Content: literalVal,
+				Pos:     startPos,
+			}
+			remain = remain[len(literalVal):]
+		}
+
+		ch <- terminator
+		remain = remain[len(terminator.Content):]
+		pos = terminator.Pos
+		// Safe to use len() here because none of the terminator tokens
+		// can contain UTF-8 sequences.
+		pos.Column = pos.Column + len(terminator.Content)
+
+		switch terminator.Type {
+		case INVALID:
+			// Synthetic EOF after invalid token, since further scanning
+			// is likely to just produce more garbage.
+			ch <- &Token{
+				Type:    EOF,
+				Content: "",
+				Pos:     pos,
+			}
+			break All
+		case EOF:
+			// All done!
+			break All
+		case BEGIN:
+			nesting++
+		case CQUOTE:
+			// nothing special to do
+		default:
+			// Should never happen
+			panic("invalid string/literal terminator")
+		}
+
+		// Now we do the processing of the insides of ${ .. } sequences.
+		// This loop terminates when we encounter either a closing } or
+		// an opening ", which will cause us to return to literal processing.
+	Interpolation:
+		for {
+
+			token, size, newPos := scanInterpolationToken(remain, pos)
+			ch <- token
+			remain = remain[size:]
+			pos = newPos
+
+			switch token.Type {
+			case INVALID:
+				// Synthetic EOF after invalid token, since further scanning
+				// is likely to just produce more garbage.
+ ch <- &Token{ + Type: EOF, + Content: "", + Pos: pos, + } + break All + case EOF: + // All done + // (though a syntax error that we'll catch in the parser) + break All + case END: + nesting-- + if nesting < 0 { + // Can happen if there are unbalanced ${ and } sequences + // in the input, which we'll catch in the parser. + nesting = 0 + } + break Interpolation + case OQUOTE: + // Beginning of nested quoted string + break Interpolation + } + } + } + + close(ch) +} + +// Returns the token found at the start of the given string, followed by +// the number of bytes that were consumed from the string and the adjusted +// source position. +// +// Note that the number of bytes consumed can be more than the length of +// the returned token contents if the string begins with whitespace, since +// it will be silently consumed before reading the token. +func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) { + pos := startPos + size := 0 + + // Consume whitespace, if any + for len(s) > 0 && byteIsSpace(s[0]) { + if s[0] == '\n' { + pos.Column = 1 + pos.Line++ + } else { + pos.Column++ + } + size++ + s = s[1:] + } + + // Unexpected EOF during sequence + if len(s) == 0 { + return &Token{ + Type: EOF, + Content: "", + Pos: pos, + }, size, pos + } + + next := s[0] + var token *Token + + switch next { + case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':': + // Easy punctuation symbols that don't have any special meaning + // during scanning, and that stand for themselves in the + // TokenType enumeration. + token = &Token{ + Type: TokenType(next), + Content: s[:1], + Pos: pos, + } + case '}': + token = &Token{ + Type: END, + Content: s[:1], + Pos: pos, + } + case '"': + token = &Token{ + Type: OQUOTE, + Content: s[:1], + Pos: pos, + } + case '!': + if len(s) >= 2 && s[:2] == "!=" { + token = &Token{ + Type: NOTEQUAL, + Content: s[:2], + Pos: pos, + } + } else { + token = &Token{ + Type: BANG, + Content: s[:1], + Pos: pos, + } + } + case '<': + if len(s) >= 2 && s[:2] == "<=" { + token = &Token{ + Type: LTE, + Content: s[:2], + Pos: pos, + } + } else { + token = &Token{ + Type: LT, + Content: s[:1], + Pos: pos, + } + } + case '>': + if len(s) >= 2 && s[:2] == ">=" { + token = &Token{ + Type: GTE, + Content: s[:2], + Pos: pos, + } + } else { + token = &Token{ + Type: GT, + Content: s[:1], + Pos: pos, + } + } + case '=': + if len(s) >= 2 && s[:2] == "==" { + token = &Token{ + Type: EQUAL, + Content: s[:2], + Pos: pos, + } + } else { + // A single equals is not a valid operator + token = &Token{ + Type: INVALID, + Content: s[:1], + Pos: pos, + } + } + case '&': + if len(s) >= 2 && s[:2] == "&&" { + token = &Token{ + Type: AND, + Content: s[:2], + Pos: pos, + } + } else { + token = &Token{ + Type: INVALID, + Content: s[:1], + Pos: pos, + } + } + case '|': + if len(s) >= 2 && s[:2] == "||" { + token = &Token{ + Type: OR, + Content: s[:2], + Pos: pos, + } + } else { + token = &Token{ + Type: INVALID, + Content: s[:1], + Pos: pos, + } + } + default: + if next >= '0' && next <= '9' { + num, numType := scanNumber(s) + token = &Token{ + Type: numType, + Content: num, + Pos: pos, + } + } else if stringStartsWithIdentifier(s) { + ident, runeLen := scanIdentifier(s) + tokenType := IDENTIFIER + if ident == "true" || ident == "false" { + tokenType = BOOL + } + token = &Token{ + Type: tokenType, + Content: ident, + Pos: pos, + } + // Skip usual token handling because it doesn't + // know how to deal with UTF-8 sequences. 
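+			// (Column advances by the rune count of the identifier,
+			// while the consumed size advances by its byte count; the
+			// two differ when it contains multi-byte runes.)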
+ pos.Column = pos.Column + runeLen + return token, size + len(ident), pos + } else { + _, byteLen := utf8.DecodeRuneInString(s) + token = &Token{ + Type: INVALID, + Content: s[:byteLen], + Pos: pos, + } + // Skip usual token handling because it doesn't + // know how to deal with UTF-8 sequences. + pos.Column = pos.Column + 1 + return token, size + byteLen, pos + } + } + + // Here we assume that the token content contains no UTF-8 sequences, + // because we dealt with UTF-8 characters as a special case where + // necessary above. + size = size + len(token.Content) + pos.Column = pos.Column + len(token.Content) + + return token, size, pos +} + +// Returns the (possibly-empty) prefix of the given string that represents +// a literal, followed by the token that marks the end of the literal. +func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) { + litLen := 0 + pos := startPos + var terminator *Token + for { + + if litLen >= len(s) { + if nested { + // We've ended in the middle of a quoted string, + // which means this token is actually invalid. + return "", &Token{ + Type: INVALID, + Content: s, + Pos: startPos, + } + } + terminator = &Token{ + Type: EOF, + Content: "", + Pos: pos, + } + break + } + + next := s[litLen] + + if next == '$' && len(s) > litLen+1 { + follow := s[litLen+1] + + if follow == '{' { + terminator = &Token{ + Type: BEGIN, + Content: s[litLen : litLen+2], + Pos: pos, + } + pos.Column = pos.Column + 2 + break + } else if follow == '$' { + // Double-$ escapes the special processing of $, + // so we will consume both characters here. + pos.Column = pos.Column + 2 + litLen = litLen + 2 + continue + } + } + + // special handling that applies only to quoted strings + if nested { + if next == '"' { + terminator = &Token{ + Type: CQUOTE, + Content: s[litLen : litLen+1], + Pos: pos, + } + pos.Column = pos.Column + 1 + break + } + + // Escaped quote marks do not terminate the string. + // + // All we do here in the scanner is avoid terminating a string + // due to an escaped quote. The parser is responsible for the + // full handling of escape sequences, since it's able to produce + // better error messages than we can produce in here. + if next == '\\' && len(s) > litLen+1 { + follow := s[litLen+1] + + if follow == '"' { + // \" escapes the special processing of ", + // so we will consume both characters here. + pos.Column = pos.Column + 2 + litLen = litLen + 2 + continue + } + } + } + + if next == '\n' { + pos.Column = 1 + pos.Line++ + litLen++ + } else { + pos.Column++ + + // "Column" measures runes, so we need to actually consume + // a valid UTF-8 character here. + _, size := utf8.DecodeRuneInString(s[litLen:]) + litLen = litLen + size + } + + } + + return s[:litLen], terminator +} + +// scanNumber returns the extent of the prefix of the string that represents +// a valid number, along with what type of number it represents: INT or FLOAT. +// +// scanNumber does only basic character analysis: numbers consist of digits +// and periods, with at least one period signalling a FLOAT. It's the parser's +// responsibility to validate the form and range of the number, such as ensuring +// that a FLOAT actually contains only one period, etc. +func scanNumber(s string) (string, TokenType) { + period := -1 + byteLen := 0 + numType := INTEGER + for { + if byteLen >= len(s) { + break + } + + next := s[byteLen] + if next != '.' && (next < '0' || next > '9') { + // If our last value was a period, then we're not a float, + // we're just an integer that ends in a period. 
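+			// e.g. in "1.]" the number scans as INTEGER "1", leaving
+			// ".]" for subsequent tokens.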
+			if period == byteLen-1 {
+				byteLen--
+				numType = INTEGER
+			}
+
+			break
+		}
+
+		if next == '.' {
+			// If we've already seen a period, break out
+			if period >= 0 {
+				break
+			}
+
+			period = byteLen
+			numType = FLOAT
+		}
+
+		byteLen++
+	}
+
+	return s[:byteLen], numType
+}
+
+// scanIdentifier returns the extent of the prefix of the string that
+// represents a valid identifier, along with the length of that prefix
+// in runes.
+//
+// Identifiers may contain utf8-encoded non-Latin letters, which will
+// cause the returned "rune length" to be shorter than the byte length
+// of the returned string.
+func scanIdentifier(s string) (string, int) {
+	byteLen := 0
+	runeLen := 0
+	for {
+		if byteLen >= len(s) {
+			break
+		}
+
+		nextRune, size := utf8.DecodeRuneInString(s[byteLen:])
+		if !(nextRune == '_' ||
+			nextRune == '-' ||
+			nextRune == '.' ||
+			nextRune == '*' ||
+			unicode.IsNumber(nextRune) ||
+			unicode.IsLetter(nextRune) ||
+			unicode.IsMark(nextRune)) {
+			break
+		}
+
+		// If we reach a star, it must be between periods to be part
+		// of the same identifier.
+		if nextRune == '*' && s[byteLen-1] != '.' {
+			break
+		}
+
+		// If our previous character was a star, then the current must
+		// be a period. Otherwise, undo that and exit.
+		if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' {
+			byteLen--
+			if s[byteLen-1] == '.' {
+				byteLen--
+			}
+
+			break
+		}
+
+		byteLen = byteLen + size
+		runeLen = runeLen + 1
+	}
+
+	return s[:byteLen], runeLen
+}
+
+// byteIsSpace implements a restrictive interpretation of spaces that includes
+// only what's valid inside interpolation sequences: spaces, tabs, newlines.
+func byteIsSpace(b byte) bool {
+	switch b {
+	case ' ', '\t', '\r', '\n':
+		return true
+	default:
+		return false
+	}
+}
+
+// stringStartsWithIdentifier returns true if the given string begins with
+// a character that is a legal start of an identifier: an underscore or
+// any character that Unicode considers to be a letter.
+func stringStartsWithIdentifier(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+
+	first := s[0]
+
+	// Easy ASCII cases first
+	if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' {
+		return true
+	}
+
+	// If our first byte begins a UTF-8 sequence then the sequence might
+	// be a Unicode letter.
+	if utf8.RuneStart(first) {
+		firstRune, _ := utf8.DecodeRuneInString(s)
+		if unicode.IsLetter(firstRune) {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/token.go b/vendor/github.com/hashicorp/hil/scanner/token.go
new file mode 100644
index 0000000000..b6c82ae9b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/token.go
@@ -0,0 +1,105 @@
+package scanner
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hil/ast"
+)
+
+type Token struct {
+	Type    TokenType
+	Content string
+	Pos     ast.Pos
+}
+
+//go:generate stringer -type=TokenType
+type TokenType rune
+
+const (
+	// Raw string data outside of ${ .. } sequences
+	LITERAL TokenType = 'o'
+
+	// STRING is like a LITERAL but it's inside a quoted string
+	// within a ${ ... } sequence, and so it can contain backslash
+	// escaping.
+ STRING TokenType = 'S' + + // Other Literals + INTEGER TokenType = 'I' + FLOAT TokenType = 'F' + BOOL TokenType = 'B' + + BEGIN TokenType = '$' // actually "${" + END TokenType = '}' + OQUOTE TokenType = '“' // Opening quote of a nested quoted sequence + CQUOTE TokenType = '”' // Closing quote of a nested quoted sequence + OPAREN TokenType = '(' + CPAREN TokenType = ')' + OBRACKET TokenType = '[' + CBRACKET TokenType = ']' + COMMA TokenType = ',' + + IDENTIFIER TokenType = 'i' + + PERIOD TokenType = '.' + PLUS TokenType = '+' + MINUS TokenType = '-' + STAR TokenType = '*' + SLASH TokenType = '/' + PERCENT TokenType = '%' + + AND TokenType = '∧' + OR TokenType = '∨' + BANG TokenType = '!' + + EQUAL TokenType = '=' + NOTEQUAL TokenType = '≠' + GT TokenType = '>' + LT TokenType = '<' + GTE TokenType = '≥' + LTE TokenType = '≤' + + QUESTION TokenType = '?' + COLON TokenType = ':' + + EOF TokenType = '␄' + + // Produced for sequences that cannot be understood as valid tokens + // e.g. due to use of unrecognized punctuation. + INVALID TokenType = '�' +) + +func (t *Token) String() string { + switch t.Type { + case EOF: + return "end of string" + case INVALID: + return fmt.Sprintf("invalid sequence %q", t.Content) + case INTEGER: + return fmt.Sprintf("integer %s", t.Content) + case FLOAT: + return fmt.Sprintf("float %s", t.Content) + case STRING: + return fmt.Sprintf("string %q", t.Content) + case LITERAL: + return fmt.Sprintf("literal %q", t.Content) + case OQUOTE: + return fmt.Sprintf("opening quote") + case CQUOTE: + return fmt.Sprintf("closing quote") + case AND: + return "&&" + case OR: + return "||" + case NOTEQUAL: + return "!=" + case GTE: + return ">=" + case LTE: + return "<=" + default: + // The remaining token types have content that + // speaks for itself. 
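+		// e.g. a BEGIN token prints as "${" and a COLON token as ":",
+		// both quoted.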
+ return fmt.Sprintf("%q", t.Content) + } +} diff --git a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go new file mode 100644 index 0000000000..a602f5fdd8 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go @@ -0,0 +1,51 @@ +// Code generated by "stringer -type=TokenType"; DO NOT EDIT + +package scanner + +import "fmt" + +const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID" + +var _TokenType_map = map[TokenType]string{ + 33: _TokenType_name[0:4], + 36: _TokenType_name[4:9], + 37: _TokenType_name[9:16], + 40: _TokenType_name[16:22], + 41: _TokenType_name[22:28], + 42: _TokenType_name[28:32], + 43: _TokenType_name[32:36], + 44: _TokenType_name[36:41], + 45: _TokenType_name[41:46], + 46: _TokenType_name[46:52], + 47: _TokenType_name[52:57], + 58: _TokenType_name[57:62], + 60: _TokenType_name[62:64], + 61: _TokenType_name[64:69], + 62: _TokenType_name[69:71], + 63: _TokenType_name[71:79], + 66: _TokenType_name[79:83], + 70: _TokenType_name[83:88], + 73: _TokenType_name[88:95], + 83: _TokenType_name[95:101], + 91: _TokenType_name[101:109], + 93: _TokenType_name[109:117], + 105: _TokenType_name[117:127], + 111: _TokenType_name[127:134], + 125: _TokenType_name[134:137], + 8220: _TokenType_name[137:143], + 8221: _TokenType_name[143:149], + 8743: _TokenType_name[149:152], + 8744: _TokenType_name[152:154], + 8800: _TokenType_name[154:162], + 8804: _TokenType_name[162:165], + 8805: _TokenType_name[165:168], + 9220: _TokenType_name[168:171], + 65533: _TokenType_name[171:178], +} + +func (i TokenType) String() string { + if str, ok := _TokenType_map[i]; ok { + return str + } + return fmt.Sprintf("TokenType(%d)", i) +} diff --git a/vendor/github.com/hashicorp/hil/transform_fixed.go b/vendor/github.com/hashicorp/hil/transform_fixed.go new file mode 100644 index 0000000000..e69df29432 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/transform_fixed.go @@ -0,0 +1,29 @@ +package hil + +import ( + "github.com/hashicorp/hil/ast" +) + +// FixedValueTransform transforms an AST to return a fixed value for +// all interpolations. i.e. you can make "hi ${anything}" always +// turn into "hi foo". +// +// The primary use case for this is for config validations where you can +// verify that interpolations result in a certain type of string. +func FixedValueTransform(root ast.Node, Value *ast.LiteralNode) ast.Node { + // We visit the nodes in top-down order + result := root + switch n := result.(type) { + case *ast.Output: + for i, v := range n.Exprs { + n.Exprs[i] = FixedValueTransform(v, Value) + } + case *ast.LiteralNode: + // We keep it as-is + default: + // Anything else we replace + result = Value + } + + return result +} diff --git a/vendor/github.com/hashicorp/hil/walk.go b/vendor/github.com/hashicorp/hil/walk.go new file mode 100644 index 0000000000..0ace83065f --- /dev/null +++ b/vendor/github.com/hashicorp/hil/walk.go @@ -0,0 +1,266 @@ +package hil + +import ( + "fmt" + "reflect" + "strings" + + "github.com/hashicorp/hil/ast" + "github.com/mitchellh/reflectwalk" +) + +// WalkFn is the type of function to pass to Walk. Modify fields within +// WalkData to control whether replacement happens. +type WalkFn func(*WalkData) error + +// WalkData is the structure passed to the callback of the Walk function. 
+//
+// This structure contains data passed in as well as fields that are expected
+// to be written by the caller as a result. Please see the documentation for
+// each field for more information.
+type WalkData struct {
+	// Root is the parsed root of this HIL program
+	Root ast.Node
+
+	// Location is the location within the structure where this
+	// value was found. This can be used to modify behavior within
+	// slices and so on.
+	Location reflectwalk.Location
+
+	// The below two values must be set by the callback to have any effect.
+	//
+	// Replace, if true, will replace the value in the structure with
+	// ReplaceValue. It is up to the caller to make sure this is a string.
+	Replace      bool
+	ReplaceValue string
+}
+
+// Walk will walk an arbitrary Go structure, parse any string as an
+// HIL program, and call the callback cb to determine what to replace it
+// with.
+//
+// This function is very useful for arbitrary HIL program interpolation
+// across a complex configuration structure. Due to the heavy use of
+// reflection in this function, it is recommended to write many unit tests
+// with your typical configuration structures to help mitigate the risk
+// of panics.
+func Walk(v interface{}, cb WalkFn) error {
+	walker := &interpolationWalker{F: cb}
+	return reflectwalk.Walk(v, walker)
+}
+
+// interpolationWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// execute a callback for an interpolation.
+type interpolationWalker struct {
+	F WalkFn
+
+	key         []string
+	lastValue   reflect.Value
+	loc         reflectwalk.Location
+	cs          []reflect.Value
+	csKey       []reflect.Value
+	csData      interface{}
+	sliceIndex  int
+	unknownKeys []string
+}
+
+func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
+	w.loc = loc
+	return nil
+}
+
+func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
+	w.loc = reflectwalk.None
+
+	switch loc {
+	case reflectwalk.Map:
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.MapValue:
+		w.key = w.key[:len(w.key)-1]
+		w.csKey = w.csKey[:len(w.csKey)-1]
+	case reflectwalk.Slice:
+		// Split any values that need to be split
+		w.splitSlice()
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.SliceElem:
+		w.csKey = w.csKey[:len(w.csKey)-1]
+	}
+
+	return nil
+}
+
+func (w *interpolationWalker) Map(m reflect.Value) error {
+	w.cs = append(w.cs, m)
+	return nil
+}
+
+func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
+	w.csData = k
+	w.csKey = append(w.csKey, k)
+	w.key = append(w.key, k.String())
+	w.lastValue = v
+	return nil
+}
+
+func (w *interpolationWalker) Slice(s reflect.Value) error {
+	w.cs = append(w.cs, s)
+	return nil
+}
+
+func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
+	w.csKey = append(w.csKey, reflect.ValueOf(i))
+	w.sliceIndex = i
+	return nil
+}
+
+func (w *interpolationWalker) Primitive(v reflect.Value) error {
+	setV := v
+
+	// We only care about strings
+	if v.Kind() == reflect.Interface {
+		setV = v
+		v = v.Elem()
+	}
+	if v.Kind() != reflect.String {
+		return nil
+	}
+
+	astRoot, err := Parse(v.String())
+	if err != nil {
+		return err
+	}
+
+	// If the AST we got is just a literal string value with the same
+	// value then we ignore it. We have to check if it's the same value
+	// because it is possible to input a string, get out a string, and
+	// have it be different.
For example: "foo-$${bar}" turns into + // "foo-${bar}" + if n, ok := astRoot.(*ast.LiteralNode); ok { + if s, ok := n.Value.(string); ok && s == v.String() { + return nil + } + } + + if w.F == nil { + return nil + } + + data := WalkData{Root: astRoot, Location: w.loc} + if err := w.F(&data); err != nil { + return fmt.Errorf( + "%s in:\n\n%s", + err, v.String()) + } + + if data.Replace { + /* + if remove { + w.removeCurrent() + return nil + } + */ + + resultVal := reflect.ValueOf(data.ReplaceValue) + switch w.loc { + case reflectwalk.MapKey: + m := w.cs[len(w.cs)-1] + + // Delete the old value + var zero reflect.Value + m.SetMapIndex(w.csData.(reflect.Value), zero) + + // Set the new key with the existing value + m.SetMapIndex(resultVal, w.lastValue) + + // Set the key to be the new key + w.csData = resultVal + case reflectwalk.MapValue: + // If we're in a map, then the only way to set a map value is + // to set it directly. + m := w.cs[len(w.cs)-1] + mk := w.csData.(reflect.Value) + m.SetMapIndex(mk, resultVal) + default: + // Otherwise, we should be addressable + setV.Set(resultVal) + } + } + + return nil +} + +func (w *interpolationWalker) removeCurrent() { + // Append the key to the unknown keys + w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) + + for i := 1; i <= len(w.cs); i++ { + c := w.cs[len(w.cs)-i] + switch c.Kind() { + case reflect.Map: + // Zero value so that we delete the map key + var val reflect.Value + + // Get the key and delete it + k := w.csData.(reflect.Value) + c.SetMapIndex(k, val) + return + } + } + + panic("No container found for removeCurrent") +} + +func (w *interpolationWalker) replaceCurrent(v reflect.Value) { + c := w.cs[len(w.cs)-2] + switch c.Kind() { + case reflect.Map: + // Get the key and delete it + k := w.csKey[len(w.csKey)-1] + c.SetMapIndex(k, v) + } +} + +func (w *interpolationWalker) splitSlice() { + // Get the []interface{} slice so we can do some operations on + // it without dealing with reflection. We'll document each step + // here to be clear. + var s []interface{} + raw := w.cs[len(w.cs)-1] + switch v := raw.Interface().(type) { + case []interface{}: + s = v + case []map[string]interface{}: + return + default: + panic("Unknown kind: " + raw.Kind().String()) + } + + // Check if we have any elements that we need to split. If not, then + // just return since we're done. + split := false + if !split { + return + } + + // Make a new result slice that is twice the capacity to fit our growth. + result := make([]interface{}, 0, len(s)*2) + + // Go over each element of the original slice and start building up + // the resulting slice by splitting where we have to. + for _, v := range s { + sv, ok := v.(string) + if !ok { + // Not a string, so just set it + result = append(result, v) + continue + } + + // Not a string list, so just set it + result = append(result, sv) + } + + // Our slice is now done, we have to replace the slice now + // with this new one that we have. + w.replaceCurrent(reflect.ValueOf(result)) +} diff --git a/vendor/github.com/hashicorp/logutils/LICENSE b/vendor/github.com/hashicorp/logutils/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
“Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/logutils/README.md b/vendor/github.com/hashicorp/logutils/README.md new file mode 100644 index 0000000000..49490eaeb6 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/README.md @@ -0,0 +1,36 @@ +# logutils + +logutils is a Go package that augments the standard library "log" package +to make logging a bit more modern, without fragmenting the Go ecosystem +with new logging packages. + +## The simplest thing that could possibly work + +Presumably your application already uses the default `log` package. To switch, you'll want your code to look like the following: + +```go +package main + +import ( + "log" + "os" + + "github.com/hashicorp/logutils" +) + +func main() { + filter := &logutils.LevelFilter{ + Levels: []logutils.LogLevel{"DEBUG", "WARN", "ERROR"}, + MinLevel: logutils.LogLevel("WARN"), + Writer: os.Stderr, + } + log.SetOutput(filter) + + log.Print("[DEBUG] Debugging") // this will not print + log.Print("[WARN] Warning") // this will + log.Print("[ERROR] Erring") // and so will this + log.Print("Message I haven't updated") // and so will this +} +``` + +This logs to standard error exactly like go's standard logger. Any log messages you haven't converted to have a level will continue to print as before. diff --git a/vendor/github.com/hashicorp/logutils/level.go b/vendor/github.com/hashicorp/logutils/level.go new file mode 100644 index 0000000000..6381bf1629 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/level.go @@ -0,0 +1,81 @@ +// Package logutils augments the standard log package with levels. +package logutils + +import ( + "bytes" + "io" + "sync" +) + +type LogLevel string + +// LevelFilter is an io.Writer that can be used with a logger that +// will filter out log messages that aren't at least a certain level. +// +// Once the filter is in use somewhere, it is not safe to modify +// the structure. +type LevelFilter struct { + // Levels is the list of log levels, in increasing order of + // severity. Example might be: {"DEBUG", "WARN", "ERROR"}. 
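+	// Levels absent from this list are never filtered out: Check treats
+	// an unrecognized or missing "[LEVEL]" tag as passing the filter.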
+ Levels []LogLevel + + // MinLevel is the minimum level allowed through + MinLevel LogLevel + + // The underlying io.Writer where log messages that pass the filter + // will be set. + Writer io.Writer + + badLevels map[LogLevel]struct{} + once sync.Once +} + +// Check will check a given line if it would be included in the level +// filter. +func (f *LevelFilter) Check(line []byte) bool { + f.once.Do(f.init) + + // Check for a log level + var level LogLevel + x := bytes.IndexByte(line, '[') + if x >= 0 { + y := bytes.IndexByte(line[x:], ']') + if y >= 0 { + level = LogLevel(line[x+1 : x+y]) + } + } + + _, ok := f.badLevels[level] + return !ok +} + +func (f *LevelFilter) Write(p []byte) (n int, err error) { + // Note in general that io.Writer can receive any byte sequence + // to write, but the "log" package always guarantees that we only + // get a single line. We use that as a slight optimization within + // this method, assuming we're dealing with a single, complete line + // of log data. + + if !f.Check(p) { + return len(p), nil + } + + return f.Writer.Write(p) +} + +// SetMinLevel is used to update the minimum log level +func (f *LevelFilter) SetMinLevel(min LogLevel) { + f.MinLevel = min + f.init() +} + +func (f *LevelFilter) init() { + badLevels := make(map[LogLevel]struct{}) + for _, level := range f.Levels { + if level == f.MinLevel { + break + } + badLevels[level] = struct{}{} + } + f.badLevels = badLevels +} diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go new file mode 100644 index 0000000000..5f4e89eef7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/append.go @@ -0,0 +1,86 @@ +package config + +// Append appends one configuration to another. +// +// Append assumes that both configurations will not have +// conflicting variables, resources, etc. If they do, the +// problems will be caught in the validation phase. +// +// It is possible that c1, c2 on their own are not valid. For +// example, a resource in c2 may reference a variable in c1. But +// together, they would be valid. +func Append(c1, c2 *Config) (*Config, error) { + c := new(Config) + + // Append unknown keys, but keep them unique since it is a set + unknowns := make(map[string]struct{}) + for _, k := range c1.unknownKeys { + _, present := unknowns[k] + if !present { + unknowns[k] = struct{}{} + c.unknownKeys = append(c.unknownKeys, k) + } + } + + for _, k := range c2.unknownKeys { + _, present := unknowns[k] + if !present { + unknowns[k] = struct{}{} + c.unknownKeys = append(c.unknownKeys, k) + } + } + + c.Atlas = c1.Atlas + if c2.Atlas != nil { + c.Atlas = c2.Atlas + } + + // merge Terraform blocks + if c1.Terraform != nil { + c.Terraform = c1.Terraform + if c2.Terraform != nil { + c.Terraform.Merge(c2.Terraform) + } + } else { + c.Terraform = c2.Terraform + } + + if len(c1.Modules) > 0 || len(c2.Modules) > 0 { + c.Modules = make( + []*Module, 0, len(c1.Modules)+len(c2.Modules)) + c.Modules = append(c.Modules, c1.Modules...) + c.Modules = append(c.Modules, c2.Modules...) + } + + if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 { + c.Outputs = make( + []*Output, 0, len(c1.Outputs)+len(c2.Outputs)) + c.Outputs = append(c.Outputs, c1.Outputs...) + c.Outputs = append(c.Outputs, c2.Outputs...) + } + + if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 { + c.ProviderConfigs = make( + []*ProviderConfig, + 0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs)) + c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...) + c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...) + } + + if len(c1.Resources) > 0 || len(c2.Resources) > 0 { + c.Resources = make( + []*Resource, + 0, len(c1.Resources)+len(c2.Resources)) + c.Resources = append(c.Resources, c1.Resources...) + c.Resources = append(c.Resources, c2.Resources...) + } + + if len(c1.Variables) > 0 || len(c2.Variables) > 0 { + c.Variables = make( + []*Variable, 0, len(c1.Variables)+len(c2.Variables)) + c.Variables = append(c.Variables, c1.Variables...) + c.Variables = append(c.Variables, c2.Variables...) + } + + return c, nil +} diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go new file mode 100644 index 0000000000..a157824290 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/config.go @@ -0,0 +1,1034 @@ +// The config package is responsible for loading and validating the +// configuration. 
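+//
+// As a rough usage sketch (LoadDir is this package's loader entry
+// point, defined outside this hunk; the path is illustrative):
+//
+//	cfg, err := config.LoadDir("./example")
+//	if err == nil {
+//		err = cfg.Validate()
+//	}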
+package config + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hil" + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/terraform/helper/hilmapstructure" + "github.com/mitchellh/reflectwalk" +) + +// NameRegexp is the regular expression that all names (modules, providers, +// resources, etc.) must follow. +var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`) + +// Config is the configuration that comes from loading a collection +// of Terraform templates. +type Config struct { + // Dir is the path to the directory where this configuration was + // loaded from. If it is blank, this configuration wasn't loaded from + // any meaningful directory. + Dir string + + Terraform *Terraform + Atlas *AtlasConfig + Modules []*Module + ProviderConfigs []*ProviderConfig + Resources []*Resource + Variables []*Variable + Outputs []*Output + + // The fields below can be filled in by loaders for validation + // purposes. + unknownKeys []string +} + +// AtlasConfig is the configuration for building in HashiCorp's Atlas. +type AtlasConfig struct { + Name string + Include []string + Exclude []string +} + +// Module is a module used within a configuration. +// +// This does not represent a module itself, this represents a module +// call-site within an existing configuration. +type Module struct { + Name string + Source string + RawConfig *RawConfig +} + +// ProviderConfig is the configuration for a resource provider. +// +// For example, Terraform needs to set the AWS access keys for the AWS +// resource provider. +type ProviderConfig struct { + Name string + Alias string + RawConfig *RawConfig +} + +// A resource represents a single Terraform resource in the configuration. +// A Terraform resource is something that supports some or all of the +// usual "create, read, update, delete" operations, depending on +// the given Mode. +type Resource struct { + Mode ResourceMode // which operations the resource supports + Name string + Type string + RawCount *RawConfig + RawConfig *RawConfig + Provisioners []*Provisioner + Provider string + DependsOn []string + Lifecycle ResourceLifecycle +} + +// Copy returns a copy of this Resource. Helpful for avoiding shared +// config pointers across multiple pieces of the graph that need to do +// interpolation. 
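+//
+// For example, mutating the copy leaves the original untouched:
+//
+//	r2 := r.Copy()
+//	r2.DependsOn = append(r2.DependsOn, "aws_instance.other") // r is unchanged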
+func (r *Resource) Copy() *Resource { + n := &Resource{ + Mode: r.Mode, + Name: r.Name, + Type: r.Type, + RawCount: r.RawCount.Copy(), + RawConfig: r.RawConfig.Copy(), + Provisioners: make([]*Provisioner, 0, len(r.Provisioners)), + Provider: r.Provider, + DependsOn: make([]string, len(r.DependsOn)), + Lifecycle: *r.Lifecycle.Copy(), + } + for _, p := range r.Provisioners { + n.Provisioners = append(n.Provisioners, p.Copy()) + } + copy(n.DependsOn, r.DependsOn) + return n +} + +// ResourceLifecycle is used to store the lifecycle tuning parameters +// to allow customized behavior +type ResourceLifecycle struct { + CreateBeforeDestroy bool `mapstructure:"create_before_destroy"` + PreventDestroy bool `mapstructure:"prevent_destroy"` + IgnoreChanges []string `mapstructure:"ignore_changes"` +} + +// Copy returns a copy of this ResourceLifecycle +func (r *ResourceLifecycle) Copy() *ResourceLifecycle { + n := &ResourceLifecycle{ + CreateBeforeDestroy: r.CreateBeforeDestroy, + PreventDestroy: r.PreventDestroy, + IgnoreChanges: make([]string, len(r.IgnoreChanges)), + } + copy(n.IgnoreChanges, r.IgnoreChanges) + return n +} + +// Provisioner is a configured provisioner step on a resource. +type Provisioner struct { + Type string + RawConfig *RawConfig + ConnInfo *RawConfig + + When ProvisionerWhen + OnFailure ProvisionerOnFailure +} + +// Copy returns a copy of this Provisioner +func (p *Provisioner) Copy() *Provisioner { + return &Provisioner{ + Type: p.Type, + RawConfig: p.RawConfig.Copy(), + ConnInfo: p.ConnInfo.Copy(), + When: p.When, + OnFailure: p.OnFailure, + } +} + +// Variable is a variable defined within the configuration. +type Variable struct { + Name string + DeclaredType string `mapstructure:"type"` + Default interface{} + Description string +} + +// Output is an output defined within the configuration. An output is +// resulting data that is highlighted by Terraform when finished. An +// output marked Sensitive will be output in a masked form following +// application, but will still be available in state. +type Output struct { + Name string + DependsOn []string + Description string + Sensitive bool + RawConfig *RawConfig +} + +// VariableType is the type of value a variable is holding, and returned +// by the Type() function on variables. +type VariableType byte + +const ( + VariableTypeUnknown VariableType = iota + VariableTypeString + VariableTypeList + VariableTypeMap +) + +func (v VariableType) Printable() string { + switch v { + case VariableTypeString: + return "string" + case VariableTypeMap: + return "map" + case VariableTypeList: + return "list" + default: + return "unknown" + } +} + +// ProviderConfigName returns the name of the provider configuration in +// the given mapping that maps to the proper provider configuration +// for this resource. +func ProviderConfigName(t string, pcs []*ProviderConfig) string { + lk := "" + for _, v := range pcs { + k := v.Name + if strings.HasPrefix(t, k) && len(k) > len(lk) { + lk = k + } + } + + return lk +} + +// A unique identifier for this module. +func (r *Module) Id() string { + return fmt.Sprintf("%s", r.Name) +} + +// Count returns the count of this resource. +func (r *Resource) Count() (int, error) { + raw := r.RawCount.Value() + count, ok := r.RawCount.Value().(string) + if !ok { + return 0, fmt.Errorf( + "expected count to be a string or int, got %T", raw) + } + + v, err := strconv.ParseInt(count, 0, 0) + if err != nil { + return 0, err + } + + return int(v), nil +} + +// A unique identifier for this resource. 
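+// Managed resources are identified as "type.name" (for example
+// "aws_instance.web"); data resources as "data.type.name".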
+func (r *Resource) Id() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + panic(fmt.Errorf("unknown resource mode %s", r.Mode)) + } +} + +// Validate does some basic semantic checking of the configuration. +func (c *Config) Validate() error { + if c == nil { + return nil + } + + var errs []error + + for _, k := range c.unknownKeys { + errs = append(errs, fmt.Errorf( + "Unknown root level key: %s", k)) + } + + // Validate the Terraform config + if tf := c.Terraform; tf != nil { + errs = append(errs, c.Terraform.Validate()...) + } + + vars := c.InterpolatedVariables() + varMap := make(map[string]*Variable) + for _, v := range c.Variables { + if _, ok := varMap[v.Name]; ok { + errs = append(errs, fmt.Errorf( + "Variable '%s': duplicate found. Variable names must be unique.", + v.Name)) + } + + varMap[v.Name] = v + } + + for k, _ := range varMap { + if !NameRegexp.MatchString(k) { + errs = append(errs, fmt.Errorf( + "variable %q: variable name must match regular expresion %s", + k, NameRegexp)) + } + } + + for _, v := range c.Variables { + if v.Type() == VariableTypeUnknown { + errs = append(errs, fmt.Errorf( + "Variable '%s': must be a string or a map", + v.Name)) + continue + } + + interp := false + fn := func(n ast.Node) (interface{}, error) { + // LiteralNode is a literal string (outside of a ${ ... } sequence). + // interpolationWalker skips most of these. but in particular it + // visits those that have escaped sequences (like $${foo}) as a + // signal that *some* processing is required on this string. For + // our purposes here though, this is fine and not an interpolation. + if _, ok := n.(*ast.LiteralNode); !ok { + interp = true + } + return "", nil + } + + w := &interpolationWalker{F: fn} + if v.Default != nil { + if err := reflectwalk.Walk(v.Default, w); err == nil { + if interp { + errs = append(errs, fmt.Errorf( + "Variable '%s': cannot contain interpolations", + v.Name)) + } + } + } + } + + // Check for references to user variables that do not actually + // exist and record those errors. + for source, vs := range vars { + for _, v := range vs { + uv, ok := v.(*UserVariable) + if !ok { + continue + } + + if _, ok := varMap[uv.Name]; !ok { + errs = append(errs, fmt.Errorf( + "%s: unknown variable referenced: '%s'. define it with 'variable' blocks", + source, + uv.Name)) + } + } + } + + // Check that all count variables are valid. + for source, vs := range vars { + for _, rawV := range vs { + switch v := rawV.(type) { + case *CountVariable: + if v.Type == CountValueInvalid { + errs = append(errs, fmt.Errorf( + "%s: invalid count variable: %s", + source, + v.FullKey())) + } + case *PathVariable: + if v.Type == PathValueInvalid { + errs = append(errs, fmt.Errorf( + "%s: invalid path variable: %s", + source, + v.FullKey())) + } + } + } + } + + // Check that providers aren't declared multiple times. 
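+	// Note that FullName includes the alias, so "aws" and "aws.west"
+	// count as two distinct provider configurations here.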
+ providerSet := make(map[string]struct{}) + for _, p := range c.ProviderConfigs { + name := p.FullName() + if _, ok := providerSet[name]; ok { + errs = append(errs, fmt.Errorf( + "provider.%s: declared multiple times, you can only declare a provider once", + name)) + continue + } + + providerSet[name] = struct{}{} + } + + // Check that all references to modules are valid + modules := make(map[string]*Module) + dupped := make(map[string]struct{}) + for _, m := range c.Modules { + // Check for duplicates + if _, ok := modules[m.Id()]; ok { + if _, ok := dupped[m.Id()]; !ok { + dupped[m.Id()] = struct{}{} + + errs = append(errs, fmt.Errorf( + "%s: module repeated multiple times", + m.Id())) + } + + // Already seen this module, just skip it + continue + } + + modules[m.Id()] = m + + // Check that the source has no interpolations + rc, err := NewRawConfig(map[string]interface{}{ + "root": m.Source, + }) + if err != nil { + errs = append(errs, fmt.Errorf( + "%s: module source error: %s", + m.Id(), err)) + } else if len(rc.Interpolations) > 0 { + errs = append(errs, fmt.Errorf( + "%s: module source cannot contain interpolations", + m.Id())) + } + + // Check that the name matches our regexp + if !NameRegexp.Match([]byte(m.Name)) { + errs = append(errs, fmt.Errorf( + "%s: module name can only contain letters, numbers, "+ + "dashes, and underscores", + m.Id())) + } + + // Check that the configuration can all be strings, lists or maps + raw := make(map[string]interface{}) + for k, v := range m.RawConfig.Raw { + var strVal string + if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { + raw[k] = strVal + continue + } + + var mapVal map[string]interface{} + if err := hilmapstructure.WeakDecode(v, &mapVal); err == nil { + raw[k] = mapVal + continue + } + + var sliceVal []interface{} + if err := hilmapstructure.WeakDecode(v, &sliceVal); err == nil { + raw[k] = sliceVal + continue + } + + errs = append(errs, fmt.Errorf( + "%s: variable %s must be a string, list or map value", + m.Id(), k)) + } + + // Check for invalid count variables + for _, v := range m.RawConfig.Variables { + switch v.(type) { + case *CountVariable: + errs = append(errs, fmt.Errorf( + "%s: count variables are only valid within resources", m.Name)) + case *SelfVariable: + errs = append(errs, fmt.Errorf( + "%s: self variables are only valid within resources", m.Name)) + } + } + + // Update the raw configuration to only contain the string values + m.RawConfig, err = NewRawConfig(raw) + if err != nil { + errs = append(errs, fmt.Errorf( + "%s: can't initialize configuration: %s", + m.Id(), err)) + } + } + dupped = nil + + // Check that all variables for modules reference modules that + // exist. 
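+	// For example, "${module.foo.bar}" is only valid when a module
+	// named "foo" is declared in this configuration.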
+ for source, vs := range vars { + for _, v := range vs { + mv, ok := v.(*ModuleVariable) + if !ok { + continue + } + + if _, ok := modules[mv.Name]; !ok { + errs = append(errs, fmt.Errorf( + "%s: unknown module referenced: %s", + source, + mv.Name)) + } + } + } + + // Check that all references to resources are valid + resources := make(map[string]*Resource) + dupped = make(map[string]struct{}) + for _, r := range c.Resources { + if _, ok := resources[r.Id()]; ok { + if _, ok := dupped[r.Id()]; !ok { + dupped[r.Id()] = struct{}{} + + errs = append(errs, fmt.Errorf( + "%s: resource repeated multiple times", + r.Id())) + } + } + + resources[r.Id()] = r + } + dupped = nil + + // Validate resources + for n, r := range resources { + // Verify count variables + for _, v := range r.RawCount.Variables { + switch v.(type) { + case *CountVariable: + errs = append(errs, fmt.Errorf( + "%s: resource count can't reference count variable: %s", + n, + v.FullKey())) + case *SimpleVariable: + errs = append(errs, fmt.Errorf( + "%s: resource count can't reference variable: %s", + n, + v.FullKey())) + + // Good + case *ModuleVariable: + case *ResourceVariable: + case *TerraformVariable: + case *UserVariable: + + default: + errs = append(errs, fmt.Errorf( + "Internal error. Unknown type in count var in %s: %T", + n, v)) + } + } + + // Interpolate with a fixed number to verify that its a number. + r.RawCount.interpolate(func(root ast.Node) (interface{}, error) { + // Execute the node but transform the AST so that it returns + // a fixed value of "5" for all interpolations. + result, err := hil.Eval( + hil.FixedValueTransform( + root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}), + nil) + if err != nil { + return "", err + } + + return result.Value, nil + }) + _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0) + if err != nil { + errs = append(errs, fmt.Errorf( + "%s: resource count must be an integer", + n)) + } + r.RawCount.init() + + // Validate DependsOn + errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...) + + // Verify provisioners + for _, p := range r.Provisioners { + // This validation checks that there are no splat variables + // referencing ourself. This currently is not allowed. + + for _, v := range p.ConnInfo.Variables { + rv, ok := v.(*ResourceVariable) + if !ok { + continue + } + + if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { + errs = append(errs, fmt.Errorf( + "%s: connection info cannot contain splat variable "+ + "referencing itself", n)) + break + } + } + + for _, v := range p.RawConfig.Variables { + rv, ok := v.(*ResourceVariable) + if !ok { + continue + } + + if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { + errs = append(errs, fmt.Errorf( + "%s: connection info cannot contain splat variable "+ + "referencing itself", n)) + break + } + } + + // Check for invalid when/onFailure values, though this should be + // picked up by the loader we check here just in case. 
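+			// These sentinel values mean the loader saw a "when" or
+			// "on_failure" string it could not map to a known option.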
+ if p.When == ProvisionerWhenInvalid { + errs = append(errs, fmt.Errorf( + "%s: provisioner 'when' value is invalid", n)) + } + if p.OnFailure == ProvisionerOnFailureInvalid { + errs = append(errs, fmt.Errorf( + "%s: provisioner 'on_failure' value is invalid", n)) + } + } + + // Verify ignore_changes contains valid entries + for _, v := range r.Lifecycle.IgnoreChanges { + if strings.Contains(v, "*") && v != "*" { + errs = append(errs, fmt.Errorf( + "%s: ignore_changes does not support using a partial string "+ + "together with a wildcard: %s", n, v)) + } + } + + // Verify ignore_changes has no interpolations + rc, err := NewRawConfig(map[string]interface{}{ + "root": r.Lifecycle.IgnoreChanges, + }) + if err != nil { + errs = append(errs, fmt.Errorf( + "%s: lifecycle ignore_changes error: %s", + n, err)) + } else if len(rc.Interpolations) > 0 { + errs = append(errs, fmt.Errorf( + "%s: lifecycle ignore_changes cannot contain interpolations", + n)) + } + + // If it is a data source then it can't have provisioners + if r.Mode == DataResourceMode { + if _, ok := r.RawConfig.Raw["provisioner"]; ok { + errs = append(errs, fmt.Errorf( + "%s: data sources cannot have provisioners", + n)) + } + } + } + + for source, vs := range vars { + for _, v := range vs { + rv, ok := v.(*ResourceVariable) + if !ok { + continue + } + + id := rv.ResourceId() + if _, ok := resources[id]; !ok { + errs = append(errs, fmt.Errorf( + "%s: unknown resource '%s' referenced in variable %s", + source, + id, + rv.FullKey())) + continue + } + } + } + + // Check that all outputs are valid + { + found := make(map[string]struct{}) + for _, o := range c.Outputs { + // Verify the output is new + if _, ok := found[o.Name]; ok { + errs = append(errs, fmt.Errorf( + "%s: duplicate output. output names must be unique.", + o.Name)) + continue + } + found[o.Name] = struct{}{} + + var invalidKeys []string + valueKeyFound := false + for k := range o.RawConfig.Raw { + if k == "value" { + valueKeyFound = true + continue + } + if k == "sensitive" { + if sensitive, ok := o.RawConfig.config[k].(bool); ok { + if sensitive { + o.Sensitive = true + } + continue + } + + errs = append(errs, fmt.Errorf( + "%s: value for 'sensitive' must be boolean", + o.Name)) + continue + } + if k == "description" { + if desc, ok := o.RawConfig.config[k].(string); ok { + o.Description = desc + continue + } + + errs = append(errs, fmt.Errorf( + "%s: value for 'description' must be string", + o.Name)) + continue + } + invalidKeys = append(invalidKeys, k) + } + if len(invalidKeys) > 0 { + errs = append(errs, fmt.Errorf( + "%s: output has invalid keys: %s", + o.Name, strings.Join(invalidKeys, ", "))) + } + if !valueKeyFound { + errs = append(errs, fmt.Errorf( + "%s: output is missing required 'value' key", o.Name)) + } + + for _, v := range o.RawConfig.Variables { + if _, ok := v.(*CountVariable); ok { + errs = append(errs, fmt.Errorf( + "%s: count variables are only valid within resources", o.Name)) + } + } + } + } + + // Validate the self variable + for source, rc := range c.rawConfigs() { + // Ignore provisioners. This is a pretty brittle way to do this, + // but better than also repeating all the resources. 
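+		// rawConfigs keys provisioner entries as
+		// "resource '<id>' provisioner <type> (#<n>)", which is why
+		// matching on the substring "provision" works here.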
+ if strings.Contains(source, "provision") { + continue + } + + for _, v := range rc.Variables { + if _, ok := v.(*SelfVariable); ok { + errs = append(errs, fmt.Errorf( + "%s: cannot contain self-reference %s", source, v.FullKey())) + } + } + } + + if len(errs) > 0 { + return &multierror.Error{Errors: errs} + } + + return nil +} + +// InterpolatedVariables is a helper that returns a mapping of all the interpolated +// variables within the configuration. This is used to verify references +// are valid in the Validate step. +func (c *Config) InterpolatedVariables() map[string][]InterpolatedVariable { + result := make(map[string][]InterpolatedVariable) + for source, rc := range c.rawConfigs() { + for _, v := range rc.Variables { + result[source] = append(result[source], v) + } + } + return result +} + +// rawConfigs returns all of the RawConfigs that are available keyed by +// a human-friendly source. +func (c *Config) rawConfigs() map[string]*RawConfig { + result := make(map[string]*RawConfig) + for _, m := range c.Modules { + source := fmt.Sprintf("module '%s'", m.Name) + result[source] = m.RawConfig + } + + for _, pc := range c.ProviderConfigs { + source := fmt.Sprintf("provider config '%s'", pc.Name) + result[source] = pc.RawConfig + } + + for _, rc := range c.Resources { + source := fmt.Sprintf("resource '%s'", rc.Id()) + result[source+" count"] = rc.RawCount + result[source+" config"] = rc.RawConfig + + for i, p := range rc.Provisioners { + subsource := fmt.Sprintf( + "%s provisioner %s (#%d)", + source, p.Type, i+1) + result[subsource] = p.RawConfig + } + } + + for _, o := range c.Outputs { + source := fmt.Sprintf("output '%s'", o.Name) + result[source] = o.RawConfig + } + + return result +} + +func (c *Config) validateDependsOn( + n string, + v []string, + resources map[string]*Resource, + modules map[string]*Module) []error { + // Verify depends on points to resources that all exist + var errs []error + for _, d := range v { + // Check if we contain interpolations + rc, err := NewRawConfig(map[string]interface{}{ + "value": d, + }) + if err == nil && len(rc.Variables) > 0 { + errs = append(errs, fmt.Errorf( + "%s: depends on value cannot contain interpolations: %s", + n, d)) + continue + } + + // If it is a module, verify it is a module + if strings.HasPrefix(d, "module.") { + name := d[len("module."):] + if _, ok := modules[name]; !ok { + errs = append(errs, fmt.Errorf( + "%s: resource depends on non-existent module '%s'", + n, name)) + } + + continue + } + + // Check resources + if _, ok := resources[d]; !ok { + errs = append(errs, fmt.Errorf( + "%s: resource depends on non-existent resource '%s'", + n, d)) + } + } + + return errs +} + +func (m *Module) mergerName() string { + return m.Id() +} + +func (m *Module) mergerMerge(other merger) merger { + m2 := other.(*Module) + + result := *m + result.Name = m2.Name + result.RawConfig = result.RawConfig.merge(m2.RawConfig) + + if m2.Source != "" { + result.Source = m2.Source + } + + return &result +} + +func (o *Output) mergerName() string { + return o.Name +} + +func (o *Output) mergerMerge(m merger) merger { + o2 := m.(*Output) + + result := *o + result.Name = o2.Name + result.Description = o2.Description + result.RawConfig = result.RawConfig.merge(o2.RawConfig) + result.Sensitive = o2.Sensitive + result.DependsOn = o2.DependsOn + + return &result +} + +func (c *ProviderConfig) GoString() string { + return fmt.Sprintf("*%#v", *c) +} + +func (c *ProviderConfig) FullName() string { + if c.Alias == "" { + return c.Name + } + + return 
fmt.Sprintf("%s.%s", c.Name, c.Alias) +} + +func (c *ProviderConfig) mergerName() string { + return c.Name +} + +func (c *ProviderConfig) mergerMerge(m merger) merger { + c2 := m.(*ProviderConfig) + + result := *c + result.Name = c2.Name + result.RawConfig = result.RawConfig.merge(c2.RawConfig) + + if c2.Alias != "" { + result.Alias = c2.Alias + } + + return &result +} + +func (r *Resource) mergerName() string { + return r.Id() +} + +func (r *Resource) mergerMerge(m merger) merger { + r2 := m.(*Resource) + + result := *r + result.Mode = r2.Mode + result.Name = r2.Name + result.Type = r2.Type + result.RawConfig = result.RawConfig.merge(r2.RawConfig) + + if r2.RawCount.Value() != "1" { + result.RawCount = r2.RawCount + } + + if len(r2.Provisioners) > 0 { + result.Provisioners = r2.Provisioners + } + + return &result +} + +// Merge merges two variables to create a new third variable. +func (v *Variable) Merge(v2 *Variable) *Variable { + // Shallow copy the variable + result := *v + + // The names should be the same, but the second name always wins. + result.Name = v2.Name + + if v2.DeclaredType != "" { + result.DeclaredType = v2.DeclaredType + } + if v2.Default != nil { + result.Default = v2.Default + } + if v2.Description != "" { + result.Description = v2.Description + } + + return &result +} + +var typeStringMap = map[string]VariableType{ + "string": VariableTypeString, + "map": VariableTypeMap, + "list": VariableTypeList, +} + +// Type returns the type of variable this is. +func (v *Variable) Type() VariableType { + if v.DeclaredType != "" { + declaredType, ok := typeStringMap[v.DeclaredType] + if !ok { + return VariableTypeUnknown + } + + return declaredType + } + + return v.inferTypeFromDefault() +} + +// ValidateTypeAndDefault ensures that default variable value is compatible +// with the declared type (if one exists), and that the type is one which is +// known to Terraform +func (v *Variable) ValidateTypeAndDefault() error { + // If an explicit type is declared, ensure it is valid + if v.DeclaredType != "" { + if _, ok := typeStringMap[v.DeclaredType]; !ok { + validTypes := []string{} + for k := range typeStringMap { + validTypes = append(validTypes, k) + } + return fmt.Errorf( + "Variable '%s' type must be one of [%s] - '%s' is not a valid type", + v.Name, + strings.Join(validTypes, ", "), + v.DeclaredType, + ) + } + } + + if v.DeclaredType == "" || v.Default == nil { + return nil + } + + if v.inferTypeFromDefault() != v.Type() { + return fmt.Errorf("'%s' has a default value which is not of type '%s' (got '%s')", + v.Name, v.DeclaredType, v.inferTypeFromDefault().Printable()) + } + + return nil +} + +func (v *Variable) mergerName() string { + return v.Name +} + +func (v *Variable) mergerMerge(m merger) merger { + return v.Merge(m.(*Variable)) +} + +// Required tests whether a variable is required or not. 
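+// In HCL terms, for example:
+//
+//	variable "image" {}                     # required
+//	variable "image" { default = "ubuntu" } # optional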
+func (v *Variable) Required() bool { + return v.Default == nil +} + +// inferTypeFromDefault contains the logic for the old method of inferring +// variable types - we can also use this for validating that the declared +// type matches the type of the default value +func (v *Variable) inferTypeFromDefault() VariableType { + if v.Default == nil { + return VariableTypeString + } + + var s string + if err := hilmapstructure.WeakDecode(v.Default, &s); err == nil { + v.Default = s + return VariableTypeString + } + + var m map[string]interface{} + if err := hilmapstructure.WeakDecode(v.Default, &m); err == nil { + v.Default = m + return VariableTypeMap + } + + var l []interface{} + if err := hilmapstructure.WeakDecode(v.Default, &l); err == nil { + v.Default = l + return VariableTypeList + } + + return VariableTypeUnknown +} + +func (m ResourceMode) Taintable() bool { + switch m { + case ManagedResourceMode: + return true + case DataResourceMode: + return false + default: + panic(fmt.Errorf("unsupported ResourceMode value %s", m)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go new file mode 100644 index 0000000000..0b3abbcd53 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/config_string.go @@ -0,0 +1,338 @@ +package config + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +// TestString is a Stringer-like function that outputs a string that can +// be used to easily compare multiple Config structures in unit tests. +// +// This function has no practical use outside of unit tests and debugging. +func (c *Config) TestString() string { + if c == nil { + return "" + } + + var buf bytes.Buffer + if len(c.Modules) > 0 { + buf.WriteString("Modules:\n\n") + buf.WriteString(modulesStr(c.Modules)) + buf.WriteString("\n\n") + } + + if len(c.Variables) > 0 { + buf.WriteString("Variables:\n\n") + buf.WriteString(variablesStr(c.Variables)) + buf.WriteString("\n\n") + } + + if len(c.ProviderConfigs) > 0 { + buf.WriteString("Provider Configs:\n\n") + buf.WriteString(providerConfigsStr(c.ProviderConfigs)) + buf.WriteString("\n\n") + } + + if len(c.Resources) > 0 { + buf.WriteString("Resources:\n\n") + buf.WriteString(resourcesStr(c.Resources)) + buf.WriteString("\n\n") + } + + if len(c.Outputs) > 0 { + buf.WriteString("Outputs:\n\n") + buf.WriteString(outputsStr(c.Outputs)) + buf.WriteString("\n") + } + + return strings.TrimSpace(buf.String()) +} + +func terraformStr(t *Terraform) string { + result := "" + + if b := t.Backend; b != nil { + result += fmt.Sprintf("backend (%s)\n", b.Type) + + keys := make([]string, 0, len(b.RawConfig.Raw)) + for k, _ := range b.RawConfig.Raw { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + result += fmt.Sprintf(" %s\n", k) + } + } + + return strings.TrimSpace(result) +} + +func modulesStr(ms []*Module) string { + result := "" + order := make([]int, 0, len(ms)) + ks := make([]string, 0, len(ms)) + mapping := make(map[string]int) + for i, m := range ms { + k := m.Id() + ks = append(ks, k) + mapping[k] = i + } + sort.Strings(ks) + for _, k := range ks { + order = append(order, mapping[k]) + } + + for _, i := range order { + m := ms[i] + result += fmt.Sprintf("%s\n", m.Id()) + + ks := make([]string, 0, len(m.RawConfig.Raw)) + for k, _ := range m.RawConfig.Raw { + ks = append(ks, k) + } + sort.Strings(ks) + + result += fmt.Sprintf(" source = %s\n", m.Source) + + for _, k := range ks { + result += fmt.Sprintf(" %s\n", k) + } + } + + 
return strings.TrimSpace(result) +} + +func outputsStr(os []*Output) string { + ns := make([]string, 0, len(os)) + m := make(map[string]*Output) + for _, o := range os { + ns = append(ns, o.Name) + m[o.Name] = o + } + sort.Strings(ns) + + result := "" + for _, n := range ns { + o := m[n] + + result += fmt.Sprintf("%s\n", n) + + if len(o.DependsOn) > 0 { + result += fmt.Sprintf(" dependsOn\n") + for _, d := range o.DependsOn { + result += fmt.Sprintf(" %s\n", d) + } + } + + if len(o.RawConfig.Variables) > 0 { + result += fmt.Sprintf(" vars\n") + for _, rawV := range o.RawConfig.Variables { + kind := "unknown" + str := rawV.FullKey() + + switch rawV.(type) { + case *ResourceVariable: + kind = "resource" + case *UserVariable: + kind = "user" + } + + result += fmt.Sprintf(" %s: %s\n", kind, str) + } + } + } + + return strings.TrimSpace(result) +} + +// This helper turns a provider configs field into a deterministic +// string value for comparison in tests. +func providerConfigsStr(pcs []*ProviderConfig) string { + result := "" + + ns := make([]string, 0, len(pcs)) + m := make(map[string]*ProviderConfig) + for _, n := range pcs { + ns = append(ns, n.Name) + m[n.Name] = n + } + sort.Strings(ns) + + for _, n := range ns { + pc := m[n] + + result += fmt.Sprintf("%s\n", n) + + keys := make([]string, 0, len(pc.RawConfig.Raw)) + for k, _ := range pc.RawConfig.Raw { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + result += fmt.Sprintf(" %s\n", k) + } + + if len(pc.RawConfig.Variables) > 0 { + result += fmt.Sprintf(" vars\n") + for _, rawV := range pc.RawConfig.Variables { + kind := "unknown" + str := rawV.FullKey() + + switch rawV.(type) { + case *ResourceVariable: + kind = "resource" + case *UserVariable: + kind = "user" + } + + result += fmt.Sprintf(" %s: %s\n", kind, str) + } + } + } + + return strings.TrimSpace(result) +} + +// This helper turns a resources field into a deterministic +// string value for comparison in tests. 
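+// Entries are sorted by resource Id() so that the output is stable
+// regardless of the order of the input slice.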
+func resourcesStr(rs []*Resource) string { + result := "" + order := make([]int, 0, len(rs)) + ks := make([]string, 0, len(rs)) + mapping := make(map[string]int) + for i, r := range rs { + k := r.Id() + ks = append(ks, k) + mapping[k] = i + } + sort.Strings(ks) + for _, k := range ks { + order = append(order, mapping[k]) + } + + for _, i := range order { + r := rs[i] + result += fmt.Sprintf( + "%s (x%s)\n", + r.Id(), + r.RawCount.Value()) + + ks := make([]string, 0, len(r.RawConfig.Raw)) + for k, _ := range r.RawConfig.Raw { + ks = append(ks, k) + } + sort.Strings(ks) + + for _, k := range ks { + result += fmt.Sprintf(" %s\n", k) + } + + if len(r.Provisioners) > 0 { + result += fmt.Sprintf(" provisioners\n") + for _, p := range r.Provisioners { + when := "" + if p.When != ProvisionerWhenCreate { + when = fmt.Sprintf(" (%s)", p.When.String()) + } + + result += fmt.Sprintf(" %s%s\n", p.Type, when) + + if p.OnFailure != ProvisionerOnFailureFail { + result += fmt.Sprintf(" on_failure = %s\n", p.OnFailure.String()) + } + + ks := make([]string, 0, len(p.RawConfig.Raw)) + for k, _ := range p.RawConfig.Raw { + ks = append(ks, k) + } + sort.Strings(ks) + + for _, k := range ks { + result += fmt.Sprintf(" %s\n", k) + } + } + } + + if len(r.DependsOn) > 0 { + result += fmt.Sprintf(" dependsOn\n") + for _, d := range r.DependsOn { + result += fmt.Sprintf(" %s\n", d) + } + } + + if len(r.RawConfig.Variables) > 0 { + result += fmt.Sprintf(" vars\n") + + ks := make([]string, 0, len(r.RawConfig.Variables)) + for k, _ := range r.RawConfig.Variables { + ks = append(ks, k) + } + sort.Strings(ks) + + for _, k := range ks { + rawV := r.RawConfig.Variables[k] + kind := "unknown" + str := rawV.FullKey() + + switch rawV.(type) { + case *ResourceVariable: + kind = "resource" + case *UserVariable: + kind = "user" + } + + result += fmt.Sprintf(" %s: %s\n", kind, str) + } + } + } + + return strings.TrimSpace(result) +} + +// This helper turns a variables field into a deterministic +// string value for comparison in tests. +func variablesStr(vs []*Variable) string { + result := "" + ks := make([]string, 0, len(vs)) + m := make(map[string]*Variable) + for _, v := range vs { + ks = append(ks, v.Name) + m[v.Name] = v + } + sort.Strings(ks) + + for _, k := range ks { + v := m[k] + + required := "" + if v.Required() { + required = " (required)" + } + + declaredType := "" + if v.DeclaredType != "" { + declaredType = fmt.Sprintf(" (%s)", v.DeclaredType) + } + + if v.Default == nil || v.Default == "" { + v.Default = "<>" + } + if v.Description == "" { + v.Description = "<>" + } + + result += fmt.Sprintf( + "%s%s%s\n %v\n %s\n", + k, + required, + declaredType, + v.Default, + v.Description) + } + + return strings.TrimSpace(result) +} diff --git a/vendor/github.com/hashicorp/terraform/config/config_terraform.go b/vendor/github.com/hashicorp/terraform/config/config_terraform.go new file mode 100644 index 0000000000..8535c96485 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/config_terraform.go @@ -0,0 +1,117 @@ +package config + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-version" + "github.com/mitchellh/hashstructure" +) + +// Terraform is the Terraform meta-configuration that can be present +// in configuration files for configuring Terraform itself. +type Terraform struct { + RequiredVersion string `hcl:"required_version"` // Required Terraform version (constraint) + Backend *Backend // See Backend struct docs +} + +// Validate performs the validation for just the Terraform configuration. 
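+// It checks that required_version contains no interpolations and is a
+// parseable version constraint, for example:
+//
+//	terraform {
+//	  required_version = ">= 0.9.0"
+//	}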
+func (t *Terraform) Validate() []error { + var errs []error + + if raw := t.RequiredVersion; raw != "" { + // Check that the value has no interpolations + rc, err := NewRawConfig(map[string]interface{}{ + "root": raw, + }) + if err != nil { + errs = append(errs, fmt.Errorf( + "terraform.required_version: %s", err)) + } else if len(rc.Interpolations) > 0 { + errs = append(errs, fmt.Errorf( + "terraform.required_version: cannot contain interpolations")) + } else { + // Check it is valid + _, err := version.NewConstraint(raw) + if err != nil { + errs = append(errs, fmt.Errorf( + "terraform.required_version: invalid syntax: %s", err)) + } + } + } + + if t.Backend != nil { + errs = append(errs, t.Backend.Validate()...) + } + + return errs +} + +// Merge t with t2. +// Any conflicting fields are overwritten by t2. +func (t *Terraform) Merge(t2 *Terraform) { + if t2.RequiredVersion != "" { + t.RequiredVersion = t2.RequiredVersion + } + + if t2.Backend != nil { + t.Backend = t2.Backend + } +} + +// Backend is the configuration for the "backend" to use with Terraform. +// A backend is responsible for all major behavior of Terraform's core. +// The abstraction layer above the core (the "backend") allows for behavior +// such as remote operation. +type Backend struct { + Type string + RawConfig *RawConfig + + // Hash is a unique hash code representing the original configuration + // of the backend. This won't be recomputed unless Rehash is called. + Hash uint64 +} + +// Rehash returns a unique content hash for this backend's configuration +// as a uint64 value. +func (b *Backend) Rehash() uint64 { + // If we have no backend, the value is zero + if b == nil { + return 0 + } + + // Use hashstructure to hash only our type with the config. + code, err := hashstructure.Hash(map[string]interface{}{ + "type": b.Type, + "config": b.RawConfig.Raw, + }, nil) + + // This should never happen since we have just some basic primitives + // so panic if there is an error. + if err != nil { + panic(err) + } + + return code +} + +func (b *Backend) Validate() []error { + if len(b.RawConfig.Interpolations) > 0 { + return []error{fmt.Errorf(strings.TrimSpace(errBackendInterpolations))} + } + + return nil +} + +const errBackendInterpolations = ` +terraform.backend: configuration cannot contain interpolations + +The backend configuration is loaded by Terraform extremely early, before +the core of Terraform can be initialized. This is necessary because the backend +dictates the behavior of that core. The core is what handles interpolation +processing. Because of this, interpolations cannot be used in backend +configuration. + +If you'd like to parameterize backend configuration, we recommend using +partial configuration with the "-backend-config" flag to "terraform init". +` diff --git a/vendor/github.com/hashicorp/terraform/config/config_tree.go b/vendor/github.com/hashicorp/terraform/config/config_tree.go new file mode 100644 index 0000000000..08dc0fe904 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/config_tree.go @@ -0,0 +1,43 @@ +package config + +// configTree represents a tree of configurations where the root is the +// first file and its children are the configurations it has imported. +type configTree struct { + Path string + Config *Config + Children []*configTree +} + +// Flatten flattens the entire tree down to a single merged Config +// structure. +func (t *configTree) Flatten() (*Config, error) { + // No children is easy: we're already merged! 
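+	// Note the returned *Config is t's own, not a copy, so callers
+	// effectively share it.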
+	if len(t.Children) == 0 {
+		return t.Config, nil
+	}
+
+	// Depth-first, merge all the children first.
+	childConfigs := make([]*Config, len(t.Children))
+	for i, ct := range t.Children {
+		c, err := ct.Flatten()
+		if err != nil {
+			return nil, err
+		}
+
+		childConfigs[i] = c
+	}
+
+	// Merge all the children in order
+	config := childConfigs[0]
+	childConfigs = childConfigs[1:]
+	for _, config2 := range childConfigs {
+		var err error
+		config, err = Merge(config, config2)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Merge the final merged child config with our own
+	return Merge(config, t.Config)
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go
new file mode 100644
index 0000000000..37ec11a155
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go
@@ -0,0 +1,113 @@
+package config
+
+import (
+	"fmt"
+	"io"
+)
+
+// configurable is an interface that must be implemented by any configuration
+// format of Terraform in order to return a *Config.
+type configurable interface {
+	Config() (*Config, error)
+}
+
+// importTree is the result of the first-pass load of the configuration
+// files. It is a tree of raw configurables and then any children (their
+// imports).
+//
+// An importTree can be turned into a configTree.
+type importTree struct {
+	Path     string
+	Raw      configurable
+	Children []*importTree
+}
+
+// This is the function type that must be implemented by the configuration
+// file loader to turn a single file into a configurable and any additional
+// imports.
+type fileLoaderFunc func(path string) (configurable, []string, error)
+
+// loadTree takes a single file and loads the entire importTree for that
+// file. This function detects what kind of configuration file it is and
+// executes the proper fileLoaderFunc.
+func loadTree(root string) (*importTree, error) {
+	var f fileLoaderFunc
+	switch ext(root) {
+	case ".tf", ".tf.json":
+		f = loadFileHcl
+	default:
+	}
+
+	if f == nil {
+		return nil, fmt.Errorf(
+			"%s: unknown configuration format. Use '.tf' or '.tf.json' extension",
+			root)
+	}
+
+	c, imps, err := f(root)
+	if err != nil {
+		return nil, err
+	}
+
+	children := make([]*importTree, len(imps))
+	for i, imp := range imps {
+		t, err := loadTree(imp)
+		if err != nil {
+			return nil, err
+		}
+
+		children[i] = t
+	}
+
+	return &importTree{
+		Path:     root,
+		Raw:      c,
+		Children: children,
+	}, nil
+}
+
+// Close releases any resources we might be holding open for the importTree.
+//
+// This can safely be called even while ConfigTree results are alive. The
+// importTree is not bound to these.
+func (t *importTree) Close() error {
+	if c, ok := t.Raw.(io.Closer); ok {
+		c.Close()
+	}
+	for _, ct := range t.Children {
+		ct.Close()
+	}
+
+	return nil
+}
+
+// ConfigTree traverses the importTree and turns each node into a *Config
+// object, ultimately returning a *configTree.
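+//
+// Together with loadTree and Flatten this forms the whole load pipeline;
+// an illustrative sketch (file name hypothetical, errors elided):
+//
+//	imports, _ := loadTree("main.tf")   // first pass: raw configurables
+//	confTree, _ := imports.ConfigTree() // second pass: *Config per node
+//	config, _ := confTree.Flatten()     // merge into a single *Config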
+func (t *importTree) ConfigTree() (*configTree, error) {
+	config, err := t.Raw.Config()
+	if err != nil {
+		return nil, fmt.Errorf(
+			"Error loading %s: %s",
+			t.Path,
+			err)
+	}
+
+	// Build our result
+	result := &configTree{
+		Path:   t.Path,
+		Config: config,
+	}
+
+	// Build the config trees for the children
+	result.Children = make([]*configTree, len(t.Children))
+	for i, ct := range t.Children {
+		t, err := ct.ConfigTree()
+		if err != nil {
+			return nil, err
+		}
+
+		result.Children[i] = t
+	}
+
+	return result, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go
new file mode 100644
index 0000000000..bbb3555418
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go
@@ -0,0 +1,386 @@
+package config
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/hil/ast"
+)
+
+// An InterpolatedVariable is a variable reference within an interpolation.
+//
+// Implementations of this interface represent the various sources where
+// variables can come from: user variables, resources, etc.
+type InterpolatedVariable interface {
+	FullKey() string
+}
+
+// CountVariable is a variable for referencing information about
+// the count.
+type CountVariable struct {
+	Type CountValueType
+	key  string
+}
+
+// CountValueType is the type of the count variable that is referenced.
+type CountValueType byte
+
+const (
+	CountValueInvalid CountValueType = iota
+	CountValueIndex
+)
+
+// A ModuleVariable is a variable that is referencing the output
+// of a module, such as "${module.foo.bar}"
+type ModuleVariable struct {
+	Name  string
+	Field string
+	key   string
+}
+
+// A PathVariable is a variable that references path information about the
+// module.
+type PathVariable struct {
+	Type PathValueType
+	key  string
+}
+
+type PathValueType byte
+
+const (
+	PathValueInvalid PathValueType = iota
+	PathValueCwd
+	PathValueModule
+	PathValueRoot
+)
+
+// A ResourceVariable is a variable that is referencing the field
+// of a resource, such as "${aws_instance.foo.ami}"
+type ResourceVariable struct {
+	Mode  ResourceMode
+	Type  string // Resource type, i.e. "aws_instance"
+	Name  string // Resource name
+	Field string // Resource field
+
+	Multi bool // True if multi-variable: aws_instance.foo.*.id
+	Index int  // Index for multi-variable: aws_instance.foo.1.id == 1
+
+	key string
+}
+
+// SelfVariable is a variable that is referencing the same resource
+// it is running on: "${self.address}"
+type SelfVariable struct {
+	Field string
+
+	key string
+}
+
+// SimpleVariable is an unprefixed variable, which can show up when users have
+// strings they are passing down to resources that use interpolation
+// internally. The template_file resource is an example of this.
+type SimpleVariable struct {
+	Key string
+}
+
+// TerraformVariable is a "terraform."-prefixed variable used to access
+// metadata about the Terraform run.
+type TerraformVariable struct {
+	Field string
+	key   string
+}
+
+// A UserVariable is a variable that is referencing a user variable
+// that is provided from outside the configuration.
This looks like +// "${var.foo}" +type UserVariable struct { + Name string + Elem string + + key string +} + +func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { + if strings.HasPrefix(v, "count.") { + return NewCountVariable(v) + } else if strings.HasPrefix(v, "path.") { + return NewPathVariable(v) + } else if strings.HasPrefix(v, "self.") { + return NewSelfVariable(v) + } else if strings.HasPrefix(v, "terraform.") { + return NewTerraformVariable(v) + } else if strings.HasPrefix(v, "var.") { + return NewUserVariable(v) + } else if strings.HasPrefix(v, "module.") { + return NewModuleVariable(v) + } else if !strings.ContainsRune(v, '.') { + return NewSimpleVariable(v) + } else { + return NewResourceVariable(v) + } +} + +func NewCountVariable(key string) (*CountVariable, error) { + var fieldType CountValueType + parts := strings.SplitN(key, ".", 2) + switch parts[1] { + case "index": + fieldType = CountValueIndex + } + + return &CountVariable{ + Type: fieldType, + key: key, + }, nil +} + +func (c *CountVariable) FullKey() string { + return c.key +} + +func NewModuleVariable(key string) (*ModuleVariable, error) { + parts := strings.SplitN(key, ".", 3) + if len(parts) < 3 { + return nil, fmt.Errorf( + "%s: module variables must be three parts: module.name.attr", + key) + } + + return &ModuleVariable{ + Name: parts[1], + Field: parts[2], + key: key, + }, nil +} + +func (v *ModuleVariable) FullKey() string { + return v.key +} + +func (v *ModuleVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + +func NewPathVariable(key string) (*PathVariable, error) { + var fieldType PathValueType + parts := strings.SplitN(key, ".", 2) + switch parts[1] { + case "cwd": + fieldType = PathValueCwd + case "module": + fieldType = PathValueModule + case "root": + fieldType = PathValueRoot + } + + return &PathVariable{ + Type: fieldType, + key: key, + }, nil +} + +func (v *PathVariable) FullKey() string { + return v.key +} + +func NewResourceVariable(key string) (*ResourceVariable, error) { + var mode ResourceMode + var parts []string + if strings.HasPrefix(key, "data.") { + mode = DataResourceMode + parts = strings.SplitN(key, ".", 4) + if len(parts) < 4 { + return nil, fmt.Errorf( + "%s: data variables must be four parts: data.TYPE.NAME.ATTR", + key) + } + + // Don't actually need the "data." prefix for parsing, since it's + // always constant. 
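+			// e.g. a hypothetical key "data.aws_ami.web.id" splits into
+			// ["data", "aws_ami", "web", "id"]; dropping "data" leaves the
+			// same three-part shape as a managed resource key.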
+ parts = parts[1:] + } else { + mode = ManagedResourceMode + parts = strings.SplitN(key, ".", 3) + if len(parts) < 3 { + return nil, fmt.Errorf( + "%s: resource variables must be three parts: TYPE.NAME.ATTR", + key) + } + } + + field := parts[2] + multi := false + var index int + + if idx := strings.Index(field, "."); idx != -1 { + indexStr := field[:idx] + multi = indexStr == "*" + index = -1 + + if !multi { + indexInt, err := strconv.ParseInt(indexStr, 0, 0) + if err == nil { + multi = true + index = int(indexInt) + } + } + + if multi { + field = field[idx+1:] + } + } + + return &ResourceVariable{ + Mode: mode, + Type: parts[0], + Name: parts[1], + Field: field, + Multi: multi, + Index: index, + key: key, + }, nil +} + +func (v *ResourceVariable) ResourceId() string { + switch v.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", v.Type, v.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", v.Type, v.Name) + default: + panic(fmt.Errorf("unknown resource mode %s", v.Mode)) + } +} + +func (v *ResourceVariable) FullKey() string { + return v.key +} + +func NewSelfVariable(key string) (*SelfVariable, error) { + field := key[len("self."):] + + return &SelfVariable{ + Field: field, + + key: key, + }, nil +} + +func (v *SelfVariable) FullKey() string { + return v.key +} + +func (v *SelfVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + +func NewSimpleVariable(key string) (*SimpleVariable, error) { + return &SimpleVariable{key}, nil +} + +func (v *SimpleVariable) FullKey() string { + return v.Key +} + +func (v *SimpleVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + +func NewTerraformVariable(key string) (*TerraformVariable, error) { + field := key[len("terraform."):] + return &TerraformVariable{ + Field: field, + key: key, + }, nil +} + +func (v *TerraformVariable) FullKey() string { + return v.key +} + +func (v *TerraformVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + +func NewUserVariable(key string) (*UserVariable, error) { + name := key[len("var."):] + elem := "" + if idx := strings.Index(name, "."); idx > -1 { + elem = name[idx+1:] + name = name[:idx] + } + + if len(elem) > 0 { + return nil, fmt.Errorf("Invalid dot index found: 'var.%s.%s'. Values in maps and lists can be referenced using square bracket indexing, like: 'var.mymap[\"key\"]' or 'var.mylist[1]'.", name, elem) + } + + return &UserVariable{ + key: key, + + Name: name, + Elem: elem, + }, nil +} + +func (v *UserVariable) FullKey() string { + return v.key +} + +func (v *UserVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + +// DetectVariables takes an AST root and returns all the interpolated +// variables that are detected in the AST tree. 
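+//
+// An illustrative sketch (input hypothetical), pairing this with
+// hil.Parse:
+//
+//	root, _ := hil.Parse("${var.region}")
+//	vars, _ := DetectVariables(root) // one *UserVariable for "var.region"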
+func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) {
+	var result []InterpolatedVariable
+	var resultErr error
+
+	// Visitor callback
+	fn := func(n ast.Node) ast.Node {
+		if resultErr != nil {
+			return n
+		}
+
+		switch vn := n.(type) {
+		case *ast.VariableAccess:
+			v, err := NewInterpolatedVariable(vn.Name)
+			if err != nil {
+				resultErr = err
+				return n
+			}
+			result = append(result, v)
+		case *ast.Index:
+			if va, ok := vn.Target.(*ast.VariableAccess); ok {
+				v, err := NewInterpolatedVariable(va.Name)
+				if err != nil {
+					resultErr = err
+					return n
+				}
+				result = append(result, v)
+			}
+			if va, ok := vn.Key.(*ast.VariableAccess); ok {
+				v, err := NewInterpolatedVariable(va.Name)
+				if err != nil {
+					resultErr = err
+					return n
+				}
+				result = append(result, v)
+			}
+		default:
+			return n
+		}
+
+		return n
+	}
+
+	// Visitor pattern
+	root.Accept(fn)
+
+	if resultErr != nil {
+		return nil, resultErr
+	}
+
+	return result, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
new file mode 100644
index 0000000000..7b7b3f2620
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
@@ -0,0 +1,1438 @@
+package config
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"crypto/sha256"
+	"crypto/sha512"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"net"
+	"path/filepath"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/apparentlymart/go-cidr/cidr"
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/hil"
+	"github.com/hashicorp/hil/ast"
+	"github.com/mitchellh/go-homedir"
+	"golang.org/x/crypto/bcrypt"
+)
+
+// stringSliceToVariableValue converts a string slice into the value
+// required to be returned from interpolation functions which return
+// TypeList.
+func stringSliceToVariableValue(values []string) []ast.Variable {
+	output := make([]ast.Variable, len(values))
+	for index, value := range values {
+		output[index] = ast.Variable{
+			Type:  ast.TypeString,
+			Value: value,
+		}
+	}
+	return output
+}
+
+func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
+	output := make([]string, len(values))
+	for index, value := range values {
+		if value.Type != ast.TypeString {
+			return []string{}, fmt.Errorf("list has non-string element (%s)", value.Type.String())
+		}
+		output[index] = value.Value.(string)
+	}
+	return output, nil
+}
+
+// Funcs is the mapping of built-in functions for configuration.
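+//
+// Each entry is a HIL ast.Function whose Callback can be invoked directly;
+// an illustrative sketch using the "join" entry defined below:
+//
+//	parts := stringSliceToVariableValue([]string{"a", "b"})
+//	out, _ := Funcs()["join"].Callback([]interface{}{"-", parts})
+//	// out == "a-b"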
+func Funcs() map[string]ast.Function { + return map[string]ast.Function{ + "basename": interpolationFuncBasename(), + "base64decode": interpolationFuncBase64Decode(), + "base64encode": interpolationFuncBase64Encode(), + "base64sha256": interpolationFuncBase64Sha256(), + "base64sha512": interpolationFuncBase64Sha512(), + "bcrypt": interpolationFuncBcrypt(), + "ceil": interpolationFuncCeil(), + "chomp": interpolationFuncChomp(), + "cidrhost": interpolationFuncCidrHost(), + "cidrnetmask": interpolationFuncCidrNetmask(), + "cidrsubnet": interpolationFuncCidrSubnet(), + "coalesce": interpolationFuncCoalesce(), + "coalescelist": interpolationFuncCoalesceList(), + "compact": interpolationFuncCompact(), + "concat": interpolationFuncConcat(), + "dirname": interpolationFuncDirname(), + "distinct": interpolationFuncDistinct(), + "element": interpolationFuncElement(), + "file": interpolationFuncFile(), + "matchkeys": interpolationFuncMatchKeys(), + "floor": interpolationFuncFloor(), + "format": interpolationFuncFormat(), + "formatlist": interpolationFuncFormatList(), + "index": interpolationFuncIndex(), + "join": interpolationFuncJoin(), + "jsonencode": interpolationFuncJSONEncode(), + "length": interpolationFuncLength(), + "list": interpolationFuncList(), + "log": interpolationFuncLog(), + "lower": interpolationFuncLower(), + "map": interpolationFuncMap(), + "max": interpolationFuncMax(), + "md5": interpolationFuncMd5(), + "merge": interpolationFuncMerge(), + "min": interpolationFuncMin(), + "pathexpand": interpolationFuncPathExpand(), + "pow": interpolationFuncPow(), + "uuid": interpolationFuncUUID(), + "replace": interpolationFuncReplace(), + "sha1": interpolationFuncSha1(), + "sha256": interpolationFuncSha256(), + "sha512": interpolationFuncSha512(), + "signum": interpolationFuncSignum(), + "slice": interpolationFuncSlice(), + "sort": interpolationFuncSort(), + "split": interpolationFuncSplit(), + "substr": interpolationFuncSubstr(), + "timestamp": interpolationFuncTimestamp(), + "title": interpolationFuncTitle(), + "trimspace": interpolationFuncTrimSpace(), + "upper": interpolationFuncUpper(), + "zipmap": interpolationFuncZipMap(), + } +} + +// interpolationFuncList creates a list from the parameters passed +// to it. +func interpolationFuncList() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{}, + ReturnType: ast.TypeList, + Variadic: true, + VariadicType: ast.TypeAny, + Callback: func(args []interface{}) (interface{}, error) { + var outputList []ast.Variable + + for i, val := range args { + switch v := val.(type) { + case string: + outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: v}) + case []ast.Variable: + outputList = append(outputList, ast.Variable{Type: ast.TypeList, Value: v}) + case map[string]ast.Variable: + outputList = append(outputList, ast.Variable{Type: ast.TypeMap, Value: v}) + default: + return nil, fmt.Errorf("unexpected type %T for argument %d in list", v, i) + } + } + + // we don't support heterogeneous types, so make sure all types match the first + if len(outputList) > 0 { + firstType := outputList[0].Type + for i, v := range outputList[1:] { + if v.Type != firstType { + return nil, fmt.Errorf("unexpected type %s for argument %d in list", v.Type, i+1) + } + } + } + + return outputList, nil + }, + } +} + +// interpolationFuncMap creates a map from the parameters passed +// to it. 
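+// Arguments alternate key then value, e.g. map("k1", "v1", "k2", "v2").
+// An odd argument count, a non-string key, a duplicate key, or mixed
+// value types is rejected below.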
+func interpolationFuncMap() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{}, + ReturnType: ast.TypeMap, + Variadic: true, + VariadicType: ast.TypeAny, + Callback: func(args []interface{}) (interface{}, error) { + outputMap := make(map[string]ast.Variable) + + if len(args)%2 != 0 { + return nil, fmt.Errorf("requires an even number of arguments, got %d", len(args)) + } + + var firstType *ast.Type + for i := 0; i < len(args); i += 2 { + key, ok := args[i].(string) + if !ok { + return nil, fmt.Errorf("argument %d represents a key, so it must be a string", i+1) + } + val := args[i+1] + variable, err := hil.InterfaceToVariable(val) + if err != nil { + return nil, err + } + // Enforce map type homogeneity + if firstType == nil { + firstType = &variable.Type + } else if variable.Type != *firstType { + return nil, fmt.Errorf("all map values must have the same type, got %s then %s", firstType.Printable(), variable.Type.Printable()) + } + // Check for duplicate keys + if _, ok := outputMap[key]; ok { + return nil, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) + } + outputMap[key] = variable + } + + return outputMap, nil + }, + } +} + +// interpolationFuncCompact strips a list of multi-variable values +// (e.g. as returned by "split") of any empty strings. +func interpolationFuncCompact() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + inputList := args[0].([]ast.Variable) + + var outputList []string + for _, val := range inputList { + strVal, ok := val.Value.(string) + if !ok { + return nil, fmt.Errorf( + "compact() may only be used with flat lists, this list contains elements of %s", + val.Type.Printable()) + } + if strVal == "" { + continue + } + + outputList = append(outputList, strVal) + } + return stringSliceToVariableValue(outputList), nil + }, + } +} + +// interpolationFuncCidrHost implements the "cidrhost" function that +// fills in the host part of a CIDR range address to create a single +// host address +func interpolationFuncCidrHost() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // starting CIDR mask + ast.TypeInt, // host number to insert + }, + ReturnType: ast.TypeString, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + hostNum := args[1].(int) + _, network, err := net.ParseCIDR(args[0].(string)) + if err != nil { + return nil, fmt.Errorf("invalid CIDR expression: %s", err) + } + + ip, err := cidr.Host(network, hostNum) + if err != nil { + return nil, err + } + + return ip.String(), nil + }, + } +} + +// interpolationFuncCidrNetmask implements the "cidrnetmask" function +// that returns the subnet mask in IP address notation. +func interpolationFuncCidrNetmask() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // CIDR mask + }, + ReturnType: ast.TypeString, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + _, network, err := net.ParseCIDR(args[0].(string)) + if err != nil { + return nil, fmt.Errorf("invalid CIDR expression: %s", err) + } + + return net.IP(network.Mask).String(), nil + }, + } +} + +// interpolationFuncCidrSubnet implements the "cidrsubnet" function that +// adds an additional subnet of the given length onto an existing +// IP block expressed in CIDR notation. 
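+//
+// For example, cidrsubnet("10.0.0.0/8", 8, 2) grows the prefix from /8 to
+// /16 and selects network number 2; the equivalent direct calls are:
+//
+//	_, network, _ := net.ParseCIDR("10.0.0.0/8")
+//	subnet, _ := cidr.Subnet(network, 8, 2) // subnet.String() == "10.2.0.0/16"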
+func interpolationFuncCidrSubnet() ast.Function {
+	return ast.Function{
+		ArgTypes: []ast.Type{
+			ast.TypeString, // starting CIDR mask
+			ast.TypeInt,    // number of bits to extend the prefix
+			ast.TypeInt,    // network number to append to the prefix
+		},
+		ReturnType: ast.TypeString,
+		Variadic:   false,
+		Callback: func(args []interface{}) (interface{}, error) {
+			extraBits := args[1].(int)
+			subnetNum := args[2].(int)
+			_, network, err := net.ParseCIDR(args[0].(string))
+			if err != nil {
+				return nil, fmt.Errorf("invalid CIDR expression: %s", err)
+			}
+
+			// For portability with 32-bit systems where the subnet number
+			// will be a 32-bit int, we only allow extension of 32 bits in
+			// one call even if we're running on a 64-bit machine.
+			// (Of course, this is significant only for IPv6.)
+			if extraBits > 32 {
+				return nil, fmt.Errorf("may not extend prefix by more than 32 bits")
+			}
+
+			newNetwork, err := cidr.Subnet(network, extraBits, subnetNum)
+			if err != nil {
+				return nil, err
+			}
+
+			return newNetwork.String(), nil
+		},
+	}
+}
+
+// interpolationFuncCoalesce implements the "coalesce" function that
+// returns the first non-null, non-empty string from the provided arguments
+func interpolationFuncCoalesce() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeString},
+		ReturnType:   ast.TypeString,
+		Variadic:     true,
+		VariadicType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			if len(args) < 2 {
+				return nil, fmt.Errorf("must provide at least two arguments")
+			}
+			for _, arg := range args {
+				argument := arg.(string)
+
+				if argument != "" {
+					return argument, nil
+				}
+			}
+			return "", nil
+		},
+	}
+}
+
+// interpolationFuncCoalesceList implements the "coalescelist" function that
+// returns the first non-empty list from the provided arguments
+func interpolationFuncCoalesceList() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeList},
+		ReturnType:   ast.TypeList,
+		Variadic:     true,
+		VariadicType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			if len(args) < 2 {
+				return nil, fmt.Errorf("must provide at least two arguments")
+			}
+			for _, arg := range args {
+				argument := arg.([]ast.Variable)
+
+				if len(argument) > 0 {
+					return argument, nil
+				}
+			}
+			return make([]ast.Variable, 0), nil
+		},
+	}
+}
+
+// interpolationFuncConcat implements the "concat" function that concatenates
+// multiple lists.
+func interpolationFuncConcat() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeList},
+		ReturnType:   ast.TypeList,
+		Variadic:     true,
+		VariadicType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			var outputList []ast.Variable
+
+			for _, arg := range args {
+				for _, v := range arg.([]ast.Variable) {
+					switch v.Type {
+					case ast.TypeString:
+						outputList = append(outputList, v)
+					case ast.TypeList:
+						outputList = append(outputList, v)
+					case ast.TypeMap:
+						outputList = append(outputList, v)
+					default:
+						return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable())
+					}
+				}
+			}
+
+			// we don't support heterogeneous types, so make sure all types match the first
+			if len(outputList) > 0 {
+				firstType := outputList[0].Type
+				for _, v := range outputList[1:] {
+					if v.Type != firstType {
+						return nil, fmt.Errorf("unexpected %s in list of %s", v.Type.Printable(), firstType.Printable())
+					}
+				}
+			}
+
+			return outputList, nil
+		},
+	}
+}
+
+// interpolationFuncPow returns the first argument raised to the power of
+// the second argument.
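+// For example:
+//
+//	pow(3, 2)  // => 9
+//	pow(4, -2) // => 0.0625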
+func interpolationFuncPow() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat, ast.TypeFloat},
+		ReturnType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return math.Pow(args[0].(float64), args[1].(float64)), nil
+		},
+	}
+}
+
+// interpolationFuncFile implements the "file" function that allows
+// loading contents from a file.
+func interpolationFuncFile() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			path, err := homedir.Expand(args[0].(string))
+			if err != nil {
+				return "", err
+			}
+			data, err := ioutil.ReadFile(path)
+			if err != nil {
+				return "", err
+			}
+
+			return string(data), nil
+		},
+	}
+}
+
+// interpolationFuncFormat implements the "format" function that does
+// string formatting.
+func interpolationFuncFormat() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeString},
+		Variadic:     true,
+		VariadicType: ast.TypeAny,
+		ReturnType:   ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			format := args[0].(string)
+			return fmt.Sprintf(format, args[1:]...), nil
+		},
+	}
+}
+
+// interpolationFuncMax returns the maximum of the numeric arguments
+func interpolationFuncMax() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeFloat},
+		ReturnType:   ast.TypeFloat,
+		Variadic:     true,
+		VariadicType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			max := args[0].(float64)
+
+			for i := 1; i < len(args); i++ {
+				max = math.Max(max, args[i].(float64))
+			}
+
+			return max, nil
+		},
+	}
+}
+
+// interpolationFuncMin returns the minimum of the numeric arguments
+func interpolationFuncMin() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeFloat},
+		ReturnType:   ast.TypeFloat,
+		Variadic:     true,
+		VariadicType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			min := args[0].(float64)
+
+			for i := 1; i < len(args); i++ {
+				min = math.Min(min, args[i].(float64))
+			}
+
+			return min, nil
+		},
+	}
+}
+
+// interpolationFuncPathExpand expands a leading `~` in the given path to
+// the current user's home directory
+func interpolationFuncPathExpand() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return homedir.Expand(args[0].(string))
+		},
+	}
+}
+
+// interpolationFuncCeil returns the least integer value greater than or
+// equal to the argument
+func interpolationFuncCeil() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat},
+		ReturnType: ast.TypeInt,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return int(math.Ceil(args[0].(float64))), nil
+		},
+	}
+}
+
+// interpolationFuncLog returns the logarithm of the first argument in the
+// base given by the second argument.
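+// For example:
+//
+//	log(16, 2) // => 4, computed below as math.Log(16) / math.Log(2)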
+func interpolationFuncLog() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat, ast.TypeFloat},
+		ReturnType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return math.Log(args[0].(float64)) / math.Log(args[1].(float64)), nil
+		},
+	}
+}
+
+// interpolationFuncChomp removes trailing newlines from the given string
+func interpolationFuncChomp() ast.Function {
+	newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return newlines.ReplaceAllString(args[0].(string), ""), nil
+		},
+	}
+}
+
+// interpolationFuncFloor returns the greatest integer value less than or
+// equal to the argument
+func interpolationFuncFloor() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat},
+		ReturnType: ast.TypeInt,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return int(math.Floor(args[0].(float64))), nil
+		},
+	}
+}
+
+func interpolationFuncZipMap() ast.Function {
+	return ast.Function{
+		ArgTypes: []ast.Type{
+			ast.TypeList, // Keys
+			ast.TypeList, // Values
+		},
+		ReturnType: ast.TypeMap,
+		Callback: func(args []interface{}) (interface{}, error) {
+			keys := args[0].([]ast.Variable)
+			values := args[1].([]ast.Variable)
+
+			if len(keys) != len(values) {
+				return nil, fmt.Errorf("count of keys (%d) does not match count of values (%d)",
+					len(keys), len(values))
+			}
+
+			for i, val := range keys {
+				if val.Type != ast.TypeString {
+					return nil, fmt.Errorf("keys must be strings. value at position %d is %s",
+						i, val.Type.Printable())
+				}
+			}
+
+			result := map[string]ast.Variable{}
+			for i := 0; i < len(keys); i++ {
+				result[keys[i].Value.(string)] = values[i]
+			}
+
+			return result, nil
+		},
+	}
+}
+
+// interpolationFuncFormatList implements the "formatlist" function that does
+// string formatting on lists.
+func interpolationFuncFormatList() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeAny},
+		Variadic:     true,
+		VariadicType: ast.TypeAny,
+		ReturnType:   ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			// Make a copy of the variadic part of args
+			// to avoid modifying the original.
+			varargs := make([]interface{}, len(args)-1)
+			copy(varargs, args[1:])
+
+			// Verify we have some arguments
+			if len(varargs) == 0 {
+				return nil, fmt.Errorf("no arguments to formatlist")
+			}
+
+			// Convert arguments that are lists into slices.
+			// Confirm along the way that all lists have the same length (n).
+			var n int
+			listSeen := false
+			for i := 1; i < len(args); i++ {
+				s, ok := args[i].([]ast.Variable)
+				if !ok {
+					continue
+				}
+
+				// Mark that we've seen at least one list
+				listSeen = true
+
+				// Convert the ast.Variable to a slice of strings
+				parts, err := listVariableValueToStringSlice(s)
+				if err != nil {
+					return nil, err
+				}
+
+				// Store the expanded list so it can be indexed below
+				varargs[i-1] = parts
+
+				// Check length
+				if n == 0 {
+					// first list we've seen
+					n = len(parts)
+					continue
+				}
+				if n != len(parts) {
+					return nil, fmt.Errorf("format: mismatched list lengths: %d != %d", n, len(parts))
+				}
+			}
+
+			// If we didn't see a list this is an error because we
+			// can't determine the return value length.
+			if !listSeen {
+				return nil, fmt.Errorf(
+					"formatlist requires at least one list argument")
+			}
+
+			// Do the formatting.
+			format := args[0].(string)
+
+			// Generate a list of formatted strings.
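+			// Each of the n output elements is built with the same format
+			// string: scalar arguments repeat unchanged, while list
+			// arguments contribute their i-th element.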
+			list := make([]string, n)
+			fmtargs := make([]interface{}, len(varargs))
+			for i := 0; i < n; i++ {
+				for j, arg := range varargs {
+					switch arg := arg.(type) {
+					default:
+						fmtargs[j] = arg
+					case []string:
+						fmtargs[j] = arg[i]
+					}
+				}
+				list[i] = fmt.Sprintf(format, fmtargs...)
+			}
+			return stringSliceToVariableValue(list), nil
+		},
+	}
+}
+
+// interpolationFuncIndex implements the "index" function that allows one to
+// find the index of a specific element in a list
+func interpolationFuncIndex() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeList, ast.TypeString},
+		ReturnType: ast.TypeInt,
+		Callback: func(args []interface{}) (interface{}, error) {
+			haystack := args[0].([]ast.Variable)
+			needle := args[1].(string)
+			for index, element := range haystack {
+				if needle == element.Value {
+					return index, nil
+				}
+			}
+			return nil, fmt.Errorf("Could not find '%s' in '%s'", needle, haystack)
+		},
+	}
+}
+
+// interpolationFuncDirname implements the "dirname" function.
+func interpolationFuncDirname() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return filepath.Dir(args[0].(string)), nil
+		},
+	}
+}
+
+// interpolationFuncDistinct implements the "distinct" function that
+// removes duplicate elements from a list.
+func interpolationFuncDistinct() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeList},
+		ReturnType:   ast.TypeList,
+		Variadic:     true,
+		VariadicType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			var list []string
+
+			if len(args) != 1 {
+				return nil, fmt.Errorf("accepts only one argument.")
+			}
+
+			if argument, ok := args[0].([]ast.Variable); ok {
+				for _, element := range argument {
+					if element.Type != ast.TypeString {
+						return nil, fmt.Errorf(
+							"only works for flat lists, this list contains elements of %s",
+							element.Type.Printable())
+					}
+					list = appendIfMissing(list, element.Value.(string))
+				}
+			}
+
+			return stringSliceToVariableValue(list), nil
+		},
+	}
+}
+
+// appendIfMissing is a helper to add an element to a list only if it does
+// not already exist
+func appendIfMissing(slice []string, element string) []string {
+	for _, ele := range slice {
+		if ele == element {
+			return slice
+		}
+	}
+	return append(slice, element)
+}
+
+// for two lists `keys` and `values` of equal length, returns all elements
+// from `values` where the corresponding element from `keys` is in `searchset`.
+func interpolationFuncMatchKeys() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeList, ast.TypeList, ast.TypeList},
+		ReturnType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			output := make([]ast.Variable, 0)
+
+			values, _ := args[0].([]ast.Variable)
+			keys, _ := args[1].([]ast.Variable)
+			searchset, _ := args[2].([]ast.Variable)
+
+			if len(keys) != len(values) {
+				return nil, fmt.Errorf("length of keys and values should be equal")
+			}
+
+			for i, key := range keys {
+				for _, search := range searchset {
+					if res, err := compareSimpleVariables(key, search); err != nil {
+						return nil, err
+					} else if res {
+						output = append(output, values[i])
+						break
+					}
+				}
+			}
+			// if searchset is empty, then output is an empty list as well.
+			// if we haven't matched any key, then output is an empty list.
+			return output, nil
+		},
+	}
+}
+
+// compareSimpleVariables compares two variables of the same simple type, i.e.
 not a complex type such as TypeList or TypeMap.
+func compareSimpleVariables(a, b ast.Variable) (bool, error) {
+	if a.Type != b.Type {
+		return false, fmt.Errorf(
+			"won't compare items of different types %s and %s",
+			a.Type.Printable(), b.Type.Printable())
+	}
+	switch a.Type {
+	case ast.TypeString:
+		return a.Value.(string) == b.Value.(string), nil
+	default:
+		return false, fmt.Errorf(
+			"can't compare items of type %s",
+			a.Type.Printable())
+	}
+}
+
+// interpolationFuncJoin implements the "join" function that allows
+// multi-variable values to be joined by some character.
+func interpolationFuncJoin() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeString},
+		Variadic:     true,
+		VariadicType: ast.TypeList,
+		ReturnType:   ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			var list []string
+
+			if len(args) < 2 {
+				return nil, fmt.Errorf("not enough arguments to join()")
+			}
+
+			for _, arg := range args[1:] {
+				for _, part := range arg.([]ast.Variable) {
+					if part.Type != ast.TypeString {
+						return nil, fmt.Errorf(
+							"only works on flat lists, this list contains elements of %s",
+							part.Type.Printable())
+					}
+					list = append(list, part.Value.(string))
+				}
+			}
+
+			return strings.Join(list, args[0].(string)), nil
+		},
+	}
+}
+
+// interpolationFuncJSONEncode implements the "jsonencode" function that encodes
+// a string, list, or map as its JSON representation. For now, values in the
+// list or map may only be strings.
+func interpolationFuncJSONEncode() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeAny},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			var toEncode interface{}
+
+			switch typedArg := args[0].(type) {
+			case string:
+				toEncode = typedArg
+
+			case []ast.Variable:
+				// We preallocate the list here. Note that it's important that in
+				// the length 0 case, we have an empty list rather than nil, as
+				// they encode differently.
+				// XXX It would be nice to support arbitrarily nested data here. Is
+				// there an inverse of hil.InterfaceToVariable?
+				strings := make([]string, len(typedArg))
+
+				for i, v := range typedArg {
+					if v.Type != ast.TypeString {
+						return "", fmt.Errorf("list elements must be strings")
+					}
+					strings[i] = v.Value.(string)
+				}
+				toEncode = strings
+
+			case map[string]ast.Variable:
+				// XXX It would be nice to support arbitrarily nested data here. Is
+				// there an inverse of hil.InterfaceToVariable?
+				stringMap := make(map[string]string)
+				for k, v := range typedArg {
+					if v.Type != ast.TypeString {
+						return "", fmt.Errorf("map values must be strings")
+					}
+					stringMap[k] = v.Value.(string)
+				}
+				toEncode = stringMap
+
+			default:
+				return "", fmt.Errorf("unknown type for JSON encoding: %T", args[0])
+			}
+
+			jEnc, err := json.Marshal(toEncode)
+			if err != nil {
+				return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
+			}
+			return string(jEnc), nil
+		},
+	}
+}
+
+// interpolationFuncReplace implements the "replace" function that does
+// string replacement.
+func interpolationFuncReplace() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString, ast.TypeString, ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			s := args[0].(string)
+			search := args[1].(string)
+			replace := args[2].(string)
+
+			// We search/replace using a regexp if the string is surrounded
+			// in forward slashes.
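+			// e.g. replace("ab12cd", "/\d+/", "-") compiles "\d+" and
+			// yields "ab-cd", while replace("ab12cd", "12", "-") is a
+			// literal substitution.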
+			if len(search) > 1 && search[0] == '/' && search[len(search)-1] == '/' {
+				re, err := regexp.Compile(search[1 : len(search)-1])
+				if err != nil {
+					return nil, err
+				}
+
+				return re.ReplaceAllString(s, replace), nil
+			}
+
+			return strings.Replace(s, search, replace, -1), nil
+		},
+	}
+}
+
+func interpolationFuncLength() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeAny},
+		ReturnType: ast.TypeInt,
+		Variadic:   false,
+		Callback: func(args []interface{}) (interface{}, error) {
+			subject := args[0]
+
+			switch typedSubject := subject.(type) {
+			case string:
+				return len(typedSubject), nil
+			case []ast.Variable:
+				return len(typedSubject), nil
+			case map[string]ast.Variable:
+				return len(typedSubject), nil
+			}
+
+			return 0, fmt.Errorf("arguments to length() must be a string, list, or map")
+		},
+	}
+}
+
+func interpolationFuncSignum() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeInt},
+		ReturnType: ast.TypeInt,
+		Variadic:   false,
+		Callback: func(args []interface{}) (interface{}, error) {
+			num := args[0].(int)
+			switch {
+			case num < 0:
+				return -1, nil
+			case num > 0:
+				return +1, nil
+			default:
+				return 0, nil
+			}
+		},
+	}
+}
+
+// interpolationFuncSlice returns a portion of the input list between from
+// (inclusive) and to (exclusive).
+func interpolationFuncSlice() ast.Function {
+	return ast.Function{
+		ArgTypes: []ast.Type{
+			ast.TypeList, // inputList
+			ast.TypeInt,  // from
+			ast.TypeInt,  // to
+		},
+		ReturnType: ast.TypeList,
+		Variadic:   false,
+		Callback: func(args []interface{}) (interface{}, error) {
+			inputList := args[0].([]ast.Variable)
+			from := args[1].(int)
+			to := args[2].(int)
+
+			if from < 0 {
+				return nil, fmt.Errorf("from index must be >= 0")
+			}
+			if to > len(inputList) {
+				return nil, fmt.Errorf("to index must be <= length of the input list")
+			}
+			if from > to {
+				return nil, fmt.Errorf("from index must be <= to index")
+			}
+
+			var outputList []ast.Variable
+			for i, val := range inputList {
+				if i >= from && i < to {
+					outputList = append(outputList, val)
+				}
+			}
+			return outputList, nil
+		},
+	}
+}
+
+// interpolationFuncSort sorts a list of strings lexicographically
+func interpolationFuncSort() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeList},
+		ReturnType: ast.TypeList,
+		Variadic:   false,
+		Callback: func(args []interface{}) (interface{}, error) {
+			inputList := args[0].([]ast.Variable)
+
+			// Ensure that all the list members are strings and
+			// create a string slice from them
+			members := make([]string, len(inputList))
+			for i, val := range inputList {
+				if val.Type != ast.TypeString {
+					return nil, fmt.Errorf(
+						"sort() may only be used with lists of strings - %s at index %d",
+						val.Type.String(), i)
+				}
+
+				members[i] = val.Value.(string)
+			}
+
+			sort.Strings(members)
+			return stringSliceToVariableValue(members), nil
+		},
+	}
+}
+
+// interpolationFuncSplit implements the "split" function that allows
+// strings to be split into multi-variable values
+func interpolationFuncSplit() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString, ast.TypeString},
+		ReturnType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			sep := args[0].(string)
+			s := args[1].(string)
+			elements := strings.Split(s, sep)
+			return stringSliceToVariableValue(elements), nil
+		},
+	}
+}
+
+// interpolationFuncLookup implements the "lookup" function that allows
+// dynamic lookups of map types within a Terraform configuration.
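+//
+// lookup(map, key) fails on a missing key unless a default is supplied as
+// a third argument, e.g. lookup(map, key, "fallback") (handled below).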
+func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeMap, ast.TypeString},
+		ReturnType:   ast.TypeString,
+		Variadic:     true,
+		VariadicType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			defaultValue := ""
+			defaultValueSet := false
+			if len(args) > 2 {
+				defaultValue = args[2].(string)
+				defaultValueSet = true
+			}
+			if len(args) > 3 {
+				return "", fmt.Errorf("lookup() takes no more than three arguments")
+			}
+			index := args[1].(string)
+			mapVar := args[0].(map[string]ast.Variable)
+
+			v, ok := mapVar[index]
+			if !ok {
+				if defaultValueSet {
+					return defaultValue, nil
+				} else {
+					return "", fmt.Errorf(
+						"lookup failed to find '%s'",
+						args[1].(string))
+				}
+			}
+			if v.Type != ast.TypeString {
+				return nil, fmt.Errorf(
+					"lookup() may only be used with flat maps, this map contains elements of %s",
+					v.Type.Printable())
+			}
+
+			return v.Value.(string), nil
+		},
+	}
+}
+
+// interpolationFuncElement implements the "element" function that allows
+// a specific index to be looked up in a multi-variable value. Note that this will
+// wrap if the index is larger than the number of elements in the multi-variable value.
+func interpolationFuncElement() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeList, ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			list := args[0].([]ast.Variable)
+			if len(list) == 0 {
+				return nil, fmt.Errorf("element() may not be used with an empty list")
+			}
+
+			index, err := strconv.Atoi(args[1].(string))
+			if err != nil || index < 0 {
+				return "", fmt.Errorf(
+					"invalid number for index, got %s", args[1])
+			}
+
+			resolvedIndex := index % len(list)
+
+			v := list[resolvedIndex]
+			if v.Type != ast.TypeString {
+				return nil, fmt.Errorf(
+					"element() may only be used with flat lists, this list contains elements of %s",
+					v.Type.Printable())
+			}
+			return v.Value, nil
+		},
+	}
+}
+
+// interpolationFuncKeys implements the "keys" function that yields a list of
+// keys of map types within a Terraform configuration.
+func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeMap},
+		ReturnType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			mapVar := args[0].(map[string]ast.Variable)
+			keys := make([]string, 0)
+
+			for k := range mapVar {
+				keys = append(keys, k)
+			}
+
+			sort.Strings(keys)
+
+			// Keys are guaranteed to be strings
+			return stringSliceToVariableValue(keys), nil
+		},
+	}
+}
+
+// interpolationFuncValues implements the "values" function that yields a list of
+// values of map types within a Terraform configuration.
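+//
+// Values are emitted in the lexical order of their keys, so the result is
+// deterministic, e.g. values(map("b", "2", "a", "1")) yields ["1", "2"].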
+func interpolationFuncValues(vs map[string]ast.Variable) ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeList, + Callback: func(args []interface{}) (interface{}, error) { + mapVar := args[0].(map[string]ast.Variable) + keys := make([]string, 0) + + for k, _ := range mapVar { + keys = append(keys, k) + } + + sort.Strings(keys) + + values := make([]string, len(keys)) + for index, key := range keys { + if value, ok := mapVar[key].Value.(string); ok { + values[index] = value + } else { + return "", fmt.Errorf("values(): %q has element with bad type %s", + key, mapVar[key].Type) + } + } + + variable, err := hil.InterfaceToVariable(values) + if err != nil { + return nil, err + } + + return variable.Value, nil + }, + } +} + +// interpolationFuncBasename implements the "basename" function. +func interpolationFuncBasename() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + return filepath.Base(args[0].(string)), nil + }, + } +} + +// interpolationFuncBase64Encode implements the "base64encode" function that +// allows Base64 encoding. +func interpolationFuncBase64Encode() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + return base64.StdEncoding.EncodeToString([]byte(s)), nil + }, + } +} + +// interpolationFuncBase64Decode implements the "base64decode" function that +// allows Base64 decoding. +func interpolationFuncBase64Decode() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + sDec, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return "", fmt.Errorf("failed to decode base64 data '%s'", s) + } + return string(sDec), nil + }, + } +} + +// interpolationFuncLower implements the "lower" function that does +// string lower casing. +func interpolationFuncLower() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + toLower := args[0].(string) + return strings.ToLower(toLower), nil + }, + } +} + +func interpolationFuncMd5() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + h := md5.New() + h.Write([]byte(s)) + hash := hex.EncodeToString(h.Sum(nil)) + return hash, nil + }, + } +} + +func interpolationFuncMerge() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeMap, + Variadic: true, + VariadicType: ast.TypeMap, + Callback: func(args []interface{}) (interface{}, error) { + outputMap := make(map[string]ast.Variable) + + for _, arg := range args { + for k, v := range arg.(map[string]ast.Variable) { + outputMap[k] = v + } + } + + return outputMap, nil + }, + } +} + +// interpolationFuncUpper implements the "upper" function that does +// string upper casing. 
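+// For example:
+//
+//	upper("terraform") // => "TERRAFORM"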
+func interpolationFuncUpper() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			toUpper := args[0].(string)
+			return strings.ToUpper(toUpper), nil
+		},
+	}
+}
+
+func interpolationFuncSha1() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			s := args[0].(string)
+			h := sha1.New()
+			h.Write([]byte(s))
+			hash := hex.EncodeToString(h.Sum(nil))
+			return hash, nil
+		},
+	}
+}
+
+// hexadecimal representation of sha256 sum
+func interpolationFuncSha256() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			s := args[0].(string)
+			h := sha256.New()
+			h.Write([]byte(s))
+			hash := hex.EncodeToString(h.Sum(nil))
+			return hash, nil
+		},
+	}
+}
+
+func interpolationFuncSha512() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			s := args[0].(string)
+			h := sha512.New()
+			h.Write([]byte(s))
+			hash := hex.EncodeToString(h.Sum(nil))
+			return hash, nil
+		},
+	}
+}
+
+func interpolationFuncTrimSpace() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			trimSpace := args[0].(string)
+			return strings.TrimSpace(trimSpace), nil
+		},
+	}
+}
+
+func interpolationFuncBase64Sha256() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			s := args[0].(string)
+			h := sha256.New()
+			h.Write([]byte(s))
+			shaSum := h.Sum(nil)
+			encoded := base64.StdEncoding.EncodeToString(shaSum[:])
+			return encoded, nil
+		},
+	}
+}
+
+func interpolationFuncBase64Sha512() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			s := args[0].(string)
+			h := sha512.New()
+			h.Write([]byte(s))
+			shaSum := h.Sum(nil)
+			encoded := base64.StdEncoding.EncodeToString(shaSum[:])
+			return encoded, nil
+		},
+	}
+}
+
+func interpolationFuncBcrypt() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeString},
+		Variadic:     true,
+		VariadicType: ast.TypeString,
+		ReturnType:   ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			defaultCost := 10
+
+			if len(args) > 1 {
+				costStr := args[1].(string)
+				cost, err := strconv.Atoi(costStr)
+				if err != nil {
+					return "", err
+				}
+
+				defaultCost = cost
+			}
+
+			if len(args) > 2 {
+				return "", fmt.Errorf("bcrypt() takes no more than two arguments")
+			}
+
+			input := args[0].(string)
+			out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
+			if err != nil {
+				return "", fmt.Errorf("error occurred generating password %s", err.Error())
+			}
+
+			return string(out), nil
+		},
+	}
+}
+
+func interpolationFuncUUID() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return uuid.GenerateUUID()
+		},
+	}
+}
+
+// interpolationFuncTimestamp implements the "timestamp" function that
+// returns the current UTC time in RFC 3339 format.
+func interpolationFuncTimestamp() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{},
+		ReturnType:
ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + return time.Now().UTC().Format(time.RFC3339), nil + }, + } +} + +// interpolationFuncTitle implements the "title" function that returns a copy of the +// string in which first characters of all the words are capitalized. +func interpolationFuncTitle() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + toTitle := args[0].(string) + return strings.Title(toTitle), nil + }, + } +} + +// interpolationFuncSubstr implements the "substr" function that allows strings +// to be truncated. +func interpolationFuncSubstr() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // input string + ast.TypeInt, // offset + ast.TypeInt, // length + }, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + str := args[0].(string) + offset := args[1].(int) + length := args[2].(int) + + // Interpret a negative offset as being equivalent to a positive + // offset taken from the end of the string. + if offset < 0 { + offset += len(str) + } + + // Interpret a length of `-1` as indicating that the substring + // should start at `offset` and continue until the end of the + // string. Any other negative length (other than `-1`) is invalid. + if length == -1 { + length = len(str) + } else if length >= 0 { + length += offset + } else { + return nil, fmt.Errorf("length should be a non-negative integer") + } + + if offset > len(str) { + return nil, fmt.Errorf("offset cannot be larger than the length of the string") + } + + if length > len(str) { + return nil, fmt.Errorf("'offset + length' cannot be larger than the length of the string") + } + + return str[offset:length], nil + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go new file mode 100644 index 0000000000..ead3d102e1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go @@ -0,0 +1,283 @@ +package config + +import ( + "fmt" + "reflect" + "strings" + + "github.com/hashicorp/hil" + "github.com/hashicorp/hil/ast" + "github.com/mitchellh/reflectwalk" +) + +// interpolationWalker implements interfaces for the reflectwalk package +// (github.com/mitchellh/reflectwalk) that can be used to automatically +// execute a callback for an interpolation. +type interpolationWalker struct { + // F is the function to call for every interpolation. It can be nil. + // + // If Replace is true, then the return value of F will be used to + // replace the interpolation. + F interpolationWalkerFunc + Replace bool + + // ContextF is an advanced version of F that also receives the + // location of where it is in the structure. This lets you do + // context-aware validation. + ContextF interpolationWalkerContextFunc + + key []string + lastValue reflect.Value + loc reflectwalk.Location + cs []reflect.Value + csKey []reflect.Value + csData interface{} + sliceIndex []int + unknownKeys []string +} + +// interpolationWalkerFunc is the callback called by interpolationWalk. +// It is called with any interpolation found. It should return a value +// to replace the interpolation with, along with any errors. +// +// If Replace is set to false in interpolationWalker, then the replace +// value can be anything as it will have no effect. 
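+//
+// An illustrative sketch of wiring a walker (callback and input
+// hypothetical):
+//
+//	w := &interpolationWalker{
+//		Replace: true,
+//		F: func(n ast.Node) (interface{}, error) {
+//			return "<computed>", nil // replace every interpolation
+//		},
+//	}
+//	err := reflectwalk.Walk(rawConfigMap, w)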
+type interpolationWalkerFunc func(ast.Node) (interface{}, error)
+
+// interpolationWalkerContextFunc is called by interpolationWalk if
+// ContextF is set. This receives both the interpolation and the location
+// where the interpolation is.
+//
+// This callback can be used to validate the location of the interpolation
+// within the configuration.
+type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node)
+
+func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
+	w.loc = loc
+	return nil
+}
+
+func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
+	w.loc = reflectwalk.None
+
+	switch loc {
+	case reflectwalk.Map:
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.MapValue:
+		w.key = w.key[:len(w.key)-1]
+		w.csKey = w.csKey[:len(w.csKey)-1]
+	case reflectwalk.Slice:
+		// Split any values that need to be split
+		w.splitSlice()
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.SliceElem:
+		w.csKey = w.csKey[:len(w.csKey)-1]
+		w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1]
+	}
+
+	return nil
+}
+
+func (w *interpolationWalker) Map(m reflect.Value) error {
+	w.cs = append(w.cs, m)
+	return nil
+}
+
+func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
+	w.csData = k
+	w.csKey = append(w.csKey, k)
+
+	if l := len(w.sliceIndex); l > 0 {
+		w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String()))
+	} else {
+		w.key = append(w.key, k.String())
+	}
+
+	w.lastValue = v
+	return nil
+}
+
+func (w *interpolationWalker) Slice(s reflect.Value) error {
+	w.cs = append(w.cs, s)
+	return nil
+}
+
+func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
+	w.csKey = append(w.csKey, reflect.ValueOf(i))
+	w.sliceIndex = append(w.sliceIndex, i)
+	return nil
+}
+
+func (w *interpolationWalker) Primitive(v reflect.Value) error {
+	setV := v
+
+	// We only care about strings
+	if v.Kind() == reflect.Interface {
+		setV = v
+		v = v.Elem()
+	}
+	if v.Kind() != reflect.String {
+		return nil
+	}
+
+	astRoot, err := hil.Parse(v.String())
+	if err != nil {
+		return err
+	}
+
+	// If the AST we got is just a literal string value with the same
+	// value then we ignore it. We have to check if it's the same value
+	// because it is possible to input a string, get out a string, and
+	// have it be different. For example: "foo-$${bar}" turns into
+	// "foo-${bar}"
+	if n, ok := astRoot.(*ast.LiteralNode); ok {
+		if s, ok := n.Value.(string); ok && s == v.String() {
+			return nil
+		}
+	}
+
+	if w.ContextF != nil {
+		w.ContextF(w.loc, astRoot)
+	}
+
+	if w.F == nil {
+		return nil
+	}
+
+	replaceVal, err := w.F(astRoot)
+	if err != nil {
+		return fmt.Errorf(
+			"%s in:\n\n%s",
+			err, v.String())
+	}
+
+	if w.Replace {
+		// We need to determine if we need to remove this element
+		// if the result contains any "UnknownVariableValue" which is
+		// set if it is computed. This behavior is different if we're
+		// splitting (in a SliceElem) or not.
+ remove := false + if w.loc == reflectwalk.SliceElem { + switch typedReplaceVal := replaceVal.(type) { + case string: + if typedReplaceVal == UnknownVariableValue { + remove = true + } + case []interface{}: + if hasUnknownValue(typedReplaceVal) { + remove = true + } + } + } else if replaceVal == UnknownVariableValue { + remove = true + } + + if remove { + w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) + } + + resultVal := reflect.ValueOf(replaceVal) + switch w.loc { + case reflectwalk.MapKey: + m := w.cs[len(w.cs)-1] + + // Delete the old value + var zero reflect.Value + m.SetMapIndex(w.csData.(reflect.Value), zero) + + // Set the new key with the existing value + m.SetMapIndex(resultVal, w.lastValue) + + // Set the key to be the new key + w.csData = resultVal + case reflectwalk.MapValue: + // If we're in a map, then the only way to set a map value is + // to set it directly. + m := w.cs[len(w.cs)-1] + mk := w.csData.(reflect.Value) + m.SetMapIndex(mk, resultVal) + default: + // Otherwise, we should be addressable + setV.Set(resultVal) + } + } + + return nil +} + +func (w *interpolationWalker) replaceCurrent(v reflect.Value) { + // if we don't have at least 2 values, we're not going to find a map, but + // we could panic. + if len(w.cs) < 2 { + return + } + + c := w.cs[len(w.cs)-2] + switch c.Kind() { + case reflect.Map: + // Get the key and delete it + k := w.csKey[len(w.csKey)-1] + c.SetMapIndex(k, v) + } +} + +func hasUnknownValue(variable []interface{}) bool { + for _, value := range variable { + if strVal, ok := value.(string); ok { + if strVal == UnknownVariableValue { + return true + } + } + } + return false +} + +func (w *interpolationWalker) splitSlice() { + raw := w.cs[len(w.cs)-1] + + var s []interface{} + switch v := raw.Interface().(type) { + case []interface{}: + s = v + case []map[string]interface{}: + return + } + + split := false + for _, val := range s { + if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList { + split = true + } + if _, ok := val.([]interface{}); ok { + split = true + } + } + + if !split { + return + } + + result := make([]interface{}, 0) + for _, v := range s { + switch val := v.(type) { + case ast.Variable: + switch val.Type { + case ast.TypeList: + elements := val.Value.([]ast.Variable) + for _, element := range elements { + result = append(result, element.Value) + } + default: + result = append(result, val.Value) + } + case []interface{}: + for _, element := range val { + result = append(result, element) + } + default: + result = append(result, v) + } + } + + w.replaceCurrent(reflect.ValueOf(result)) +} diff --git a/vendor/github.com/hashicorp/terraform/config/lang.go b/vendor/github.com/hashicorp/terraform/config/lang.go new file mode 100644 index 0000000000..890d30beb9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/lang.go @@ -0,0 +1,11 @@ +package config + +import ( + "github.com/hashicorp/hil/ast" +) + +type noopNode struct{} + +func (n *noopNode) Accept(ast.Visitor) ast.Node { return n } +func (n *noopNode) Pos() ast.Pos { return ast.Pos{} } +func (n *noopNode) Type(ast.Scope) (ast.Type, error) { return ast.TypeString, nil } diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go new file mode 100644 index 0000000000..0bfa89c255 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/loader.go @@ -0,0 +1,224 @@ +package config + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" 
+
+	"github.com/hashicorp/hcl"
+)
+
+// ErrNoConfigsFound is the error returned by LoadDir if no
+// Terraform configuration files were found in the given directory.
+type ErrNoConfigsFound struct {
+	Dir string
+}
+
+func (e ErrNoConfigsFound) Error() string {
+	return fmt.Sprintf(
+		"No Terraform configuration files found in directory: %s",
+		e.Dir)
+}
+
+// LoadJSON loads a single Terraform configuration from a given JSON document.
+//
+// The document must be a complete Terraform configuration. This function will
+// NOT try to load any additional modules so only the given document is loaded.
+func LoadJSON(raw json.RawMessage) (*Config, error) {
+	obj, err := hcl.Parse(string(raw))
+	if err != nil {
+		return nil, fmt.Errorf(
+			"Error parsing JSON document as HCL: %s", err)
+	}
+
+	// Start building the result
+	hclConfig := &hclConfigurable{
+		Root: obj,
+	}
+
+	return hclConfig.Config()
+}
+
+// LoadFile loads the Terraform configuration from a given file.
+//
+// This file can be any format that Terraform recognizes, and may import
+// any other format that Terraform recognizes.
+func LoadFile(path string) (*Config, error) {
+	importTree, err := loadTree(path)
+	if err != nil {
+		return nil, err
+	}
+
+	configTree, err := importTree.ConfigTree()
+
+	// Close the importTree now so that we can clear resources as quickly
+	// as possible.
+	importTree.Close()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return configTree.Flatten()
+}
+
+// LoadDir loads all the Terraform configuration files in a single
+// directory and appends them together.
+//
+// Special files known as "override files" can also be present, which
+// are merged into the loaded configuration. That is, the non-override
+// files are loaded first to create the configuration. Then, the overrides
+// are merged into the configuration to create the final configuration.
+//
+// Files are loaded in lexical order.
+func LoadDir(root string) (*Config, error) {
+	files, overrides, err := dirFiles(root)
+	if err != nil {
+		return nil, err
+	}
+	if len(files) == 0 {
+		return nil, &ErrNoConfigsFound{Dir: root}
+	}
+
+	// Determine the absolute path to the directory.
+	rootAbs, err := filepath.Abs(root)
+	if err != nil {
+		return nil, err
+	}
+
+	var result *Config
+
+	// Sort the files and overrides so we have a deterministic order
+	sort.Strings(files)
+	sort.Strings(overrides)
+
+	// Load all the regular files and append them to each other.
+	for _, f := range files {
+		c, err := LoadFile(f)
+		if err != nil {
+			return nil, err
+		}
+
+		if result != nil {
+			result, err = Append(result, c)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			result = c
+		}
+	}
+
+	// Load all the overrides, and merge them into the config
+	for _, f := range overrides {
+		c, err := LoadFile(f)
+		if err != nil {
+			return nil, err
+		}
+
+		result, err = Merge(result, c)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Mark the directory
+	result.Dir = rootAbs
+
+	return result, nil
+}
+
+// IsEmptyDir returns true if the directory given has no Terraform
+// configuration files.
+func IsEmptyDir(root string) (bool, error) {
+	if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
+		return true, nil
+	}
+
+	fs, os, err := dirFiles(root)
+	if err != nil {
+		return false, err
+	}
+
+	return len(fs) == 0 && len(os) == 0, nil
+}
+
+// ext returns the Terraform configuration extension of the given
+// path, or a blank string if the path is not a recognized configuration file.
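(Aside: the filename rules that LoadDir, dirFiles, and the ext helper below implement are easy to demonstrate standalone. This is a hedged sketch with an invented `classify` helper, not the vendored API.)

```go
package main

import (
	"fmt"
	"strings"
)

// classify mirrors the loader's rules: only ".tf"/".tf.json" files are
// considered, and basenames of "override" or "*_override" are merged in
// last, after the regular files.
func classify(name string) string {
	var ext string
	switch {
	case strings.HasSuffix(name, ".tf.json"):
		ext = ".tf.json"
	case strings.HasSuffix(name, ".tf"):
		ext = ".tf"
	default:
		return "ignored"
	}
	base := name[:len(name)-len(ext)]
	if base == "override" || strings.HasSuffix(base, "_override") {
		return "override (merged after regular files)"
	}
	return "config (loaded and appended in lexical order)"
}

func main() {
	for _, n := range []string{"main.tf", "prod_override.tf", "override.tf.json", "README.md"} {
		fmt.Printf("%-20s %s\n", n, classify(n))
	}
}
```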
+func ext(path string) string { + if strings.HasSuffix(path, ".tf") { + return ".tf" + } else if strings.HasSuffix(path, ".tf.json") { + return ".tf.json" + } else { + return "" + } +} + +func dirFiles(dir string) ([]string, []string, error) { + f, err := os.Open(dir) + if err != nil { + return nil, nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, nil, err + } + if !fi.IsDir() { + return nil, nil, fmt.Errorf( + "configuration path must be a directory: %s", + dir) + } + + var files, overrides []string + err = nil + for err != io.EOF { + var fis []os.FileInfo + fis, err = f.Readdir(128) + if err != nil && err != io.EOF { + return nil, nil, err + } + + for _, fi := range fis { + // Ignore directories + if fi.IsDir() { + continue + } + + // Only care about files that are valid to load + name := fi.Name() + extValue := ext(name) + if extValue == "" || isIgnoredFile(name) { + continue + } + + // Determine if we're dealing with an override + nameNoExt := name[:len(name)-len(extValue)] + override := nameNoExt == "override" || + strings.HasSuffix(nameNoExt, "_override") + + path := filepath.Join(dir, name) + if override { + overrides = append(overrides, path) + } else { + files = append(files, path) + } + } + } + + return files, overrides, nil +} + +// isIgnoredFile returns true or false depending on whether the +// provided file name is a file that should be ignored. +func isIgnoredFile(name string) bool { + return strings.HasPrefix(name, ".") || // Unix-like hidden files + strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs +} diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go new file mode 100644 index 0000000000..9abb1960f3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go @@ -0,0 +1,1130 @@ +package config + +import ( + "fmt" + "io/ioutil" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/mitchellh/mapstructure" +) + +// hclConfigurable is an implementation of configurable that knows +// how to turn HCL configuration into a *Config object. +type hclConfigurable struct { + File string + Root *ast.File +} + +func (t *hclConfigurable) Config() (*Config, error) { + validKeys := map[string]struct{}{ + "atlas": struct{}{}, + "data": struct{}{}, + "module": struct{}{}, + "output": struct{}{}, + "provider": struct{}{}, + "resource": struct{}{}, + "terraform": struct{}{}, + "variable": struct{}{}, + } + + // Top-level item should be the object list + list, ok := t.Root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") + } + + // Start building up the actual configuration. 
+ config := new(Config) + + // Terraform config + if o := list.Filter("terraform"); len(o.Items) > 0 { + var err error + config.Terraform, err = loadTerraformHcl(o) + if err != nil { + return nil, err + } + } + + // Build the variables + if vars := list.Filter("variable"); len(vars.Items) > 0 { + var err error + config.Variables, err = loadVariablesHcl(vars) + if err != nil { + return nil, err + } + } + + // Get Atlas configuration + if atlas := list.Filter("atlas"); len(atlas.Items) > 0 { + var err error + config.Atlas, err = loadAtlasHcl(atlas) + if err != nil { + return nil, err + } + } + + // Build the modules + if modules := list.Filter("module"); len(modules.Items) > 0 { + var err error + config.Modules, err = loadModulesHcl(modules) + if err != nil { + return nil, err + } + } + + // Build the provider configs + if providers := list.Filter("provider"); len(providers.Items) > 0 { + var err error + config.ProviderConfigs, err = loadProvidersHcl(providers) + if err != nil { + return nil, err + } + } + + // Build the resources + { + var err error + managedResourceConfigs := list.Filter("resource") + dataResourceConfigs := list.Filter("data") + + config.Resources = make( + []*Resource, 0, + len(managedResourceConfigs.Items)+len(dataResourceConfigs.Items), + ) + + managedResources, err := loadManagedResourcesHcl(managedResourceConfigs) + if err != nil { + return nil, err + } + dataResources, err := loadDataResourcesHcl(dataResourceConfigs) + if err != nil { + return nil, err + } + + config.Resources = append(config.Resources, dataResources...) + config.Resources = append(config.Resources, managedResources...) + } + + // Build the outputs + if outputs := list.Filter("output"); len(outputs.Items) > 0 { + var err error + config.Outputs, err = loadOutputsHcl(outputs) + if err != nil { + return nil, err + } + } + + // Check for invalid keys + for _, item := range list.Items { + if len(item.Keys) == 0 { + // Not sure how this would happen, but let's avoid a panic + continue + } + + k := item.Keys[0].Token.Value().(string) + if _, ok := validKeys[k]; ok { + continue + } + + config.unknownKeys = append(config.unknownKeys, k) + } + + return config, nil +} + +// loadFileHcl is a fileLoaderFunc that knows how to read HCL +// files and turn them into hclConfigurables. +func loadFileHcl(root string) (configurable, []string, error) { + // Read the HCL file and prepare for parsing + d, err := ioutil.ReadFile(root) + if err != nil { + return nil, nil, fmt.Errorf( + "Error reading %s: %s", root, err) + } + + // Parse it + hclRoot, err := hcl.Parse(string(d)) + if err != nil { + return nil, nil, fmt.Errorf( + "Error parsing %s: %s", root, err) + } + + // Start building the result + result := &hclConfigurable{ + File: root, + Root: hclRoot, + } + + // Dive in, find the imports. This is disabled for now since + // imports were removed prior to Terraform 0.1. The code is + // remaining here commented for historical purposes. 
+	/*
+		imports := obj.Get("import")
+		if imports == nil {
+			result.Object.Ref()
+			return result, nil, nil
+		}
+
+		if imports.Type() != libucl.ObjectTypeString {
+			imports.Close()
+
+			return nil, nil, fmt.Errorf(
+				"Error in %s: all 'import' declarations should be in the format\n"+
+					"`import \"foo\"` (Got type %s)",
+				root,
+				imports.Type())
+		}
+
+		// Gather all the import paths
+		importPaths := make([]string, 0, imports.Len())
+		iter := imports.Iterate(false)
+		for imp := iter.Next(); imp != nil; imp = iter.Next() {
+			path := imp.ToString()
+			if !filepath.IsAbs(path) {
+				// Relative paths are relative to the Terraform file itself
+				dir := filepath.Dir(root)
+				path = filepath.Join(dir, path)
+			}
+
+			importPaths = append(importPaths, path)
+			imp.Close()
+		}
+		iter.Close()
+		imports.Close()
+
+		result.Object.Ref()
+	*/
+
+	return result, nil, nil
+}
+
+// Given a handle to a HCL object, this transforms it into the Terraform config
+func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) {
+	if len(list.Items) > 1 {
+		return nil, fmt.Errorf("only one 'terraform' block allowed per module")
+	}
+
+	// Get our one item
+	item := list.Items[0]
+
+	// This block should have an empty top level ObjectItem. If there are keys
+	// here, it's likely because we have a flattened JSON object, and we can
+	// lift this into a nested ObjectList to decode properly.
+	if len(item.Keys) > 0 {
+		item = &ast.ObjectItem{
+			Val: &ast.ObjectType{
+				List: &ast.ObjectList{
+					Items: []*ast.ObjectItem{item},
+				},
+			},
+		}
+	}
+
+	// We need the item value as an ObjectList
+	var listVal *ast.ObjectList
+	if ot, ok := item.Val.(*ast.ObjectType); ok {
+		listVal = ot.List
+	} else {
+		return nil, fmt.Errorf("terraform block: should be an object")
+	}
+
+	// NOTE: We purposely don't validate unknown HCL keys here so that
+	// we can potentially read _future_ Terraform version config (to
+	// still be able to validate the required version).
+	//
+	// We should still keep track of unknown keys to validate later, but
+	// HCL doesn't currently support that.
+
+	var config Terraform
+	if err := hcl.DecodeObject(&config, item.Val); err != nil {
+		return nil, fmt.Errorf(
+			"Error reading terraform config: %s",
+			err)
+	}
+
+	// If we have a backend block, then parse it out
+	if os := listVal.Filter("backend"); len(os.Items) > 0 {
+		var err error
+		config.Backend, err = loadTerraformBackendHcl(os)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error reading backend config for terraform block: %s",
+				err)
+		}
+	}
+
+	return &config, nil
+}
+
+// loadTerraformBackendHcl loads the Backend configuration from an object list.
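(Aside: for intuition, here is a minimal standalone demo of the decode step the next function performs, using `hcl.Decode` on a hand-written backend body. The values are assumed example data; the real loader goes through the ObjectList plumbing shown below.)

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

// Sketch: the body of a `backend "TYPE" { ... }` block decodes into a
// plain map that later becomes the backend's RawConfig.
func main() {
	body := `
bucket = "my-state-bucket"
region = "us-east-1"
`
	var config map[string]interface{}
	if err := hcl.Decode(&config, body); err != nil {
		panic(err)
	}
	fmt.Printf("backend raw config: %#v\n", config)
}
```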
+func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) {
+	if len(list.Items) > 1 {
+		return nil, fmt.Errorf("only one 'backend' block allowed")
+	}
+
+	// Get our one item
+	item := list.Items[0]
+
+	// Verify the keys
+	if len(item.Keys) != 1 {
+		return nil, fmt.Errorf(
+			"position %s: 'backend' must be followed by exactly one string: a type",
+			item.Pos())
+	}
+
+	typ := item.Keys[0].Token.Value().(string)
+
+	// Decode the raw config
+	var config map[string]interface{}
+	if err := hcl.DecodeObject(&config, item.Val); err != nil {
+		return nil, fmt.Errorf(
+			"Error reading backend config: %s",
+			err)
+	}
+
+	rawConfig, err := NewRawConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"Error reading backend config: %s",
+			err)
+	}
+
+	b := &Backend{
+		Type:      typ,
+		RawConfig: rawConfig,
+	}
+	b.Hash = b.Rehash()
+
+	return b, nil
+}
+
+// Given a handle to a HCL object, this transforms it into the Atlas
+// configuration.
+func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
+	if len(list.Items) > 1 {
+		return nil, fmt.Errorf("only one 'atlas' block allowed")
+	}
+
+	// Get our one item
+	item := list.Items[0]
+
+	var config AtlasConfig
+	if err := hcl.DecodeObject(&config, item.Val); err != nil {
+		return nil, fmt.Errorf(
+			"Error reading atlas config: %s",
+			err)
+	}
+
+	return &config, nil
+}
+
+// Given a handle to a HCL object, this recurses into the structure
+// and pulls out a list of modules.
+//
+// The resulting modules may not be unique, but each module
+// represents exactly one module definition in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
+	if err := assertAllBlocksHaveNames("module", list); err != nil {
+		return nil, err
+	}
+
+	list = list.Children()
+	if len(list.Items) == 0 {
+		return nil, nil
+	}
+
+	// Where all the results will go
+	var result []*Module
+
+	// Now go over all the types and their children in order to get
+	// all of the actual resources.
+	for _, item := range list.Items {
+		k := item.Keys[0].Token.Value().(string)
+
+		var listVal *ast.ObjectList
+		if ot, ok := item.Val.(*ast.ObjectType); ok {
+			listVal = ot.List
+		} else {
+			return nil, fmt.Errorf("module '%s': should be an object", k)
+		}
+
+		var config map[string]interface{}
+		if err := hcl.DecodeObject(&config, item.Val); err != nil {
+			return nil, fmt.Errorf(
+				"Error reading config for %s: %s",
+				k,
+				err)
+		}
+
+		// Remove the fields we handle specially
+		delete(config, "source")
+
+		rawConfig, err := NewRawConfig(config)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error reading config for %s: %s",
+				k,
+				err)
+		}
+
+		// If we have a source, then parse it out
+		var source string
+		if o := listVal.Filter("source"); len(o.Items) > 0 {
+			err = hcl.DecodeObject(&source, o.Items[0].Val)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"Error parsing source for %s: %s",
+					k,
+					err)
+			}
+		}
+
+		result = append(result, &Module{
+			Name:      k,
+			Source:    source,
+			RawConfig: rawConfig,
+		})
+	}
+
+	return result, nil
+}
+
+// loadOutputsHcl recurses into the given HCL object and turns
+// it into a mapping of outputs.
+func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
+	if err := assertAllBlocksHaveNames("output", list); err != nil {
+		return nil, err
+	}
+
+	list = list.Children()
+
+	// Go through each object and turn it into an actual result.
+	result := make([]*Output, 0, len(list.Items))
+	for _, item := range list.Items {
+		n := item.Keys[0].Token.Value().(string)
+
+		var listVal *ast.ObjectList
+		if ot, ok := item.Val.(*ast.ObjectType); ok {
+			listVal = ot.List
+		} else {
+			return nil, fmt.Errorf("output '%s': should be an object", n)
+		}
+
+		var config map[string]interface{}
+		if err := hcl.DecodeObject(&config, item.Val); err != nil {
+			return nil, err
+		}
+
+		// Delete special keys
+		delete(config, "depends_on")
+
+		rawConfig, err := NewRawConfig(config)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error reading config for output %s: %s",
+				n,
+				err)
+		}
+
+		// If we have depends fields, then add those in
+		var dependsOn []string
+		if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+			err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"Error reading depends_on for output %q: %s",
+					n,
+					err)
+			}
+		}
+
+		result = append(result, &Output{
+			Name:      n,
+			RawConfig: rawConfig,
+			DependsOn: dependsOn,
+		})
+	}
+
+	return result, nil
+}
+
+// loadVariablesHcl recurses into the given HCL object and turns
+// it into a list of variables.
+func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
+	if err := assertAllBlocksHaveNames("variable", list); err != nil {
+		return nil, err
+	}
+
+	list = list.Children()
+
+	// hclVariable is the structure each variable is decoded into
+	type hclVariable struct {
+		DeclaredType string `hcl:"type"`
+		Default      interface{}
+		Description  string
+		Fields       []string `hcl:",decodedFields"`
+	}
+
+	// Go through each object and turn it into an actual result.
+	result := make([]*Variable, 0, len(list.Items))
+	for _, item := range list.Items {
+		// Clean up items from JSON
+		unwrapHCLObjectKeysFromJSON(item, 1)
+
+		// Verify the keys
+		if len(item.Keys) != 1 {
+			return nil, fmt.Errorf(
+				"position %s: 'variable' must be followed by exactly one string: a name",
+				item.Pos())
+		}
+
+		n := item.Keys[0].Token.Value().(string)
+		if !NameRegexp.MatchString(n) {
+			return nil, fmt.Errorf(
+				"position %s: 'variable' name must match regular expression: %s",
+				item.Pos(), NameRegexp)
+		}
+
+		// Check for invalid keys
+		valid := []string{"type", "default", "description"}
+		if err := checkHCLKeys(item.Val, valid); err != nil {
+			return nil, multierror.Prefix(err, fmt.Sprintf(
+				"variable[%s]:", n))
+		}
+
+		// Decode into hclVariable to get typed values
+		var hclVar hclVariable
+		if err := hcl.DecodeObject(&hclVar, item.Val); err != nil {
+			return nil, err
+		}
+
+		// Defaults turn into a slice of map[string]interface{} and
+		// we need to make sure to convert that down into the
+		// proper type for Config.
+		if ms, ok := hclVar.Default.([]map[string]interface{}); ok {
+			def := make(map[string]interface{})
+			for _, m := range ms {
+				for k, v := range m {
+					def[k] = v
+				}
+			}
+
+			hclVar.Default = def
+		}
+
+		// Build the new variable and do some basic validation
+		newVar := &Variable{
+			Name:         n,
+			DeclaredType: hclVar.DeclaredType,
+			Default:      hclVar.Default,
+			Description:  hclVar.Description,
+		}
+		if err := newVar.ValidateTypeAndDefault(); err != nil {
+			return nil, err
+		}
+
+		result = append(result, newVar)
+	}
+
+	return result, nil
+}
+
+// loadProvidersHcl recurses into the given HCL object and turns
+// it into a mapping of provider configs.
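(Aside on the variable-default flattening above: HCL v1 hands back a map-typed default as a `[]map[string]interface{}`, which the loop squashes into a single map. A tiny hedged demo with made-up values:)

```go
package main

import "fmt"

// Sketch of the default-flattening step in loadVariablesHcl: a map
// default decoded by HCL v1 arrives as a slice of single-entry maps.
func main() {
	ms := []map[string]interface{}{
		{"region": "us-east-1"},
		{"size": "small"},
	}
	def := make(map[string]interface{})
	for _, m := range ms {
		for k, v := range m {
			def[k] = v
		}
	}
	fmt.Println(def) // map[region:us-east-1 size:small]
}
```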
+func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
+	if err := assertAllBlocksHaveNames("provider", list); err != nil {
+		return nil, err
+	}
+
+	list = list.Children()
+	if len(list.Items) == 0 {
+		return nil, nil
+	}
+
+	// Go through each object and turn it into an actual result.
+	result := make([]*ProviderConfig, 0, len(list.Items))
+	for _, item := range list.Items {
+		n := item.Keys[0].Token.Value().(string)
+
+		var listVal *ast.ObjectList
+		if ot, ok := item.Val.(*ast.ObjectType); ok {
+			listVal = ot.List
+		} else {
+			return nil, fmt.Errorf("provider '%s': should be an object", n)
+		}
+
+		var config map[string]interface{}
+		if err := hcl.DecodeObject(&config, item.Val); err != nil {
+			return nil, err
+		}
+
+		delete(config, "alias")
+
+		rawConfig, err := NewRawConfig(config)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error reading config for provider config %s: %s",
+				n,
+				err)
+		}
+
+		// If we have an alias field, then add it in
+		var alias string
+		if a := listVal.Filter("alias"); len(a.Items) > 0 {
+			err := hcl.DecodeObject(&alias, a.Items[0].Val)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"Error reading alias for provider[%s]: %s",
+					n,
+					err)
+			}
+		}
+
+		result = append(result, &ProviderConfig{
+			Name:      n,
+			Alias:     alias,
+			RawConfig: rawConfig,
+		})
+	}
+
+	return result, nil
+}
+
+// Given a handle to a HCL object, this recurses into the structure
+// and pulls out a list of data sources.
+//
+// The resulting data sources may not be unique, but each one
+// represents exactly one data definition in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
+	if err := assertAllBlocksHaveNames("data", list); err != nil {
+		return nil, err
+	}
+
+	list = list.Children()
+	if len(list.Items) == 0 {
+		return nil, nil
+	}
+
+	// Where all the results will go
+	var result []*Resource
+
+	// Now go over all the types and their children in order to get
+	// all of the actual resources.
+ for _, item := range list.Items { + if len(item.Keys) != 2 { + return nil, fmt.Errorf( + "position %s: 'data' must be followed by exactly two strings: a type and a name", + item.Pos()) + } + + t := item.Keys[0].Token.Value().(string) + k := item.Keys[1].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("data sources %s[%s]: should be an object", t, k) + } + + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, fmt.Errorf( + "Error reading config for %s[%s]: %s", + t, + k, + err) + } + + // Remove the fields we handle specially + delete(config, "depends_on") + delete(config, "provider") + delete(config, "count") + + rawConfig, err := NewRawConfig(config) + if err != nil { + return nil, fmt.Errorf( + "Error reading config for %s[%s]: %s", + t, + k, + err) + } + + // If we have a count, then figure it out + var count string = "1" + if o := listVal.Filter("count"); len(o.Items) > 0 { + err = hcl.DecodeObject(&count, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error parsing count for %s[%s]: %s", + t, + k, + err) + } + } + countConfig, err := NewRawConfig(map[string]interface{}{ + "count": count, + }) + if err != nil { + return nil, err + } + countConfig.Key = "count" + + // If we have depends fields, then add those in + var dependsOn []string + if o := listVal.Filter("depends_on"); len(o.Items) > 0 { + err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading depends_on for %s[%s]: %s", + t, + k, + err) + } + } + + // If we have a provider, then parse it out + var provider string + if o := listVal.Filter("provider"); len(o.Items) > 0 { + err := hcl.DecodeObject(&provider, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading provider for %s[%s]: %s", + t, + k, + err) + } + } + + result = append(result, &Resource{ + Mode: DataResourceMode, + Name: k, + Type: t, + RawCount: countConfig, + RawConfig: rawConfig, + Provider: provider, + Provisioners: []*Provisioner{}, + DependsOn: dependsOn, + Lifecycle: ResourceLifecycle{}, + }) + } + + return result, nil +} + +// Given a handle to a HCL object, this recurses into the structure +// and pulls out a list of managed resources. +// +// The resulting resources may not be unique, but each resource +// represents exactly one "resource" block in the HCL configuration. +// We leave it up to another pass to merge them together. +func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { + list = list.Children() + if len(list.Items) == 0 { + return nil, nil + } + + // Where all the results will go + var result []*Resource + + // Now go over all the types and their children in order to get + // all of the actual resources. + for _, item := range list.Items { + // GH-4385: We detect a pure provisioner resource and give the user + // an error about how to do it cleanly. + if len(item.Keys) == 4 && item.Keys[2].Token.Value().(string) == "provisioner" { + return nil, fmt.Errorf( + "position %s: provisioners in a resource should be wrapped in a list\n\n"+ + "Example: \"provisioner\": [ { \"local-exec\": ... 
} ]", + item.Pos()) + } + + // Fix up JSON input + unwrapHCLObjectKeysFromJSON(item, 2) + + if len(item.Keys) != 2 { + return nil, fmt.Errorf( + "position %s: resource must be followed by exactly two strings, a type and a name", + item.Pos()) + } + + t := item.Keys[0].Token.Value().(string) + k := item.Keys[1].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("resources %s[%s]: should be an object", t, k) + } + + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, fmt.Errorf( + "Error reading config for %s[%s]: %s", + t, + k, + err) + } + + // Remove the fields we handle specially + delete(config, "connection") + delete(config, "count") + delete(config, "depends_on") + delete(config, "provisioner") + delete(config, "provider") + delete(config, "lifecycle") + + rawConfig, err := NewRawConfig(config) + if err != nil { + return nil, fmt.Errorf( + "Error reading config for %s[%s]: %s", + t, + k, + err) + } + + // If we have a count, then figure it out + var count string = "1" + if o := listVal.Filter("count"); len(o.Items) > 0 { + err = hcl.DecodeObject(&count, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error parsing count for %s[%s]: %s", + t, + k, + err) + } + } + countConfig, err := NewRawConfig(map[string]interface{}{ + "count": count, + }) + if err != nil { + return nil, err + } + countConfig.Key = "count" + + // If we have depends fields, then add those in + var dependsOn []string + if o := listVal.Filter("depends_on"); len(o.Items) > 0 { + err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading depends_on for %s[%s]: %s", + t, + k, + err) + } + } + + // If we have connection info, then parse those out + var connInfo map[string]interface{} + if o := listVal.Filter("connection"); len(o.Items) > 0 { + err := hcl.DecodeObject(&connInfo, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading connection info for %s[%s]: %s", + t, + k, + err) + } + } + + // If we have provisioners, then parse those out + var provisioners []*Provisioner + if os := listVal.Filter("provisioner"); len(os.Items) > 0 { + var err error + provisioners, err = loadProvisionersHcl(os, connInfo) + if err != nil { + return nil, fmt.Errorf( + "Error reading provisioners for %s[%s]: %s", + t, + k, + err) + } + } + + // If we have a provider, then parse it out + var provider string + if o := listVal.Filter("provider"); len(o.Items) > 0 { + err := hcl.DecodeObject(&provider, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading provider for %s[%s]: %s", + t, + k, + err) + } + } + + // Check if the resource should be re-created before + // destroying the existing instance + var lifecycle ResourceLifecycle + if o := listVal.Filter("lifecycle"); len(o.Items) > 0 { + if len(o.Items) > 1 { + return nil, fmt.Errorf( + "%s[%s]: Multiple lifecycle blocks found, expected one", + t, k) + } + + // Check for invalid keys + valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"} + if err := checkHCLKeys(o.Items[0].Val, valid); err != nil { + return nil, multierror.Prefix(err, fmt.Sprintf( + "%s[%s]:", t, k)) + } + + var raw map[string]interface{} + if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil { + return nil, fmt.Errorf( + "Error parsing lifecycle for %s[%s]: %s", + t, + k, + err) + } + + if err := 
mapstructure.WeakDecode(raw, &lifecycle); err != nil { + return nil, fmt.Errorf( + "Error parsing lifecycle for %s[%s]: %s", + t, + k, + err) + } + } + + result = append(result, &Resource{ + Mode: ManagedResourceMode, + Name: k, + Type: t, + RawCount: countConfig, + RawConfig: rawConfig, + Provisioners: provisioners, + Provider: provider, + DependsOn: dependsOn, + Lifecycle: lifecycle, + }) + } + + return result, nil +} + +func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) { + if err := assertAllBlocksHaveNames("provisioner", list); err != nil { + return nil, err + } + + list = list.Children() + if len(list.Items) == 0 { + return nil, nil + } + + // Go through each object and turn it into an actual result. + result := make([]*Provisioner, 0, len(list.Items)) + for _, item := range list.Items { + n := item.Keys[0].Token.Value().(string) + + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("provisioner '%s': should be an object", n) + } + + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return nil, err + } + + // Parse the "when" value + when := ProvisionerWhenCreate + if v, ok := config["when"]; ok { + switch v { + case "create": + when = ProvisionerWhenCreate + case "destroy": + when = ProvisionerWhenDestroy + default: + return nil, fmt.Errorf( + "position %s: 'provisioner' when must be 'create' or 'destroy'", + item.Pos()) + } + } + + // Parse the "on_failure" value + onFailure := ProvisionerOnFailureFail + if v, ok := config["on_failure"]; ok { + switch v { + case "continue": + onFailure = ProvisionerOnFailureContinue + case "fail": + onFailure = ProvisionerOnFailureFail + default: + return nil, fmt.Errorf( + "position %s: 'provisioner' on_failure must be 'continue' or 'fail'", + item.Pos()) + } + } + + // Delete fields we special case + delete(config, "connection") + delete(config, "when") + delete(config, "on_failure") + + rawConfig, err := NewRawConfig(config) + if err != nil { + return nil, err + } + + // Check if we have a provisioner-level connection + // block that overrides the resource-level + var subConnInfo map[string]interface{} + if o := listVal.Filter("connection"); len(o.Items) > 0 { + err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val) + if err != nil { + return nil, err + } + } + + // Inherit from the resource connInfo any keys + // that are not explicitly overriden. + if connInfo != nil && subConnInfo != nil { + for k, v := range connInfo { + if _, ok := subConnInfo[k]; !ok { + subConnInfo[k] = v + } + } + } else if subConnInfo == nil { + subConnInfo = connInfo + } + + // Parse the connInfo + connRaw, err := NewRawConfig(subConnInfo) + if err != nil { + return nil, err + } + + result = append(result, &Provisioner{ + Type: n, + RawConfig: rawConfig, + ConnInfo: connRaw, + When: when, + OnFailure: onFailure, + }) + } + + return result, nil +} + +/* +func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode { + objects := make(map[string][]*hclobj.Object) + + for _, o := range os.Elem(false) { + for _, elem := range o.Elem(true) { + val, ok := objects[elem.Key] + if !ok { + val = make([]*hclobj.Object, 0, 1) + } + + val = append(val, elem) + objects[elem.Key] = val + } + } + + return objects +} +*/ + +// assertAllBlocksHaveNames returns an error if any of the items in +// the given object list are blocks without keys (like "module {}") +// or simple assignments (like "module = 1"). 
It returns nil if +// neither of these things are true. +// +// The given name is used in any generated error messages, and should +// be the name of the block we're dealing with. The given list should +// be the result of calling .Filter on an object list with that same +// name. +func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error { + if elem := list.Elem(); len(elem.Items) != 0 { + switch et := elem.Items[0].Val.(type) { + case *ast.ObjectType: + pos := et.Lbrace + return fmt.Errorf("%s: %q must be followed by a name", pos, name) + default: + pos := elem.Items[0].Val.Pos() + return fmt.Errorf("%s: %q must be a configuration block", pos, name) + } + } + return nil +} + +func checkHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf( + "invalid key: %s", key)) + } + } + + return result +} + +// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when +// parsing JSON as input: if we're parsing JSON then directly nested +// items will show up as additional "keys". +// +// For objects that expect a fixed number of keys, this breaks the +// decoding process. This function unwraps the object into what it would've +// looked like if it came directly from HCL by specifying the number of keys +// you expect. +// +// Example: +// +// { "foo": { "baz": {} } } +// +// Will show up with Keys being: []string{"foo", "baz"} +// when we really just want the first two. This function will fix this. +func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) { + if len(item.Keys) > depth && item.Keys[0].Token.JSON { + for len(item.Keys) > depth { + // Pop off the last key + n := len(item.Keys) + key := item.Keys[n-1] + item.Keys[n-1] = nil + item.Keys = item.Keys[:n-1] + + // Wrap our value in a list + item.Val = &ast.ObjectType{ + List: &ast.ObjectList{ + Items: []*ast.ObjectItem{ + &ast.ObjectItem{ + Keys: []*ast.ObjectKey{key}, + Val: item.Val, + }, + }, + }, + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go new file mode 100644 index 0000000000..db214be456 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/merge.go @@ -0,0 +1,193 @@ +package config + +// Merge merges two configurations into a single configuration. +// +// Merge allows for the two configurations to have duplicate resources, +// because the resources will be merged. This differs from a single +// Config which must only have unique resources. +func Merge(c1, c2 *Config) (*Config, error) { + c := new(Config) + + // Merge unknown keys + unknowns := make(map[string]struct{}) + for _, k := range c1.unknownKeys { + _, present := unknowns[k] + if !present { + unknowns[k] = struct{}{} + c.unknownKeys = append(c.unknownKeys, k) + } + } + for _, k := range c2.unknownKeys { + _, present := unknowns[k] + if !present { + unknowns[k] = struct{}{} + c.unknownKeys = append(c.unknownKeys, k) + } + } + + // Merge Atlas configuration. This is a dumb one overrides the other + // sort of merge. 
+ c.Atlas = c1.Atlas + if c2.Atlas != nil { + c.Atlas = c2.Atlas + } + + // Merge the Terraform configuration + if c1.Terraform != nil { + c.Terraform = c1.Terraform + if c2.Terraform != nil { + c.Terraform.Merge(c2.Terraform) + } + } else { + c.Terraform = c2.Terraform + } + + // NOTE: Everything below is pretty gross. Due to the lack of generics + // in Go, there is some hoop-jumping involved to make this merging a + // little more test-friendly and less repetitive. Ironically, making it + // less repetitive involves being a little repetitive, but I prefer to + // be repetitive with things that are less error prone than things that + // are more error prone (more logic). Type conversions to an interface + // are pretty low-error. + + var m1, m2, mresult []merger + + // Modules + m1 = make([]merger, 0, len(c1.Modules)) + m2 = make([]merger, 0, len(c2.Modules)) + for _, v := range c1.Modules { + m1 = append(m1, v) + } + for _, v := range c2.Modules { + m2 = append(m2, v) + } + mresult = mergeSlice(m1, m2) + if len(mresult) > 0 { + c.Modules = make([]*Module, len(mresult)) + for i, v := range mresult { + c.Modules[i] = v.(*Module) + } + } + + // Outputs + m1 = make([]merger, 0, len(c1.Outputs)) + m2 = make([]merger, 0, len(c2.Outputs)) + for _, v := range c1.Outputs { + m1 = append(m1, v) + } + for _, v := range c2.Outputs { + m2 = append(m2, v) + } + mresult = mergeSlice(m1, m2) + if len(mresult) > 0 { + c.Outputs = make([]*Output, len(mresult)) + for i, v := range mresult { + c.Outputs[i] = v.(*Output) + } + } + + // Provider Configs + m1 = make([]merger, 0, len(c1.ProviderConfigs)) + m2 = make([]merger, 0, len(c2.ProviderConfigs)) + for _, v := range c1.ProviderConfigs { + m1 = append(m1, v) + } + for _, v := range c2.ProviderConfigs { + m2 = append(m2, v) + } + mresult = mergeSlice(m1, m2) + if len(mresult) > 0 { + c.ProviderConfigs = make([]*ProviderConfig, len(mresult)) + for i, v := range mresult { + c.ProviderConfigs[i] = v.(*ProviderConfig) + } + } + + // Resources + m1 = make([]merger, 0, len(c1.Resources)) + m2 = make([]merger, 0, len(c2.Resources)) + for _, v := range c1.Resources { + m1 = append(m1, v) + } + for _, v := range c2.Resources { + m2 = append(m2, v) + } + mresult = mergeSlice(m1, m2) + if len(mresult) > 0 { + c.Resources = make([]*Resource, len(mresult)) + for i, v := range mresult { + c.Resources[i] = v.(*Resource) + } + } + + // Variables + m1 = make([]merger, 0, len(c1.Variables)) + m2 = make([]merger, 0, len(c2.Variables)) + for _, v := range c1.Variables { + m1 = append(m1, v) + } + for _, v := range c2.Variables { + m2 = append(m2, v) + } + mresult = mergeSlice(m1, m2) + if len(mresult) > 0 { + c.Variables = make([]*Variable, len(mresult)) + for i, v := range mresult { + c.Variables[i] = v.(*Variable) + } + } + + return c, nil +} + +// merger is an interface that must be implemented by types that are +// merge-able. This simplifies the implementation of Merge for the various +// components of a Config. +type merger interface { + mergerName() string + mergerMerge(merger) merger +} + +// mergeSlice merges a slice of mergers. +func mergeSlice(m1, m2 []merger) []merger { + r := make([]merger, len(m1), len(m1)+len(m2)) + copy(r, m1) + + m := map[string]struct{}{} + for _, v2 := range m2 { + // If we already saw it, just append it because its a + // duplicate and invalid... 
+ name := v2.mergerName() + if _, ok := m[name]; ok { + r = append(r, v2) + continue + } + m[name] = struct{}{} + + // Find an original to override + var original merger + originalIndex := -1 + for i, v := range m1 { + if v.mergerName() == name { + originalIndex = i + original = v + break + } + } + + var v merger + if original == nil { + v = v2 + } else { + v = original.mergerMerge(v2) + } + + if originalIndex == -1 { + r = append(r, v) + } else { + r[originalIndex] = v + } + } + + return r +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go new file mode 100644 index 0000000000..095f61d853 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go @@ -0,0 +1,114 @@ +package module + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +func copyDir(dst, src string) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path == src { + return nil + } + + if strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // we don't want to try and copy the same file over itself. + if eq, err := sameFile(path, dstPath); eq { + return nil + } else if err != nil { + return err + } + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If we have a file, copy the contents. + srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} + +// sameFile tried to determine if to paths are the same file. +// If the paths don't match, we lookup the inode on supported systems. +func sameFile(a, b string) (bool, error) { + if a == b { + return true, nil + } + + aIno, err := inode(a) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + bIno, err := inode(b) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + if aIno > 0 && aIno == bIno { + return true, nil + } + + return false, nil +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go new file mode 100644 index 0000000000..96b4a63c3d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/get.go @@ -0,0 +1,71 @@ +package module + +import ( + "io/ioutil" + "os" + + "github.com/hashicorp/go-getter" +) + +// GetMode is an enum that describes how modules are loaded. +// +// GetModeLoad says that modules will not be downloaded or updated, they will +// only be loaded from the storage. 
+// +// GetModeGet says that modules can be initially downloaded if they don't +// exist, but otherwise to just load from the current version in storage. +// +// GetModeUpdate says that modules should be checked for updates and +// downloaded prior to loading. If there are no updates, we load the version +// from disk, otherwise we download first and then load. +type GetMode byte + +const ( + GetModeNone GetMode = iota + GetModeGet + GetModeUpdate +) + +// GetCopy is the same as Get except that it downloads a copy of the +// module represented by source. +// +// This copy will omit and dot-prefixed files (such as .git/, .hg/) and +// can't be updated on its own. +func GetCopy(dst, src string) error { + // Create the temporary directory to do the real Get to + tmpDir, err := ioutil.TempDir("", "tf") + if err != nil { + return err + } + // FIXME: This isn't completely safe. Creating and removing our temp path + // exposes where to race to inject files. + if err := os.RemoveAll(tmpDir); err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + // Get to that temporary dir + if err := getter.Get(tmpDir, src); err != nil { + return err + } + + // Make sure the destination exists + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + + // Copy to the final location + return copyDir(dst, tmpDir) +} + +func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) { + // Get the module with the level specified if we were told to. + if mode > GetModeNone { + if err := s.Get(key, src, mode == GetModeUpdate); err != nil { + return "", false, err + } + } + + // Get the directory where the module is. + return s.Dir(key) +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go new file mode 100644 index 0000000000..8603ee268e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go @@ -0,0 +1,21 @@ +// +build linux darwin openbsd netbsd solaris + +package module + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return st.Ino, nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go new file mode 100644 index 0000000000..0d95730d9d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go @@ -0,0 +1,21 @@ +// +build freebsd + +package module + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return uint64(st.Ino), nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go new file mode 100644 index 0000000000..c0cf455385 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package module + +// no syscall.Stat_t on windows, return 0 for inodes +func inode(path string) (uint64, error) { + return 0, nil +} diff --git 
a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go new file mode 100644 index 0000000000..f8649f6e9d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/module.go @@ -0,0 +1,7 @@ +package module + +// Module represents the metadata for a single module. +type Module struct { + Name string + Source string +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go new file mode 100644 index 0000000000..fc9e7331af --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/testing.go @@ -0,0 +1,38 @@ +package module + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/hashicorp/go-getter" +) + +// TestTree loads a module at the given path and returns the tree as well +// as a function that should be deferred to clean up resources. +func TestTree(t *testing.T, path string) (*Tree, func()) { + // Create a temporary directory for module storage + dir, err := ioutil.TempDir("", "tf") + if err != nil { + t.Fatalf("err: %s", err) + return nil, nil + } + + // Load the module + mod, err := NewTreeModule("", path) + if err != nil { + t.Fatalf("err: %s", err) + return nil, nil + } + + // Get the child modules + s := &getter.FolderStorage{StorageDir: dir} + if err := mod.Load(s, GetModeGet); err != nil { + t.Fatalf("err: %s", err) + return nil, nil + } + + return mod, func() { + os.RemoveAll(dir) + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go new file mode 100644 index 0000000000..b6f90fd930 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go @@ -0,0 +1,428 @@ +package module + +import ( + "bufio" + "bytes" + "fmt" + "path/filepath" + "strings" + "sync" + + "github.com/hashicorp/go-getter" + "github.com/hashicorp/terraform/config" +) + +// RootName is the name of the root tree. +const RootName = "root" + +// Tree represents the module import tree of configurations. +// +// This Tree structure can be used to get (download) new modules, load +// all the modules without getting, flatten the tree into something +// Terraform can use, etc. +type Tree struct { + name string + config *config.Config + children map[string]*Tree + path []string + lock sync.RWMutex +} + +// NewTree returns a new Tree for the given config structure. +func NewTree(name string, c *config.Config) *Tree { + return &Tree{config: c, name: name} +} + +// NewEmptyTree returns a new tree that is empty (contains no configuration). +func NewEmptyTree() *Tree { + t := &Tree{config: &config.Config{}} + + // We do this dummy load so that the tree is marked as "loaded". It + // should never fail because this is just about a no-op. If it does fail + // we panic so we can know its a bug. + if err := t.Load(nil, GetModeGet); err != nil { + panic(err) + } + + return t +} + +// NewTreeModule is like NewTree except it parses the configuration in +// the directory and gives it a specific name. Use a blank name "" to specify +// the root module. +func NewTreeModule(name, dir string) (*Tree, error) { + c, err := config.LoadDir(dir) + if err != nil { + return nil, err + } + + return NewTree(name, c), nil +} + +// Config returns the configuration for this module. +func (t *Tree) Config() *config.Config { + return t.config +} + +// Child returns the child with the given path (by name). 
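(Aside: a hedged usage sketch of the tree API defined here, from a client's point of view. The module names are invented, and `NewEmptyTree` is used only so the example runs standalone.)

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config/module"
)

// printSubtree resolves a nested module one path element at a time;
// Child returns nil if any hop along the path is missing.
func printSubtree(t *module.Tree) {
	if sub := t.Child([]string{"network", "subnets"}); sub != nil {
		fmt.Printf("module %s at path %v\n", sub.Name(), sub.Path())
	} else {
		fmt.Println("no such child module")
	}
}

func main() {
	printSubtree(module.NewEmptyTree())
}
```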
+func (t *Tree) Child(path []string) *Tree { + if t == nil { + return nil + } + + if len(path) == 0 { + return t + } + + c := t.Children()[path[0]] + if c == nil { + return nil + } + + return c.Child(path[1:]) +} + +// Children returns the children of this tree (the modules that are +// imported by this root). +// +// This will only return a non-nil value after Load is called. +func (t *Tree) Children() map[string]*Tree { + t.lock.RLock() + defer t.lock.RUnlock() + return t.children +} + +// Loaded says whether or not this tree has been loaded or not yet. +func (t *Tree) Loaded() bool { + t.lock.RLock() + defer t.lock.RUnlock() + return t.children != nil +} + +// Modules returns the list of modules that this tree imports. +// +// This is only the imports of _this_ level of the tree. To retrieve the +// full nested imports, you'll have to traverse the tree. +func (t *Tree) Modules() []*Module { + result := make([]*Module, len(t.config.Modules)) + for i, m := range t.config.Modules { + result[i] = &Module{ + Name: m.Name, + Source: m.Source, + } + } + + return result +} + +// Name returns the name of the tree. This will be "" for the root +// tree and then the module name given for any children. +func (t *Tree) Name() string { + if t.name == "" { + return RootName + } + + return t.name +} + +// Load loads the configuration of the entire tree. +// +// The parameters are used to tell the tree where to find modules and +// whether it can download/update modules along the way. +// +// Calling this multiple times will reload the tree. +// +// Various semantic-like checks are made along the way of loading since +// module trees inherently require the configuration to be in a reasonably +// sane state: no circular dependencies, proper module sources, etc. A full +// suite of validations can be done by running Validate (after loading). +func (t *Tree) Load(s getter.Storage, mode GetMode) error { + t.lock.Lock() + defer t.lock.Unlock() + + // Reset the children if we have any + t.children = nil + + modules := t.Modules() + children := make(map[string]*Tree) + + // Go through all the modules and get the directory for them. + for _, m := range modules { + if _, ok := children[m.Name]; ok { + return fmt.Errorf( + "module %s: duplicated. module names must be unique", m.Name) + } + + // Determine the path to this child + path := make([]string, len(t.path), len(t.path)+1) + copy(path, t.path) + path = append(path, m.Name) + + // Split out the subdir if we have one + source, subDir := getter.SourceDirSubdir(m.Source) + + source, err := getter.Detect(source, t.config.Dir, getter.Detectors) + if err != nil { + return fmt.Errorf("module %s: %s", m.Name, err) + } + + // Check if the detector introduced something new. 
+		source, subDir2 := getter.SourceDirSubdir(source)
+		if subDir2 != "" {
+			subDir = filepath.Join(subDir2, subDir)
+		}
+
+		// Get the directory where this module is so we can load it
+		key := strings.Join(path, ".")
+		key = fmt.Sprintf("root.%s-%s", key, m.Source)
+		dir, ok, err := getStorage(s, key, source, mode)
+		if err != nil {
+			return err
+		}
+		if !ok {
+			return fmt.Errorf(
+				"module %s: not found, may need to be downloaded using 'terraform get'", m.Name)
+		}
+
+		// If we have a subdirectory, then merge that in
+		if subDir != "" {
+			dir = filepath.Join(dir, subDir)
+		}
+
+		// Load the module's configuration
+		children[m.Name], err = NewTreeModule(m.Name, dir)
+		if err != nil {
+			return fmt.Errorf(
+				"module %s: %s", m.Name, err)
+		}
+
+		// Set the path of this child
+		children[m.Name].path = path
+	}
+
+	// Go through all the children and load them.
+	for _, c := range children {
+		if err := c.Load(s, mode); err != nil {
+			return err
+		}
+	}
+
+	// Set our tree up
+	t.children = children
+
+	return nil
+}
+
+// Path is the full path to this tree.
+func (t *Tree) Path() []string {
+	return t.path
+}
+
+// String gives a nice output to describe the tree.
+func (t *Tree) String() string {
+	var result bytes.Buffer
+	path := strings.Join(t.path, ", ")
+	if path != "" {
+		path = fmt.Sprintf(" (path: %s)", path)
+	}
+	result.WriteString(t.Name() + path + "\n")
+
+	cs := t.Children()
+	if cs == nil {
+		result.WriteString("  not loaded")
+	} else {
+		// Go through each child and get its string value, then indent it
+		// by two.
+		for _, c := range cs {
+			r := strings.NewReader(c.String())
+			scanner := bufio.NewScanner(r)
+			for scanner.Scan() {
+				result.WriteString("  ")
+				result.WriteString(scanner.Text())
+				result.WriteString("\n")
+			}
+		}
+	}
+
+	return result.String()
+}
+
+// Validate does semantic checks on the entire tree of configurations.
+//
+// This will call the respective config.Config.Validate() functions as well
+// as verifying things such as parameters/outputs between the various modules.
+//
+// Load must be called prior to calling Validate or an error will be returned.
+func (t *Tree) Validate() error {
+	if !t.Loaded() {
+		return fmt.Errorf("tree must be loaded before calling Validate")
+	}
+
+	// If something goes wrong, here is our error template
+	newErr := &treeError{Name: []string{t.Name()}}
+
+	// Terraform core does not handle root module children named "root".
+	// We plan to fix this in the future but this bug was brought up in
+	// the middle of a release and we don't want to introduce wide-sweeping
+	// changes at that time.
+	if len(t.path) == 1 && t.name == "root" {
+		return fmt.Errorf("root module cannot contain module named 'root'")
+	}
+
+	// Validate our configuration first.
+	if err := t.config.Validate(); err != nil {
+		newErr.Add(err)
+	}
+
+	// If we're the root, we do extra validation. This validation usually
+	// requires the entire tree (since children don't have parent pointers).
+	if len(t.path) == 0 {
+		if err := t.validateProviderAlias(); err != nil {
+			newErr.Add(err)
+		}
+	}
+
+	// Get the child trees
+	children := t.Children()
+
+	// Validate all our children
+	for _, c := range children {
+		err := c.Validate()
+		if err == nil {
+			continue
+		}
+
+		verr, ok := err.(*treeError)
+		if !ok {
+			// Unknown error, just return...
+			return err
+		}
+
+		// Append ourselves to the error and then return
+		verr.Name = append(verr.Name, t.Name())
+		newErr.AddChild(verr)
+	}
+
+	// Go over all the modules and verify that any parameters are valid
+	// variables into the module in question.
+	for _, m := range t.config.Modules {
+		tree, ok := children[m.Name]
+		if !ok {
+			// This should never happen because Load watches us
+			panic("module not found in children: " + m.Name)
+		}
+
+		// Build the variables that the module defines
+		requiredMap := make(map[string]struct{})
+		varMap := make(map[string]struct{})
+		for _, v := range tree.config.Variables {
+			varMap[v.Name] = struct{}{}
+
+			if v.Required() {
+				requiredMap[v.Name] = struct{}{}
+			}
+		}
+
+		// Compare to the keys in our raw config for the module
+		for k, _ := range m.RawConfig.Raw {
+			if _, ok := varMap[k]; !ok {
+				newErr.Add(fmt.Errorf(
+					"module %s: %s is not a valid parameter",
+					m.Name, k))
+			}
+
+			// Remove it from the required map since it is set
+			delete(requiredMap, k)
+		}
+
+		// If we have any required left over, they aren't set.
+		for k, _ := range requiredMap {
+			newErr.Add(fmt.Errorf(
+				"module %s: required variable %q not set",
+				m.Name, k))
+		}
+	}
+
+	// Go over all the variables used and make sure that any module
+	// variables represent outputs properly.
+	for source, vs := range t.config.InterpolatedVariables() {
+		for _, v := range vs {
+			mv, ok := v.(*config.ModuleVariable)
+			if !ok {
+				continue
+			}
+
+			tree, ok := children[mv.Name]
+			if !ok {
+				newErr.Add(fmt.Errorf(
+					"%s: undefined module referenced %s",
+					source, mv.Name))
+				continue
+			}
+
+			found := false
+			for _, o := range tree.config.Outputs {
+				if o.Name == mv.Field {
+					found = true
+					break
+				}
+			}
+			if !found {
+				newErr.Add(fmt.Errorf(
+					"%s: %s is not a valid output for module %s",
+					source, mv.Field, mv.Name))
+			}
+		}
+	}
+
+	return newErr.ErrOrNil()
+}
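For orientation, a sketch of how the Load/Validate pair above is typically driven from calling code. The storage directory, config path, and the GetModeGet constant are illustrative assumptions based on the surrounding packages, not confirmed by this diff:

package main

import (
	"log"

	"github.com/hashicorp/go-getter"
	"github.com/hashicorp/terraform/config/module"
)

func main() {
	// Parse the root configuration directory into a module tree.
	tree, err := module.NewTreeModule("", "./infra")
	if err != nil {
		log.Fatal(err)
	}

	// Fetch child modules into local storage, then load them recursively.
	storage := &getter.FolderStorage{StorageDir: ".terraform/modules"}
	if err := tree.Load(storage, module.GetModeGet); err != nil {
		log.Fatal(err)
	}

	// Validate requires a loaded tree; failures come back as a treeError.
	if err := tree.Validate(); err != nil {
		log.Fatal(err)
	}
}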
+
+// treeError is an error used by Tree.Validate to accumulate all
+// validation errors.
+type treeError struct {
+	Name     []string
+	Errs     []error
+	Children []*treeError
+}
+
+func (e *treeError) Add(err error) {
+	e.Errs = append(e.Errs, err)
+}
+
+func (e *treeError) AddChild(err *treeError) {
+	e.Children = append(e.Children, err)
+}
+
+func (e *treeError) ErrOrNil() error {
+	if len(e.Errs) > 0 || len(e.Children) > 0 {
+		return e
+	}
+	return nil
+}
+
+func (e *treeError) Error() string {
+	name := strings.Join(e.Name, ".")
+	var out bytes.Buffer
+	fmt.Fprintf(&out, "module %s: ", name)
+
+	if len(e.Errs) == 1 {
+		// single line error
+		out.WriteString(e.Errs[0].Error())
+	} else {
+		// multi-line error
+		for _, err := range e.Errs {
+			fmt.Fprintf(&out, "\n    %s", err)
+		}
+	}
+
+	if len(e.Children) > 0 {
+		// start the next error on a new line
+		out.WriteString("\n  ")
+	}
+	for _, child := range e.Children {
+		out.WriteString(child.Error())
+	}
+
+	return out.String()
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
new file mode 100644
index 0000000000..fcd37f4e71
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
@@ -0,0 +1,57 @@
+package module
+
+import (
+	"bytes"
+	"encoding/gob"
+
+	"github.com/hashicorp/terraform/config"
+)
+
+func (t *Tree) GobDecode(bs []byte) error {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	// Decode the gob data
+	var data treeGob
+	dec := gob.NewDecoder(bytes.NewReader(bs))
+	if err := dec.Decode(&data); err != nil {
+		return err
+	}
+
+	// Set the fields
+	t.name = data.Name
+	t.config = data.Config
+	t.children = data.Children
+	t.path = data.Path
+
+	return nil
+}
+
+func (t *Tree) GobEncode() ([]byte, error) {
+	data := &treeGob{
+		Config:   t.config,
+		Children: t.children,
+		Name:     t.name,
+		Path:     t.path,
+	}
+
+	var buf bytes.Buffer
+	enc := gob.NewEncoder(&buf)
+	if err := enc.Encode(data); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// treeGob is used as a structure to Gob encode a tree.
+//
+// This structure is private so it can't be referenced but the fields are
+// public, allowing Gob to properly encode this. When we decode this, we are
+// able to turn it into a Tree.
+type treeGob struct {
+	Config   *config.Config
+	Children map[string]*Tree
+	Name     string
+	Path     []string
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
new file mode 100644
index 0000000000..090d4f7e39
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
@@ -0,0 +1,118 @@
+package module
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// validateProviderAlias validates that all provider alias references are
+// defined at some point in the parent tree. This improves UX by catching
+// alias typos at the slight cost of requiring a declaration of usage. This
+// is usually a good tradeoff since not many aliases are used.
+func (t *Tree) validateProviderAlias() error {
+	// If we're not the root, don't perform this validation. We must be the
+	// root since we require full tree visibility.
+	if len(t.path) != 0 {
+		return nil
+	}
+
+	// We'll use a graph to keep track of defined aliases at each level.
+	// As long as a parent defines an alias, it is okay.
+ var g dag.AcyclicGraph + t.buildProviderAliasGraph(&g, nil) + + // Go through the graph and check that the usage is all good. + var err error + for _, v := range g.Vertices() { + pv, ok := v.(*providerAliasVertex) + if !ok { + // This shouldn't happen, just ignore it. + continue + } + + // If we're not using any aliases, fast track and just continue + if len(pv.Used) == 0 { + continue + } + + // Grab the ancestors since we're going to have to check if our + // parents define any of our aliases. + var parents []*providerAliasVertex + ancestors, _ := g.Ancestors(v) + for _, raw := range ancestors.List() { + if pv, ok := raw.(*providerAliasVertex); ok { + parents = append(parents, pv) + } + } + for k, _ := range pv.Used { + // Check if we define this + if _, ok := pv.Defined[k]; ok { + continue + } + + // Check for a parent + found := false + for _, parent := range parents { + _, found = parent.Defined[k] + if found { + break + } + } + if found { + continue + } + + // We didn't find the alias, error! + err = multierror.Append(err, fmt.Errorf( + "module %s: provider alias must be defined by the module or a parent: %s", + strings.Join(pv.Path, "."), k)) + } + } + + return err +} + +func (t *Tree) buildProviderAliasGraph(g *dag.AcyclicGraph, parent dag.Vertex) { + // Add all our defined aliases + defined := make(map[string]struct{}) + for _, p := range t.config.ProviderConfigs { + defined[p.FullName()] = struct{}{} + } + + // Add all our used aliases + used := make(map[string]struct{}) + for _, r := range t.config.Resources { + if r.Provider != "" { + used[r.Provider] = struct{}{} + } + } + + // Add it to the graph + vertex := &providerAliasVertex{ + Path: t.Path(), + Defined: defined, + Used: used, + } + g.Add(vertex) + + // Connect to our parent if we have one + if parent != nil { + g.Connect(dag.BasicEdge(vertex, parent)) + } + + // Build all our children + for _, c := range t.Children() { + c.buildProviderAliasGraph(g, vertex) + } +} + +// providerAliasVertex is the vertex for the graph that keeps track of +// defined provider aliases. +type providerAliasVertex struct { + Path []string + Defined map[string]struct{} + Used map[string]struct{} +} diff --git a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go new file mode 100644 index 0000000000..00fd43fce4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go @@ -0,0 +1,40 @@ +package config + +// ProvisionerWhen is an enum for valid values for when to run provisioners. +type ProvisionerWhen int + +const ( + ProvisionerWhenInvalid ProvisionerWhen = iota + ProvisionerWhenCreate + ProvisionerWhenDestroy +) + +var provisionerWhenStrs = map[ProvisionerWhen]string{ + ProvisionerWhenInvalid: "invalid", + ProvisionerWhenCreate: "create", + ProvisionerWhenDestroy: "destroy", +} + +func (v ProvisionerWhen) String() string { + return provisionerWhenStrs[v] +} + +// ProvisionerOnFailure is an enum for valid values for on_failure options +// for provisioners. 
+type ProvisionerOnFailure int + +const ( + ProvisionerOnFailureInvalid ProvisionerOnFailure = iota + ProvisionerOnFailureContinue + ProvisionerOnFailureFail +) + +var provisionerOnFailureStrs = map[ProvisionerOnFailure]string{ + ProvisionerOnFailureInvalid: "invalid", + ProvisionerOnFailureContinue: "continue", + ProvisionerOnFailureFail: "fail", +} + +func (v ProvisionerOnFailure) String() string { + return provisionerOnFailureStrs[v] +} diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go new file mode 100644 index 0000000000..f8498d85ce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go @@ -0,0 +1,335 @@ +package config + +import ( + "bytes" + "encoding/gob" + "sync" + + "github.com/hashicorp/hil" + "github.com/hashicorp/hil/ast" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// RawConfig is a structure that holds a piece of configuration +// where the overall structure is unknown since it will be used +// to configure a plugin or some other similar external component. +// +// RawConfigs can be interpolated with variables that come from +// other resources, user variables, etc. +// +// RawConfig supports a query-like interface to request +// information from deep within the structure. +type RawConfig struct { + Key string + Raw map[string]interface{} + Interpolations []ast.Node + Variables map[string]InterpolatedVariable + + lock sync.Mutex + config map[string]interface{} + unknownKeys []string +} + +// NewRawConfig creates a new RawConfig structure and populates the +// publicly readable struct fields. +func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) { + result := &RawConfig{Raw: raw} + if err := result.init(); err != nil { + return nil, err + } + + return result, nil +} + +// RawMap returns a copy of the RawConfig.Raw map. +func (r *RawConfig) RawMap() map[string]interface{} { + r.lock.Lock() + defer r.lock.Unlock() + + m := make(map[string]interface{}) + for k, v := range r.Raw { + m[k] = v + } + return m +} + +// Copy returns a copy of this RawConfig, uninterpolated. +func (r *RawConfig) Copy() *RawConfig { + if r == nil { + return nil + } + + r.lock.Lock() + defer r.lock.Unlock() + + newRaw := make(map[string]interface{}) + for k, v := range r.Raw { + newRaw[k] = v + } + + result, err := NewRawConfig(newRaw) + if err != nil { + panic("copy failed: " + err.Error()) + } + + result.Key = r.Key + return result +} + +// Value returns the value of the configuration if this configuration +// has a Key set. If this does not have a Key set, nil will be returned. +func (r *RawConfig) Value() interface{} { + if c := r.Config(); c != nil { + if v, ok := c[r.Key]; ok { + return v + } + } + + r.lock.Lock() + defer r.lock.Unlock() + return r.Raw[r.Key] +} + +// Config returns the entire configuration with the variables +// interpolated from any call to Interpolate. +// +// If any interpolated variables are unknown (value set to +// UnknownVariableValue), the first non-container (map, slice, etc.) element +// will be removed from the config. The keys of unknown variables +// can be found using the UnknownKeys function. 
+// +// By pruning out unknown keys from the configuration, the raw +// structure will always successfully decode into its ultimate +// structure using something like mapstructure. +func (r *RawConfig) Config() map[string]interface{} { + r.lock.Lock() + defer r.lock.Unlock() + return r.config +} + +// Interpolate uses the given mapping of variable values and uses +// those as the values to replace any variables in this raw +// configuration. +// +// Any prior calls to Interpolate are replaced with this one. +// +// If a variable key is missing, this will panic. +func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error { + r.lock.Lock() + defer r.lock.Unlock() + + config := langEvalConfig(vs) + return r.interpolate(func(root ast.Node) (interface{}, error) { + // None of the variables we need are computed, meaning we should + // be able to properly evaluate. + result, err := hil.Eval(root, config) + if err != nil { + return "", err + } + + return result.Value, nil + }) +} + +// Merge merges another RawConfig into this one (overriding any conflicting +// values in this config) and returns a new config. The original config +// is not modified. +func (r *RawConfig) Merge(other *RawConfig) *RawConfig { + r.lock.Lock() + defer r.lock.Unlock() + + // Merge the raw configurations + raw := make(map[string]interface{}) + for k, v := range r.Raw { + raw[k] = v + } + for k, v := range other.Raw { + raw[k] = v + } + + // Create the result + result, err := NewRawConfig(raw) + if err != nil { + panic(err) + } + + // Merge the interpolated results + result.config = make(map[string]interface{}) + for k, v := range r.config { + result.config[k] = v + } + for k, v := range other.config { + result.config[k] = v + } + + // Build the unknown keys + if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 { + unknownKeys := make(map[string]struct{}) + for _, k := range r.unknownKeys { + unknownKeys[k] = struct{}{} + } + for _, k := range other.unknownKeys { + unknownKeys[k] = struct{}{} + } + + result.unknownKeys = make([]string, 0, len(unknownKeys)) + for k, _ := range unknownKeys { + result.unknownKeys = append(result.unknownKeys, k) + } + } + + return result +} + +func (r *RawConfig) init() error { + r.lock.Lock() + defer r.lock.Unlock() + + r.config = r.Raw + r.Interpolations = nil + r.Variables = nil + + fn := func(node ast.Node) (interface{}, error) { + r.Interpolations = append(r.Interpolations, node) + vars, err := DetectVariables(node) + if err != nil { + return "", err + } + + for _, v := range vars { + if r.Variables == nil { + r.Variables = make(map[string]InterpolatedVariable) + } + + r.Variables[v.FullKey()] = v + } + + return "", nil + } + + walker := &interpolationWalker{F: fn} + if err := reflectwalk.Walk(r.Raw, walker); err != nil { + return err + } + + return nil +} + +func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error { + config, err := copystructure.Copy(r.Raw) + if err != nil { + return err + } + r.config = config.(map[string]interface{}) + + w := &interpolationWalker{F: fn, Replace: true} + err = reflectwalk.Walk(r.config, w) + if err != nil { + return err + } + + r.unknownKeys = w.unknownKeys + return nil +} + +func (r *RawConfig) merge(r2 *RawConfig) *RawConfig { + if r == nil && r2 == nil { + return nil + } + + if r == nil { + r = &RawConfig{} + } + + rawRaw, err := copystructure.Copy(r.Raw) + if err != nil { + panic(err) + } + + raw := rawRaw.(map[string]interface{}) + if r2 != nil { + for k, v := range r2.Raw { + raw[k] = v + } + } + + result, err := 
NewRawConfig(raw) + if err != nil { + panic(err) + } + + return result +} + +// UnknownKeys returns the keys of the configuration that are unknown +// because they had interpolated variables that must be computed. +func (r *RawConfig) UnknownKeys() []string { + r.lock.Lock() + defer r.lock.Unlock() + return r.unknownKeys +} + +// See GobEncode +func (r *RawConfig) GobDecode(b []byte) error { + var data gobRawConfig + err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data) + if err != nil { + return err + } + + r.Key = data.Key + r.Raw = data.Raw + + return r.init() +} + +// GobEncode is a custom Gob encoder to use so that we only include the +// raw configuration. Interpolated variables and such are lost and the +// tree of interpolated variables is recomputed on decode, since it is +// referentially transparent. +func (r *RawConfig) GobEncode() ([]byte, error) { + r.lock.Lock() + defer r.lock.Unlock() + + data := gobRawConfig{ + Key: r.Key, + Raw: r.Raw, + } + + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(data); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type gobRawConfig struct { + Key string + Raw map[string]interface{} +} + +// langEvalConfig returns the evaluation configuration we use to execute. +func langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig { + funcMap := make(map[string]ast.Function) + for k, v := range Funcs() { + funcMap[k] = v + } + funcMap["lookup"] = interpolationFuncLookup(vs) + funcMap["keys"] = interpolationFuncKeys(vs) + funcMap["values"] = interpolationFuncValues(vs) + + return &hil.EvalConfig{ + GlobalScope: &ast.BasicScope{ + VarMap: vs, + FuncMap: funcMap, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode.go b/vendor/github.com/hashicorp/terraform/config/resource_mode.go new file mode 100644 index 0000000000..877c6e8485 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/resource_mode.go @@ -0,0 +1,9 @@ +package config + +//go:generate stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go +type ResourceMode int + +const ( + ManagedResourceMode ResourceMode = iota + DataResourceMode +) diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go new file mode 100644 index 0000000000..ea68b4fcdb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT. + +package config + +import "fmt" + +const _ResourceMode_name = "ManagedResourceModeDataResourceMode" + +var _ResourceMode_index = [...]uint8{0, 19, 35} + +func (i ResourceMode) String() string { + if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { + return fmt.Sprintf("ResourceMode(%d)", i) + } + return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go new file mode 100644 index 0000000000..f7bfadd9e8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/testing.go @@ -0,0 +1,15 @@ +package config + +import ( + "testing" +) + +// TestRawConfig is used to create a RawConfig for testing. 
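A short sketch of the RawConfig lifecycle defined in this file: variables are detected at construction, then substituted by Interpolate. The variable name and value are illustrative; the ast.Variable shape comes from the hil package:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hil/ast"
	"github.com/hashicorp/terraform/config"
)

func main() {
	raw, err := config.NewRawConfig(map[string]interface{}{
		"ami": "${var.image}",
	})
	if err != nil {
		log.Fatal(err)
	}

	// NewRawConfig already detected the "var.image" interpolation; supply it.
	err = raw.Interpolate(map[string]ast.Variable{
		"var.image": {Type: ast.TypeString, Value: "ami-123456"},
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(raw.Config()["ami"]) // ami-123456
}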
+func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig {
+	cfg, err := NewRawConfig(c)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return cfg
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go
new file mode 100644
index 0000000000..f8776bc511
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dag.go
@@ -0,0 +1,286 @@
+package dag
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+// AcyclicGraph is a specialization of Graph that cannot have cycles. With
+// this property, we get sane graph traversal.
+type AcyclicGraph struct {
+	Graph
+}
+
+// WalkFunc is the callback used for walking the graph.
+type WalkFunc func(Vertex) error
+
+// DepthWalkFunc is a walk function that also receives the current depth of the
+// walk as an argument
+type DepthWalkFunc func(Vertex, int) error
+
+func (g *AcyclicGraph) DirectedGraph() Grapher {
+	return g
+}
+
+// Ancestors returns a Set that includes every Vertex yielded by walking down
+// from the provided starting Vertex v.
+func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) {
+	s := new(Set)
+	start := AsVertexList(g.DownEdges(v))
+	memoFunc := func(v Vertex, d int) error {
+		s.Add(v)
+		return nil
+	}
+
+	if err := g.DepthFirstWalk(start, memoFunc); err != nil {
+		return nil, err
+	}
+
+	return s, nil
+}
+
+// Descendents returns a Set that includes every Vertex yielded by walking up
+// from the provided starting Vertex v.
+func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) {
+	s := new(Set)
+	start := AsVertexList(g.UpEdges(v))
+	memoFunc := func(v Vertex, d int) error {
+		s.Add(v)
+		return nil
+	}
+
+	if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil {
+		return nil, err
+	}
+
+	return s, nil
+}
+
+// Root returns the root of the DAG, or an error.
+//
+// Complexity: O(V)
+func (g *AcyclicGraph) Root() (Vertex, error) {
+	roots := make([]Vertex, 0, 1)
+	for _, v := range g.Vertices() {
+		if g.UpEdges(v).Len() == 0 {
+			roots = append(roots, v)
+		}
+	}
+
+	if len(roots) > 1 {
+		// TODO(mitchellh): make this error message a lot better
+		return nil, fmt.Errorf("multiple roots: %#v", roots)
+	}
+
+	if len(roots) == 0 {
+		return nil, fmt.Errorf("no roots found")
+	}
+
+	return roots[0], nil
+}
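The transitive reduction implemented just below can be seen concretely with three string vertices; a minimal sketch using only the API in this file:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.AcyclicGraph
	g.Add("A")
	g.Add("B")
	g.Add("C")
	g.Connect(dag.BasicEdge("A", "B"))
	g.Connect(dag.BasicEdge("B", "C"))
	g.Connect(dag.BasicEdge("A", "C")) // redundant: A already reaches C via B

	g.TransitiveReduction()

	// Only A -> B and B -> C remain; the direct A -> C edge was removed.
	fmt.Println(g.String())
}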
+
+// TransitiveReduction performs the transitive reduction of graph g in place.
+// The transitive reduction of a graph is a graph with as few edges as
+// possible with the same reachability as the original graph. This means
+// that if there are three nodes A => B => C, and A connects to both
+// B and C, and B connects to C, then the transitive reduction is the
+// same graph with only a single edge between A and B, and a single edge
+// between B and C.
+//
+// The graph must be valid for this operation to behave properly. If
+// Validate() returns an error, the behavior is undefined and the results
+// will likely be unexpected.
+//
+// Complexity: O(V(V+E)), or asymptotically O(VE)
+func (g *AcyclicGraph) TransitiveReduction() {
+	// For each vertex u in graph g, do a DFS starting from each vertex
+	// v such that the edge (u,v) exists (v is a direct descendant of u).
+	//
+	// For each v-prime reachable from v, remove the edge (u, v-prime).
+	defer g.debug.BeginOperation("TransitiveReduction", "").End("")
+
+	for _, u := range g.Vertices() {
+		uTargets := g.DownEdges(u)
+		vs := AsVertexList(g.DownEdges(u))
+
+		g.DepthFirstWalk(vs, func(v Vertex, d int) error {
+			shared := uTargets.Intersection(g.DownEdges(v))
+			for _, vPrime := range AsVertexList(shared) {
+				g.RemoveEdge(BasicEdge(u, vPrime))
+			}
+
+			return nil
+		})
+	}
+}
+
+// Validate validates the DAG. A DAG is valid if it has a single root
+// with no cycles.
+func (g *AcyclicGraph) Validate() error {
+	if _, err := g.Root(); err != nil {
+		return err
+	}
+
+	// Look for cycles of more than 1 component
+	var err error
+	cycles := g.Cycles()
+	if len(cycles) > 0 {
+		for _, cycle := range cycles {
+			cycleStr := make([]string, len(cycle))
+			for j, vertex := range cycle {
+				cycleStr[j] = VertexName(vertex)
+			}
+
+			err = multierror.Append(err, fmt.Errorf(
+				"Cycle: %s", strings.Join(cycleStr, ", ")))
+		}
+	}
+
+	// Look for cycles to self
+	for _, e := range g.Edges() {
+		if e.Source() == e.Target() {
+			err = multierror.Append(err, fmt.Errorf(
+				"Self reference: %s", VertexName(e.Source())))
+		}
+	}
+
+	return err
+}
+
+func (g *AcyclicGraph) Cycles() [][]Vertex {
+	var cycles [][]Vertex
+	for _, cycle := range StronglyConnected(&g.Graph) {
+		if len(cycle) > 1 {
+			cycles = append(cycles, cycle)
+		}
+	}
+	return cycles
+}
+
+// Walk walks the graph, calling your callback as each node is visited.
+// This will walk nodes in parallel if it can. Because the walk is done
+// in parallel, the error returned will be a multierror.
+func (g *AcyclicGraph) Walk(cb WalkFunc) error {
+	defer g.debug.BeginOperation(typeWalk, "").End("")
+
+	w := &Walker{Callback: cb, Reverse: true}
+	w.Update(g)
+	return w.Wait()
+}
+
+// AsVertexList is a simple convenience helper for converting a dag.Set
+// to a []Vertex.
+func AsVertexList(s *Set) []Vertex {
+	rawList := s.List()
+	vertexList := make([]Vertex, len(rawList))
+	for i, raw := range rawList {
+		vertexList[i] = raw.(Vertex)
+	}
+	return vertexList
+}
+
+type vertexAtDepth struct {
+	Vertex Vertex
+	Depth  int
+}
+
+// DepthFirstWalk does a depth-first walk of the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+	defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("")
+
+	seen := make(map[Vertex]struct{})
+	frontier := make([]*vertexAtDepth, len(start))
+	for i, v := range start {
+		frontier[i] = &vertexAtDepth{
+			Vertex: v,
+			Depth:  0,
+		}
+	}
+	for len(frontier) > 0 {
+		// Pop the current vertex
+		n := len(frontier)
+		current := frontier[n-1]
+		frontier = frontier[:n-1]
+
+		// Check if we've seen this already and return...
+		if _, ok := seen[current.Vertex]; ok {
+			continue
+		}
+		seen[current.Vertex] = struct{}{}
+
+		// Visit the current node
+		if err := f(current.Vertex, current.Depth); err != nil {
+			return err
+		}
+
+		// Visit targets of this in a consistent order.
+		targets := AsVertexList(g.DownEdges(current.Vertex))
+		sort.Sort(byVertexName(targets))
+		for _, t := range targets {
+			frontier = append(frontier, &vertexAtDepth{
+				Vertex: t,
+				Depth:  current.Depth + 1,
+			})
+		}
+	}
+
+	return nil
+}
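Walk (above) is the usual entry point for executing a graph; a minimal sketch. Because the embedded Walker is constructed with Reverse set, a vertex runs only after its dependencies (its edge targets), so "leaf" should print before "root" here:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.AcyclicGraph
	g.Add("root")
	g.Add("leaf")
	g.Connect(dag.BasicEdge("root", "leaf")) // root depends on leaf

	err := g.Walk(func(v dag.Vertex) error {
		fmt.Println("visit:", dag.VertexName(v))
		return nil // a non-nil error here is collected into a multierror
	})
	if err != nil {
		fmt.Println(err)
	}
}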
+
+// ReverseDepthFirstWalk does a depth-first walk _up_ the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+	defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("")
+
+	seen := make(map[Vertex]struct{})
+	frontier := make([]*vertexAtDepth, len(start))
+	for i, v := range start {
+		frontier[i] = &vertexAtDepth{
+			Vertex: v,
+			Depth:  0,
+		}
+	}
+	for len(frontier) > 0 {
+		// Pop the current vertex
+		n := len(frontier)
+		current := frontier[n-1]
+		frontier = frontier[:n-1]
+
+		// Check if we've seen this already and return...
+		if _, ok := seen[current.Vertex]; ok {
+			continue
+		}
+		seen[current.Vertex] = struct{}{}
+
+		// Add next set of targets in a consistent order.
+		targets := AsVertexList(g.UpEdges(current.Vertex))
+		sort.Sort(byVertexName(targets))
+		for _, t := range targets {
+			frontier = append(frontier, &vertexAtDepth{
+				Vertex: t,
+				Depth:  current.Depth + 1,
+			})
+		}
+
+		// Visit the current node
+		if err := f(current.Vertex, current.Depth); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// byVertexName implements sort.Interface so a list of Vertices can be sorted
+// consistently by their VertexName
+type byVertexName []Vertex
+
+func (b byVertexName) Len() int      { return len(b) }
+func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byVertexName) Less(i, j int) bool {
+	return VertexName(b[i]) < VertexName(b[j])
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dot.go b/vendor/github.com/hashicorp/terraform/dag/dot.go
new file mode 100644
index 0000000000..7e6d2af3b1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dot.go
@@ -0,0 +1,282 @@
+package dag
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// DotOpts are the options for generating a dot formatted Graph.
+type DotOpts struct {
+	// Allows some nodes to decide to only show themselves when the user has
+	// requested the "verbose" graph.
+	Verbose bool
+
+	// Highlight Cycles
+	DrawCycles bool
+
+	// How many levels to expand modules as we draw
+	MaxDepth int
+
+	// use this to keep the cluster_ naming convention from the previous dot writer
+	cluster bool
+}
+
+// GraphNodeDotter can be implemented by a node to cause it to be included
+// in the dot graph. The DotNode method will be called, which is expected to
+// return a representation of this node.
+type GraphNodeDotter interface {
+	// DotNode is called to return the dot formatting for the node.
+	// The first parameter is the title of the node.
+	// The second parameter includes user-specified options that affect the dot
+	// graph. See DotOpts above for details.
+	DotNode(string, *DotOpts) *DotNode
+}
+
+// DotNode provides a structure for Vertices to return in order to specify their
+// dot format.
+type DotNode struct {
+	Name  string
+	Attrs map[string]string
+}
+
+// Dot returns the DOT representation of this Graph.
+func (g *marshalGraph) Dot(opts *DotOpts) []byte {
+	if opts == nil {
+		opts = &DotOpts{
+			DrawCycles: true,
+			MaxDepth:   -1,
+			Verbose:    true,
+		}
+	}
+
+	var w indentWriter
+	w.WriteString("digraph {\n")
+	w.Indent()
+
+	// some dot defaults
+	w.WriteString(`compound = "true"` + "\n")
+	w.WriteString(`newrank = "true"` + "\n")
+
+	// the top level graph is written as the first subgraph
+	w.WriteString(`subgraph "root" {` + "\n")
+	g.writeBody(opts, &w)
+
+	// cluster isn't really used other than for naming purposes in some graphs
+	opts.cluster = opts.MaxDepth != 0
+	maxDepth := opts.MaxDepth
+	if maxDepth == 0 {
+		maxDepth = -1
+	}
+
+	for _, s := range g.Subgraphs {
+		g.writeSubgraph(s, opts, maxDepth, &w)
+	}
+
+	w.Unindent()
+	w.WriteString("}\n")
+	return w.Bytes()
+}
+
+func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
+	var buf bytes.Buffer
+	graphName := g.Name
+	if graphName == "" {
+		graphName = "root"
+	}
+
+	name := v.Name
+	attrs := v.Attrs
+	if v.graphNodeDotter != nil {
+		node := v.graphNodeDotter.DotNode(name, opts)
+		if node == nil {
+			return []byte{}
+		}
+
+		newAttrs := make(map[string]string)
+		for k, v := range attrs {
+			newAttrs[k] = v
+		}
+		for k, v := range node.Attrs {
+			newAttrs[k] = v
+		}
+
+		name = node.Name
+		attrs = newAttrs
+	}
+
+	buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name))
+	writeAttrs(&buf, attrs)
+	buf.WriteByte('\n')
+
+	return buf.Bytes()
+}
+
+func (e *marshalEdge) dot(g *marshalGraph) string {
+	var buf bytes.Buffer
+	graphName := g.Name
+	if graphName == "" {
+		graphName = "root"
+	}
+
+	sourceName := g.vertexByID(e.Source).Name
+	targetName := g.vertexByID(e.Target).Name
+	s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName)
+	buf.WriteString(s)
+	writeAttrs(&buf, e.Attrs)
+
+	return buf.String()
+}
+
+func cycleDot(e *marshalEdge, g *marshalGraph) string {
+	return e.dot(g) + ` [color = "red", penwidth = "2.0"]`
+}
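Since the body-writing code below skips vertices that don't implement GraphNodeDotter, a vertex type has to opt in to appear in the rendered output. A sketch with a hypothetical node type:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

// node is a hypothetical vertex type that opts in to dot rendering.
type node string

func (n node) Name() string { return string(n) }

// DotNode satisfies dag.GraphNodeDotter; vertices without it are skipped.
func (n node) DotNode(title string, _ *dag.DotOpts) *dag.DotNode {
	return &dag.DotNode{
		Name:  title,
		Attrs: map[string]string{"shape": "box"},
	}
}

func main() {
	var g dag.Graph
	g.Add(node("a"))
	g.Add(node("b"))
	g.Connect(dag.BasicEdge(node("a"), node("b")))

	fmt.Println(string(g.Dot(nil)))
}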
+
+// writeSubgraph writes the subgraph body. This is recursive, and the depth
+// argument is used to record the current depth of iteration.
+func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) {
+	if depth == 0 {
+		return
+	}
+	depth--
+
+	name := sg.Name
+	if opts.cluster {
+		// we prefix with cluster_ to match the old dot output
+		name = "cluster_" + name
+		sg.Attrs["label"] = sg.Name
+	}
+	w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
+	sg.writeBody(opts, w)
+
+	for _, sg := range sg.Subgraphs {
+		g.writeSubgraph(sg, opts, depth, w)
+	}
+}
+
+func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
+	w.Indent()
+
+	for _, as := range attrStrings(g.Attrs) {
+		w.WriteString(as + "\n")
+	}
+
+	// list of Vertices that aren't to be included in the dot output
+	skip := map[string]bool{}
+
+	for _, v := range g.Vertices {
+		if v.graphNodeDotter == nil {
+			skip[v.ID] = true
+			continue
+		}
+
+		w.Write(v.dot(g, opts))
+	}
+
+	var dotEdges []string
+
+	if opts.DrawCycles {
+		for _, c := range g.Cycles {
+			if len(c) < 2 {
+				continue
+			}
+
+			for i, j := 0, 1; i < len(c); i, j = i+1, j+1 {
+				if j >= len(c) {
+					j = 0
+				}
+				src := c[i]
+				tgt := c[j]
+
+				if skip[src.ID] || skip[tgt.ID] {
+					continue
+				}
+
+				e := &marshalEdge{
+					Name:   fmt.Sprintf("%s|%s", src.Name, tgt.Name),
+					Source: src.ID,
+					Target: tgt.ID,
+					Attrs:  make(map[string]string),
+				}
+
+				dotEdges = append(dotEdges, cycleDot(e, g))
+				src = tgt
+			}
+		}
+	}
+
+	for _, e := range g.Edges {
+		dotEdges = append(dotEdges, e.dot(g))
+	}
+
+	// sort these again to match the old output
+	sort.Strings(dotEdges)
+
+	for _, e := range dotEdges {
+		w.WriteString(e + "\n")
+	}
+
+	w.Unindent()
+	w.WriteString("}\n")
+}
+
+func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
+	if len(attrs) > 0 {
+		buf.WriteString(" [")
+		buf.WriteString(strings.Join(attrStrings(attrs), ", "))
+		buf.WriteString("]")
+	}
+}
+
+func attrStrings(attrs map[string]string) []string {
+	strings := make([]string, 0, len(attrs))
+	for k, v := range attrs {
+		strings = append(strings, fmt.Sprintf("%s = %q", k, v))
+	}
+	sort.Strings(strings)
+	return strings
+}
+
+// indentWriter provides a bytes.Buffer like structure, which will indent when
+// starting a newline.
+type indentWriter struct {
+	bytes.Buffer
+	level int
+}
+
+func (w *indentWriter) indent() {
+	newline := []byte("\n")
+	if !bytes.HasSuffix(w.Bytes(), newline) {
+		return
+	}
+	for i := 0; i < w.level; i++ {
+		w.Buffer.WriteString("\t")
+	}
+}
+
+// Indent increases indentation by 1
+func (w *indentWriter) Indent() { w.level++ }
+
+// Unindent decreases indentation by 1
+func (w *indentWriter) Unindent() { w.level-- }
+
+// the following methods intercept the bytes.Buffer writes and insert the
+// indentation when starting a new line.
+func (w *indentWriter) Write(b []byte) (int, error) {
+	w.indent()
+	return w.Buffer.Write(b)
+}
+
+func (w *indentWriter) WriteString(s string) (int, error) {
+	w.indent()
+	return w.Buffer.WriteString(s)
+}
+func (w *indentWriter) WriteByte(b byte) error {
+	w.indent()
+	return w.Buffer.WriteByte(b)
+}
func (w *indentWriter) WriteRune(r rune) (int, error) {
	w.indent()
	return w.Buffer.WriteRune(r)
}
diff --git a/vendor/github.com/hashicorp/terraform/dag/edge.go b/vendor/github.com/hashicorp/terraform/dag/edge.go
new file mode 100644
index 0000000000..f0d99ee3a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/edge.go
@@ -0,0 +1,37 @@
+package dag
+
+import (
+	"fmt"
+)
+
+// Edge represents an edge in the graph, with a source and target vertex.
+type Edge interface {
+	Source() Vertex
+	Target() Vertex
+
+	Hashable
+}
+
+// BasicEdge returns an Edge implementation that simply tracks the source
+// and target given as-is.
+func BasicEdge(source, target Vertex) Edge {
+	return &basicEdge{S: source, T: target}
+}
+
+// basicEdge is a basic implementation of Edge that has the source and
+// target vertex.
+type basicEdge struct {
+	S, T Vertex
+}
+
+func (e *basicEdge) Hashcode() interface{} {
+	return fmt.Sprintf("%p-%p", e.S, e.T)
+}
+
+func (e *basicEdge) Source() Vertex {
+	return e.S
+}
+
+func (e *basicEdge) Target() Vertex {
+	return e.T
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go
new file mode 100644
index 0000000000..e7517a2062
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/graph.go
@@ -0,0 +1,391 @@
+package dag
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"sort"
+)
+
+// Graph is used to represent a dependency graph.
+type Graph struct {
+	vertices  *Set
+	edges     *Set
+	downEdges map[interface{}]*Set
+	upEdges   map[interface{}]*Set
+
+	// JSON encoder for recording debug information
+	debug *encoder
+}
+
+// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher.
+type Subgrapher interface {
+	Subgraph() Grapher
+}
+
+// A Grapher is any type that returns a Grapher, mainly used to identify
+// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they
+// return themselves.
+type Grapher interface {
+	DirectedGraph() Grapher
+}
+
+// Vertex of the graph.
+type Vertex interface{}
+
+// NamedVertex is an optional interface that can be implemented by Vertex
+// to give it a human-friendly name that is used for outputting the graph.
+type NamedVertex interface {
+	Vertex
+	Name() string
+}
+
+func (g *Graph) DirectedGraph() Grapher {
+	return g
+}
+
+// Vertices returns the list of all the vertices in the graph.
+func (g *Graph) Vertices() []Vertex {
+	list := g.vertices.List()
+	result := make([]Vertex, len(list))
+	for i, v := range list {
+		result[i] = v.(Vertex)
+	}
+
+	return result
+}
+
+// Edges returns the list of all the edges in the graph.
+func (g *Graph) Edges() []Edge {
+	list := g.edges.List()
+	result := make([]Edge, len(list))
+	for i, v := range list {
+		result[i] = v.(Edge)
+	}
+
+	return result
+}
+
+// EdgesFrom returns the list of edges from the given source.
+func (g *Graph) EdgesFrom(v Vertex) []Edge {
+	var result []Edge
+	from := hashcode(v)
+	for _, e := range g.Edges() {
+		if hashcode(e.Source()) == from {
+			result = append(result, e)
+		}
+	}
+
+	return result
+}
+
+// EdgesTo returns the list of edges to the given target.
+func (g *Graph) EdgesTo(v Vertex) []Edge {
+	var result []Edge
+	search := hashcode(v)
+	for _, e := range g.Edges() {
+		if hashcode(e.Target()) == search {
+			result = append(result, e)
+		}
+	}
+
+	return result
+}
+
+// HasVertex checks if the given Vertex is present in the graph.
+func (g *Graph) HasVertex(v Vertex) bool {
+	return g.vertices.Include(v)
+}
+
+// HasEdge checks if the given Edge is present in the graph.
+func (g *Graph) HasEdge(e Edge) bool {
+	return g.edges.Include(e)
+}
+
+// Add adds a vertex to the graph. This is safe to call multiple times with
+// the same Vertex.
+func (g *Graph) Add(v Vertex) Vertex {
+	g.init()
+	g.vertices.Add(v)
+	g.debug.Add(v)
+	return v
+}
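Remove (just below) also clears both edge indexes for the vertex, which the following sketch makes visible using only the API in this file:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.Graph
	g.Add("a")
	g.Add("b")
	g.Connect(dag.BasicEdge("a", "b"))

	// Removing "a" removes the a -> b edge along with the vertex.
	g.Remove("a")
	fmt.Println(g.HasVertex("a"))    // false
	fmt.Println(len(g.EdgesTo("b"))) // 0
}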
+
+// Remove removes a vertex from the graph. This will also remove any
+// edges with this vertex as a source or target.
+func (g *Graph) Remove(v Vertex) Vertex {
+	// Delete the vertex itself
+	g.vertices.Delete(v)
+	g.debug.Remove(v)
+
+	// Delete the edges attached to this vertex
+	for _, target := range g.DownEdges(v).List() {
+		g.RemoveEdge(BasicEdge(v, target))
+	}
+	for _, source := range g.UpEdges(v).List() {
+		g.RemoveEdge(BasicEdge(source, v))
+	}
+
+	return nil
+}
+
+// Replace replaces the original Vertex with replacement. If the original
+// does not exist within the graph, then false is returned. Otherwise, true
+// is returned.
+func (g *Graph) Replace(original, replacement Vertex) bool {
+	// If we don't have the original, we can't do anything
+	if !g.vertices.Include(original) {
+		return false
+	}
+
+	defer g.debug.BeginOperation("Replace", "").End("")
+
+	// If they're the same, then don't do anything
+	if original == replacement {
+		return true
+	}
+
+	// Add our new vertex, then copy all the edges
+	g.Add(replacement)
+	for _, target := range g.DownEdges(original).List() {
+		g.Connect(BasicEdge(replacement, target))
+	}
+	for _, source := range g.UpEdges(original).List() {
+		g.Connect(BasicEdge(source, replacement))
+	}
+
+	// Remove our old vertex, which will also remove all the edges
+	g.Remove(original)
+
+	return true
+}
+
+// RemoveEdge removes an edge from the graph.
+func (g *Graph) RemoveEdge(edge Edge) {
+	g.init()
+	g.debug.RemoveEdge(edge)
+
+	// Delete the edge from the set
+	g.edges.Delete(edge)
+
+	// Delete the up/down edges
+	if s, ok := g.downEdges[hashcode(edge.Source())]; ok {
+		s.Delete(edge.Target())
+	}
+	if s, ok := g.upEdges[hashcode(edge.Target())]; ok {
+		s.Delete(edge.Source())
+	}
+}
+
+// DownEdges returns the outward edges from the source Vertex v.
+func (g *Graph) DownEdges(v Vertex) *Set {
+	g.init()
+	return g.downEdges[hashcode(v)]
+}
+
+// UpEdges returns the inward edges to the destination Vertex v.
+func (g *Graph) UpEdges(v Vertex) *Set {
+	g.init()
+	return g.upEdges[hashcode(v)]
+}
+
+// Connect adds an edge with the given source and target. This is safe to
+// call multiple times with the same value. Note that the same value is
+// verified through pointer equality of the vertices, not through the
+// value of the edge itself.
+func (g *Graph) Connect(edge Edge) {
+	g.init()
+	g.debug.Connect(edge)
+
+	source := edge.Source()
+	target := edge.Target()
+	sourceCode := hashcode(source)
+	targetCode := hashcode(target)
+
+	// Do we have this already? If so, don't add it again.
+	if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) {
+		return
+	}
+
+	// Add the edge to the set
+	g.edges.Add(edge)
+
+	// Add the down edge
+	s, ok := g.downEdges[sourceCode]
+	if !ok {
+		s = new(Set)
+		g.downEdges[sourceCode] = s
+	}
+	s.Add(target)
+
+	// Add the up edge
+	s, ok = g.upEdges[targetCode]
+	if !ok {
+		s = new(Set)
+		g.upEdges[targetCode] = s
+	}
+	s.Add(source)
+}
+
+// StringWithNodeTypes outputs human-friendly output for the graph structure,
+// including the type of each node.
+func (g *Graph) StringWithNodeTypes() string {
+	var buf bytes.Buffer
+
+	// Build the list of node names and a mapping so that we can more
+	// easily alphabetize the output to remain deterministic.
+	vertices := g.Vertices()
+	names := make([]string, 0, len(vertices))
+	mapping := make(map[string]Vertex, len(vertices))
+	for _, v := range vertices {
+		name := VertexName(v)
+		names = append(names, name)
+		mapping[name] = v
+	}
+	sort.Strings(names)
+
+	// Write each node in order...
+ for _, name := range names { + v := mapping[name] + targets := g.downEdges[hashcode(v)] + + buf.WriteString(fmt.Sprintf("%s - %T\n", name, v)) + + // Alphabetize dependencies + deps := make([]string, 0, targets.Len()) + targetNodes := make(map[string]Vertex) + for _, target := range targets.List() { + dep := VertexName(target) + deps = append(deps, dep) + targetNodes[dep] = target + } + sort.Strings(deps) + + // Write dependencies + for _, d := range deps { + buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d])) + } + } + + return buf.String() +} + +// String outputs some human-friendly output for the graph structure. +func (g *Graph) String() string { + var buf bytes.Buffer + + // Build the list of node names and a mapping so that we can more + // easily alphabetize the output to remain deterministic. + vertices := g.Vertices() + names := make([]string, 0, len(vertices)) + mapping := make(map[string]Vertex, len(vertices)) + for _, v := range vertices { + name := VertexName(v) + names = append(names, name) + mapping[name] = v + } + sort.Strings(names) + + // Write each node in order... + for _, name := range names { + v := mapping[name] + targets := g.downEdges[hashcode(v)] + + buf.WriteString(fmt.Sprintf("%s\n", name)) + + // Alphabetize dependencies + deps := make([]string, 0, targets.Len()) + for _, target := range targets.List() { + deps = append(deps, VertexName(target)) + } + sort.Strings(deps) + + // Write dependencies + for _, d := range deps { + buf.WriteString(fmt.Sprintf(" %s\n", d)) + } + } + + return buf.String() +} + +func (g *Graph) init() { + if g.vertices == nil { + g.vertices = new(Set) + } + if g.edges == nil { + g.edges = new(Set) + } + if g.downEdges == nil { + g.downEdges = make(map[interface{}]*Set) + } + if g.upEdges == nil { + g.upEdges = make(map[interface{}]*Set) + } +} + +// Dot returns a dot-formatted representation of the Graph. +func (g *Graph) Dot(opts *DotOpts) []byte { + return newMarshalGraph("", g).Dot(opts) +} + +// MarshalJSON returns a JSON representation of the entire Graph. +func (g *Graph) MarshalJSON() ([]byte, error) { + dg := newMarshalGraph("root", g) + return json.MarshalIndent(dg, "", " ") +} + +// SetDebugWriter sets the io.Writer where the Graph will record debug +// information. After this is set, the graph will immediately encode itself to +// the stream, and continue to record all subsequent operations. +func (g *Graph) SetDebugWriter(w io.Writer) { + g.debug = &encoder{w: w} + g.debug.Encode(newMarshalGraph("root", g)) +} + +// DebugVertexInfo encodes arbitrary information about a vertex in the graph +// debug logs. +func (g *Graph) DebugVertexInfo(v Vertex, info string) { + va := newVertexInfo(typeVertexInfo, v, info) + g.debug.Encode(va) +} + +// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug +// logs. +func (g *Graph) DebugEdgeInfo(e Edge, info string) { + ea := newEdgeInfo(typeEdgeInfo, e, info) + g.debug.Encode(ea) +} + +// DebugVisitInfo records a visit to a Vertex during a walk operation. +func (g *Graph) DebugVisitInfo(v Vertex, info string) { + vi := newVertexInfo(typeVisitInfo, v, info) + g.debug.Encode(vi) +} + +// DebugOperation marks the start of a set of graph transformations in +// the debug log, and returns a DebugOperationEnd func, which marks the end of +// the operation in the log. Additional information can be added to the log via +// the info parameter. 
+// +// The returned func's End method allows this method to be called from a single +// defer statement: +// defer g.DebugOperationBegin("OpName", "operating").End("") +// +// The returned function must be called to properly close the logical operation +// in the logs. +func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd { + return g.debug.BeginOperation(operation, info) +} + +// VertexName returns the name of a vertex. +func VertexName(raw Vertex) string { + switch v := raw.(type) { + case NamedVertex: + return v.Name() + case fmt.Stringer: + return fmt.Sprintf("%s", v) + default: + return fmt.Sprintf("%v", v) + } +} diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go new file mode 100644 index 0000000000..16d5dd6dde --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/dag/marshal.go @@ -0,0 +1,462 @@ +package dag + +import ( + "encoding/json" + "fmt" + "io" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +const ( + typeOperation = "Operation" + typeTransform = "Transform" + typeWalk = "Walk" + typeDepthFirstWalk = "DepthFirstWalk" + typeReverseDepthFirstWalk = "ReverseDepthFirstWalk" + typeTransitiveReduction = "TransitiveReduction" + typeEdgeInfo = "EdgeInfo" + typeVertexInfo = "VertexInfo" + typeVisitInfo = "VisitInfo" +) + +// the marshal* structs are for serialization of the graph data. +type marshalGraph struct { + // Type is always "Graph", for identification as a top level object in the + // JSON stream. + Type string + + // Each marshal structure requires a unique ID so that it can be referenced + // by other structures. + ID string `json:",omitempty"` + + // Human readable name for this graph. + Name string `json:",omitempty"` + + // Arbitrary attributes that can be added to the output. + Attrs map[string]string `json:",omitempty"` + + // List of graph vertices, sorted by ID. + Vertices []*marshalVertex `json:",omitempty"` + + // List of edges, sorted by Source ID. + Edges []*marshalEdge `json:",omitempty"` + + // Any number of subgraphs. A subgraph itself is considered a vertex, and + // may be referenced by either end of an edge. + Subgraphs []*marshalGraph `json:",omitempty"` + + // Any lists of vertices that are included in cycles. + Cycles [][]*marshalVertex `json:",omitempty"` +} + +// The add, remove, connect, removeEdge methods mirror the basic Graph +// manipulations to reconstruct a marshalGraph from a debug log. +func (g *marshalGraph) add(v *marshalVertex) { + g.Vertices = append(g.Vertices, v) + sort.Sort(vertices(g.Vertices)) +} + +func (g *marshalGraph) remove(v *marshalVertex) { + for i, existing := range g.Vertices { + if v.ID == existing.ID { + g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...) + return + } + } +} + +func (g *marshalGraph) connect(e *marshalEdge) { + g.Edges = append(g.Edges, e) + sort.Sort(edges(g.Edges)) +} + +func (g *marshalGraph) removeEdge(e *marshalEdge) { + for i, existing := range g.Edges { + if e.Source == existing.Source && e.Target == existing.Target { + g.Edges = append(g.Edges[:i], g.Edges[i+1:]...) + return + } + } +} + +func (g *marshalGraph) vertexByID(id string) *marshalVertex { + for _, v := range g.Vertices { + if id == v.ID { + return v + } + } + return nil +} + +type marshalVertex struct { + // Unique ID, used to reference this vertex from other structures. 
+	ID string
+
+	// Human readable name
+	Name string `json:",omitempty"`
+
+	Attrs map[string]string `json:",omitempty"`
+
+	// This is to help transition from the old Dot interfaces. We record if the
+	// node was a GraphNodeDotter here, so we can call it to get attributes.
+	graphNodeDotter GraphNodeDotter
+}
+
+func newMarshalVertex(v Vertex) *marshalVertex {
+	dn, ok := v.(GraphNodeDotter)
+	if !ok {
+		dn = nil
+	}
+
+	return &marshalVertex{
+		ID:              marshalVertexID(v),
+		Name:            VertexName(v),
+		Attrs:           make(map[string]string),
+		graphNodeDotter: dn,
+	}
+}
+
+// vertices is a sort.Interface implementation for sorting vertices by Name
+type vertices []*marshalVertex
+
+func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name }
+func (v vertices) Len() int           { return len(v) }
+func (v vertices) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
+
+type marshalEdge struct {
+	// Human readable name
+	Name string
+
+	// Source and Target Vertices by ID
+	Source string
+	Target string
+
+	Attrs map[string]string `json:",omitempty"`
+}
+
+func newMarshalEdge(e Edge) *marshalEdge {
+	return &marshalEdge{
+		Name:   fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())),
+		Source: marshalVertexID(e.Source()),
+		Target: marshalVertexID(e.Target()),
+		Attrs:  make(map[string]string),
+	}
+}
+
+// edges is a sort.Interface implementation for sorting edges by Name
+type edges []*marshalEdge
+
+func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name }
+func (e edges) Len() int           { return len(e) }
+func (e edges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+
+// newMarshalGraph builds a marshalGraph structure from a *Graph.
+func newMarshalGraph(name string, g *Graph) *marshalGraph {
+	mg := &marshalGraph{
+		Type:  "Graph",
+		Name:  name,
+		Attrs: make(map[string]string),
+	}
+
+	for _, v := range g.Vertices() {
+		id := marshalVertexID(v)
+		if sg, ok := marshalSubgrapher(v); ok {
+			smg := newMarshalGraph(VertexName(v), sg)
+			smg.ID = id
+			mg.Subgraphs = append(mg.Subgraphs, smg)
+		}
+
+		mv := newMarshalVertex(v)
+		mg.Vertices = append(mg.Vertices, mv)
+	}
+
+	sort.Sort(vertices(mg.Vertices))
+
+	for _, e := range g.Edges() {
+		mg.Edges = append(mg.Edges, newMarshalEdge(e))
+	}
+
+	sort.Sort(edges(mg.Edges))
+
+	for _, c := range (&AcyclicGraph{*g}).Cycles() {
+		var cycle []*marshalVertex
+		for _, v := range c {
+			mv := newMarshalVertex(v)
+			cycle = append(cycle, mv)
+		}
+		mg.Cycles = append(mg.Cycles, cycle)
+	}
+
+	return mg
+}
+
+// marshalVertexID attempts to return a unique ID for any vertex.
+func marshalVertexID(v Vertex) string {
+	val := reflect.ValueOf(v)
+	switch val.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+		return strconv.Itoa(int(val.Pointer()))
+	case reflect.Interface:
+		return strconv.Itoa(int(val.InterfaceData()[1]))
+	}
+
+	if v, ok := v.(Hashable); ok {
+		h := v.Hashcode()
+		if h, ok := h.(string); ok {
+			return h
+		}
+	}
+
+	// fallback to a name, which we hope is unique.
+	return VertexName(v)
+
+	// we could try harder by attempting to read the arbitrary value from the
+	// interface, but we shouldn't get here from terraform right now.
+}
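These marshal types also back the graph debug stream; a sketch of recording operations to a buffer and replaying them through JSON2Dot (defined later in this file). Note that plain string vertices appear only via their edges in the output, since they don't implement GraphNodeDotter:

package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.Graph
	var buf bytes.Buffer

	// Snapshot the (still empty) graph, then record each mutation as JSON.
	g.SetDebugWriter(&buf)
	g.Add("a")
	g.Add("b")
	g.Connect(dag.BasicEdge("a", "b"))

	// Replay the JSON log into a final dot rendering.
	dot, err := dag.JSON2Dot(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dot))
}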
+
+// marshalSubgrapher checks for a Subgrapher, and returns the underlying
+// *Graph.
+func marshalSubgrapher(v Vertex) (*Graph, bool) {
+	sg, ok := v.(Subgrapher)
+	if !ok {
+		return nil, false
+	}
+
+	switch g := sg.Subgraph().DirectedGraph().(type) {
+	case *Graph:
+		return g, true
+	case *AcyclicGraph:
+		return &g.Graph, true
+	}
+
+	return nil, false
+}
+
+// The DebugOperationEnd func type provides a way to call an End function via a
+// method call, allowing for the chaining of methods in a defer statement.
+type DebugOperationEnd func(string)
+
+// End calls function e with the info parameter, marking the end of this
+// operation in the logs.
+func (e DebugOperationEnd) End(info string) { e(info) }
+
+// encoder provides methods to write debug data to an io.Writer, and is a noop
+// when no writer is present
+type encoder struct {
+	sync.Mutex
+	w io.Writer
+}
+
+// Encode is analogous to json.Encoder.Encode
+func (e *encoder) Encode(i interface{}) {
+	if e == nil || e.w == nil {
+		return
+	}
+	e.Lock()
+	defer e.Unlock()
+
+	js, err := json.Marshal(i)
+	if err != nil {
+		log.Println("[ERROR] dag:", err)
+		return
+	}
+	js = append(js, '\n')
+
+	_, err = e.w.Write(js)
+	if err != nil {
+		log.Println("[ERROR] dag:", err)
+		return
+	}
+}
+
+// Add records the addition of Vertex v.
+func (e *encoder) Add(v Vertex) {
+	e.Encode(marshalTransform{
+		Type:      typeTransform,
+		AddVertex: newMarshalVertex(v),
+	})
+}
+
+// Remove records the removal of Vertex v.
+func (e *encoder) Remove(v Vertex) {
+	e.Encode(marshalTransform{
+		Type:         typeTransform,
+		RemoveVertex: newMarshalVertex(v),
+	})
+}
+
+// Connect records the addition of edge.
+func (e *encoder) Connect(edge Edge) {
+	e.Encode(marshalTransform{
+		Type:    typeTransform,
+		AddEdge: newMarshalEdge(edge),
+	})
+}
+
+// RemoveEdge records the removal of edge.
+func (e *encoder) RemoveEdge(edge Edge) {
+	e.Encode(marshalTransform{
+		Type:       typeTransform,
+		RemoveEdge: newMarshalEdge(edge),
+	})
+}
+
+// BeginOperation marks the start of a set of graph transformations, and
+// returns a DebugOperationEnd func to be called once the operation is
+// complete.
+func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd {
+	if e == nil {
+		return func(string) {}
+	}
+
+	e.Encode(marshalOperation{
+		Type:  typeOperation,
+		Begin: op,
+		Info:  info,
+	})
+
+	return func(info string) {
+		e.Encode(marshalOperation{
+			Type: typeOperation,
+			End:  op,
+			Info: info,
+		})
+	}
+}
+
+// marshalTransform is a structure for recording graph transformations.
+type marshalTransform struct {
+	// Type: "Transform"
+	Type         string
+	AddEdge      *marshalEdge   `json:",omitempty"`
+	RemoveEdge   *marshalEdge   `json:",omitempty"`
+	AddVertex    *marshalVertex `json:",omitempty"`
+	RemoveVertex *marshalVertex `json:",omitempty"`
+}
+
+func (t marshalTransform) Transform(g *marshalGraph) {
+	switch {
+	case t.AddEdge != nil:
+		g.connect(t.AddEdge)
+	case t.RemoveEdge != nil:
+		g.removeEdge(t.RemoveEdge)
+	case t.AddVertex != nil:
+		g.add(t.AddVertex)
+	case t.RemoveVertex != nil:
+		g.remove(t.RemoveVertex)
+	}
+}
+
+// streamDecode allows us to decode any object in the json stream for
+// inspection, then re-decode it into a proper struct if needed.
+type streamDecode struct {
+	Type string
+	Map  map[string]interface{}
+	JSON []byte
+}
+
+func (s *streamDecode) UnmarshalJSON(d []byte) error {
+	s.JSON = d
+	err := json.Unmarshal(d, &s.Map)
+	if err != nil {
+		return err
+	}
+
+	if t, ok := s.Map["Type"]; ok {
+		s.Type, _ = t.(string)
+	}
+	return nil
+}
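The begin/end markers recorded by BeginOperation above pair naturally with a single defer, which matches how the dag package itself uses them. A small sketch; the operation name and info strings are illustrative:

package main

import "github.com/hashicorp/terraform/dag"

// transformGraph emits paired begin/end markers around its mutations.
func transformGraph(g *dag.Graph) {
	defer g.DebugOperation("transformGraph", "rewiring edges").End("done")

	g.Add("example") // mutations land between the two markers in the log
}

func main() {
	var g dag.Graph
	transformGraph(&g) // without SetDebugWriter this is a harmless no-op
}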
+
+// marshalOperation is a structure for recording the beginning and end of any
+// multi-step transformations. These are informational, and not required to
+// reproduce the graph state.
+type marshalOperation struct {
+	Type  string
+	Begin string `json:",omitempty"`
+	End   string `json:",omitempty"`
+	Info  string `json:",omitempty"`
+}
+
+// decodeGraph decodes a marshalGraph from an encoded graph stream.
+func decodeGraph(r io.Reader) (*marshalGraph, error) {
+	dec := json.NewDecoder(r)
+
+	// a stream should always start with a graph
+	g := &marshalGraph{}
+
+	err := dec.Decode(g)
+	if err != nil {
+		return nil, err
+	}
+
+	// now replay any operations that occurred on the original graph
+	for dec.More() {
+		s := &streamDecode{}
+		err := dec.Decode(s)
+		if err != nil {
+			return g, err
+		}
+
+		// the only Type we're concerned with here is Transform to complete the
+		// Graph
+		if s.Type != typeTransform {
+			continue
+		}
+
+		t := &marshalTransform{}
+		err = json.Unmarshal(s.JSON, t)
+		if err != nil {
+			return g, err
+		}
+		t.Transform(g)
+	}
+	return g, nil
+}
+
+// marshalVertexInfo allows encoding arbitrary information about a single
+// Vertex in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalVertexInfo struct {
+	Type   string
+	Vertex *marshalVertex
+	Info   string
+}
+
+func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo {
+	return &marshalVertexInfo{
+		Type:   infoType,
+		Vertex: newMarshalVertex(v),
+		Info:   info,
+	}
+}
+
+// marshalEdgeInfo allows encoding arbitrary information about a single
+// Edge in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalEdgeInfo struct {
+	Type string
+	Edge *marshalEdge
+	Info string
+}
+
+func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo {
+	return &marshalEdgeInfo{
+		Type: infoType,
+		Edge: newMarshalEdge(e),
+		Info: info,
+	}
+}
+
+// JSON2Dot reads a Graph debug log from an io.Reader, and converts the final
+// graph to dot format.
+//
+// TODO: Allow returning the output at a certain point during decode.
+//       Encode extra information from the json log into the Dot.
+func JSON2Dot(r io.Reader) ([]byte, error) {
+	g, err := decodeGraph(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return g.Dot(nil), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go
new file mode 100644
index 0000000000..92b42151d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/set.go
@@ -0,0 +1,123 @@
+package dag
+
+import (
+	"sync"
+)
+
+// Set is a set data structure.
+type Set struct {
+	m    map[interface{}]interface{}
+	once sync.Once
+}
+
+// Hashable is the interface used by set to get the hash code of a value.
+// If this isn't given, then the value of the item being added to the set
+// itself is used as the comparison value.
+type Hashable interface {
+	Hashcode() interface{}
+}
+
+// hashcode returns the hashcode used for set elements.
+func hashcode(v interface{}) interface{} {
+	if h, ok := v.(Hashable); ok {
+		return h.Hashcode()
+	}
+
+	return v
+}
+
+// Add adds an item to the set
+func (s *Set) Add(v interface{}) {
+	s.once.Do(s.init)
+	s.m[hashcode(v)] = v
+}
+
+// Delete removes an item from the set.
+func (s *Set) Delete(v interface{}) {
+	s.once.Do(s.init)
+	delete(s.m, hashcode(v))
+}
+
+// Include returns true/false of whether a value is in the set.
+func (s *Set) Include(v interface{}) bool {
+	s.once.Do(s.init)
+	_, ok := s.m[hashcode(v)]
+	return ok
+}
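A quick sketch of the Set operations defined in this file, using plain int elements and only the methods shown here:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var a, b dag.Set
	a.Add(1)
	a.Add(2)
	b.Add(2)
	b.Add(3)

	both := a.Intersection(&b)
	fmt.Println(both.Include(2), both.Include(3)) // true false

	onlyA := a.Difference(&b)
	fmt.Println(onlyA.List()) // [1]
}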
+// Intersection computes the set intersection with other.
+func (s *Set) Intersection(other *Set) *Set {
+	result := new(Set)
+	if s == nil {
+		return result
+	}
+	if other != nil {
+		for _, v := range s.m {
+			if other.Include(v) {
+				result.Add(v)
+			}
+		}
+	}
+
+	return result
+}
+
+// Difference returns a set with the elements that s has but
+// other doesn't.
+func (s *Set) Difference(other *Set) *Set {
+	result := new(Set)
+	if s != nil {
+		for k, v := range s.m {
+			var ok bool
+			if other != nil {
+				_, ok = other.m[k]
+			}
+			if !ok {
+				result.Add(v)
+			}
+		}
+	}
+
+	return result
+}
+
+// Filter returns a set that contains the elements from the receiver
+// where the given callback returns true.
+func (s *Set) Filter(cb func(interface{}) bool) *Set {
+	result := new(Set)
+
+	for _, v := range s.m {
+		if cb(v) {
+			result.Add(v)
+		}
+	}
+
+	return result
+}
+
+// Len is the number of items in the set.
+func (s *Set) Len() int {
+	if s == nil {
+		return 0
+	}
+
+	return len(s.m)
+}
+
+// List returns the list of set elements.
+func (s *Set) List() []interface{} {
+	if s == nil {
+		return nil
+	}
+
+	r := make([]interface{}, 0, len(s.m))
+	for _, v := range s.m {
+		r = append(r, v)
+	}
+
+	return r
+}
+
+func (s *Set) init() {
+	s.m = make(map[interface{}]interface{})
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/tarjan.go b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
new file mode 100644
index 0000000000..9d8b25ce2c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
@@ -0,0 +1,107 @@
+package dag
+
+// StronglyConnected returns the list of strongly connected components
+// within the Graph g. This information is primarily used by this package
+// for cycle detection, but strongly connected components have widespread
+// use.
+func StronglyConnected(g *Graph) [][]Vertex {
+	vs := g.Vertices()
+	acct := sccAcct{
+		NextIndex:   1,
+		VertexIndex: make(map[Vertex]int, len(vs)),
+	}
+	for _, v := range vs {
+		// Recurse on any non-visited nodes
+		if acct.VertexIndex[v] == 0 {
+			stronglyConnected(&acct, g, v)
+		}
+	}
+	return acct.SCC
+}
+
+func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int {
+	// Initial vertex visit
+	index := acct.visit(v)
+	minIdx := index
+
+	for _, raw := range g.DownEdges(v).List() {
+		target := raw.(Vertex)
+		targetIdx := acct.VertexIndex[target]
+
+		// Recurse on successor if not yet visited
+		if targetIdx == 0 {
+			minIdx = min(minIdx, stronglyConnected(acct, g, target))
+		} else if acct.inStack(target) {
+			// Check if the vertex is in the stack
+			minIdx = min(minIdx, targetIdx)
+		}
+	}
+
+	// Pop the strongly connected components off the stack if
+	// this is a root vertex
+	if index == minIdx {
+		var scc []Vertex
+		for {
+			v2 := acct.pop()
+			scc = append(scc, v2)
+			if v2 == v {
+				break
+			}
+		}
+
+		acct.SCC = append(acct.SCC, scc)
+	}
+
+	return minIdx
+}
+
+func min(a, b int) int {
+	if a <= b {
+		return a
+	}
+	return b
+}
+
+// sccAcct is used to pass around accounting information for
+// the StronglyConnectedComponents algorithm
+type sccAcct struct {
+	NextIndex   int
+	VertexIndex map[Vertex]int
+	Stack       []Vertex
+	SCC         [][]Vertex
+}
+
+// visit assigns an index and pushes a vertex onto the stack
+func (s *sccAcct) visit(v Vertex) int {
+	idx := s.NextIndex
+	s.VertexIndex[v] = idx
+	s.NextIndex++
+	s.push(v)
+	return idx
+}
+
+// push adds a vertex to the stack
+func (s *sccAcct) push(n Vertex) {
+	s.Stack = append(s.Stack, n)
+}
+
+// pop removes a vertex from the stack
+func (s *sccAcct) pop() Vertex {
+	n := len(s.Stack)
+	if n == 0 {
+		return nil
+	}
+	vertex := s.Stack[n-1]
+	s.Stack = s.Stack[:n-1]
+	return vertex
+}
+
+// inStack checks if a vertex is in the stack
+func (s *sccAcct) inStack(needle Vertex) bool {
+	for _, n := range s.Stack {
+		if n == needle {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go
new file mode 100644
index 0000000000..23c87adc1a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/walk.go
@@ -0,0 +1,445 @@
+package dag
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+// Walker is used to walk every vertex of a graph in parallel.
+//
+// A vertex will only be walked when the dependencies of that vertex have
+// been walked. If two vertices can be walked at the same time, they will be.
+//
+// Update can be called to update the graph. This can be called even during
+// a walk, changing vertices/edges mid-walk. This should be done carefully.
+// If a vertex is removed but has already been executed, the result of that
+// execution (any error) is still returned by Wait. Changing or re-adding
+// a vertex that has already executed has no effect. Changing edges of
+// a vertex that has already executed has no effect.
+//
+// Non-parallelism can be enforced by introducing a lock in your callback
+// function. However, the goroutine overhead of a walk will remain.
+// Walker will create V*2 goroutines (one for each vertex, and a dependency
+// waiter for each vertex). In general this should be of no concern unless
+// there are a huge number of vertices.
+//
+// The walk is depth first by default. This can be changed with the Reverse
+// option.
+//
+// A single walker is only valid for one graph walk. After the walk is complete
+// you must construct a new walker to walk again. State for the walk is never
+// deleted in case vertices or edges are changed.
+type Walker struct {
+	// Callback is what is called for each vertex
+	Callback WalkFunc
+
+	// Reverse, if true, causes the source of an edge to depend on a target.
+	// When false (default), the target depends on the source.
+	Reverse bool
+
+	// changeLock must be held to modify any of the fields below. Only Update
+	// should modify these fields. Modifying them outside of Update can cause
+	// serious problems.
+	changeLock sync.Mutex
+	vertices   Set
+	edges      Set
+	vertexMap  map[Vertex]*walkerVertex
+
+	// wait is done when all vertices have executed. It may become "undone"
+	// if new vertices are added.
+	wait sync.WaitGroup
+
+	// errMap contains the errors recorded so far for execution. Reading
+	// and writing should hold errLock.
+	errMap  map[Vertex]error
+	errLock sync.Mutex
+}
+
+type walkerVertex struct {
+	// These should only be set once on initialization and never written again.
+	// They are not protected by a lock because they are write-once.
+
+	// DoneCh is closed when this vertex has completed execution, regardless
+	// of success.
+	//
+	// CancelCh is closed when the vertex should cancel execution. If execution
+	// is already complete (DoneCh is closed), this has no effect. Otherwise,
+	// execution is cancelled as quickly as possible.
+	DoneCh   chan struct{}
+	CancelCh chan struct{}
+
+	// Dependency information. Any changes to any of these fields require
+	// holding DepsLock.
+	//
+	// DepsCh is sent a single value that denotes whether the upstream deps
+	// were successful (no errors).
Any value sent means that the upstream + // dependencies are complete. No other values will ever be sent again. + // + // DepsUpdateCh is closed when there is a new DepsCh set. + DepsCh chan bool + DepsUpdateCh chan struct{} + DepsLock sync.Mutex + + // Below is not safe to read/write in parallel. This behavior is + // enforced by changes only happening in Update. Nothing else should + // ever modify these. + deps map[Vertex]chan struct{} + depsCancelCh chan struct{} +} + +// errWalkUpstream is used in the errMap of a walk to note that an upstream +// dependency failed so this vertex wasn't run. This is not shown in the final +// user-returned error. +var errWalkUpstream = errors.New("upstream dependency failed") + +// Wait waits for the completion of the walk and returns any errors ( +// in the form of a multierror) that occurred. Update should be called +// to populate the walk with vertices and edges prior to calling this. +// +// Wait will return as soon as all currently known vertices are complete. +// If you plan on calling Update with more vertices in the future, you +// should not call Wait until after this is done. +func (w *Walker) Wait() error { + // Wait for completion + w.wait.Wait() + + // Grab the error lock + w.errLock.Lock() + defer w.errLock.Unlock() + + // Build the error + var result error + for v, err := range w.errMap { + if err != nil && err != errWalkUpstream { + result = multierror.Append(result, fmt.Errorf( + "%s: %s", VertexName(v), err)) + } + } + + return result +} + +// Update updates the currently executing walk with the given graph. +// This will perform a diff of the vertices and edges and update the walker. +// Already completed vertices remain completed (including any errors during +// their execution). +// +// This returns immediately once the walker is updated; it does not wait +// for completion of the walk. +// +// Multiple Updates can be called in parallel. Update can be called at any +// time during a walk. +func (w *Walker) Update(g *AcyclicGraph) { + var v, e *Set + if g != nil { + v, e = g.vertices, g.edges + } + + // Grab the change lock so no more updates happen but also so that + // no new vertices are executed during this time since we may be + // removing them. + w.changeLock.Lock() + defer w.changeLock.Unlock() + + // Initialize fields + if w.vertexMap == nil { + w.vertexMap = make(map[Vertex]*walkerVertex) + } + + // Calculate all our sets + newEdges := e.Difference(&w.edges) + oldEdges := w.edges.Difference(e) + newVerts := v.Difference(&w.vertices) + oldVerts := w.vertices.Difference(v) + + // Add the new vertices + for _, raw := range newVerts.List() { + v := raw.(Vertex) + + // Add to the waitgroup so our walk is not done until everything finishes + w.wait.Add(1) + + // Add to our own set so we know about it already + log.Printf("[DEBUG] dag/walk: added new vertex: %q", VertexName(v)) + w.vertices.Add(raw) + + // Initialize the vertex info + info := &walkerVertex{ + DoneCh: make(chan struct{}), + CancelCh: make(chan struct{}), + deps: make(map[Vertex]chan struct{}), + } + + // Add it to the map and kick off the walk + w.vertexMap[v] = info + } + + // Remove the old vertices + for _, raw := range oldVerts.List() { + v := raw.(Vertex) + + // Get the vertex info so we can cancel it + info, ok := w.vertexMap[v] + if !ok { + // This vertex for some reason was never in our map. This + // shouldn't be possible. 
+			continue
+		}
+
+		// Cancel the vertex
+		close(info.CancelCh)
+
+		// Delete it out of the map
+		delete(w.vertexMap, v)
+
+		log.Printf("[DEBUG] dag/walk: removed vertex: %q", VertexName(v))
+		w.vertices.Delete(raw)
+	}
+
+	// Add the new edges
+	var changedDeps Set
+	for _, raw := range newEdges.List() {
+		edge := raw.(Edge)
+		waiter, dep := w.edgeParts(edge)
+
+		// Get the info for the waiter
+		waiterInfo, ok := w.vertexMap[waiter]
+		if !ok {
+			// Vertex doesn't exist... shouldn't be possible but ignore.
+			continue
+		}
+
+		// Get the info for the dep
+		depInfo, ok := w.vertexMap[dep]
+		if !ok {
+			// Vertex doesn't exist... shouldn't be possible but ignore.
+			continue
+		}
+
+		// Add the dependency to our waiter
+		waiterInfo.deps[dep] = depInfo.DoneCh
+
+		// Record that the deps changed for this waiter
+		changedDeps.Add(waiter)
+
+		log.Printf(
+			"[DEBUG] dag/walk: added edge: %q waiting on %q",
+			VertexName(waiter), VertexName(dep))
+		w.edges.Add(raw)
+	}
+
+	// Process removed edges
+	for _, raw := range oldEdges.List() {
+		edge := raw.(Edge)
+		waiter, dep := w.edgeParts(edge)
+
+		// Get the info for the waiter
+		waiterInfo, ok := w.vertexMap[waiter]
+		if !ok {
+			// Vertex doesn't exist... shouldn't be possible but ignore.
+			continue
+		}
+
+		// Delete the dependency from the waiter
+		delete(waiterInfo.deps, dep)
+
+		// Record that the deps changed for this waiter
+		changedDeps.Add(waiter)
+
+		log.Printf(
+			"[DEBUG] dag/walk: removed edge: %q waiting on %q",
+			VertexName(waiter), VertexName(dep))
+		w.edges.Delete(raw)
+	}
+
+	// For each vertex with changed dependencies, we need to kick off
+	// a new waiter and notify the vertex of the changes.
+	for _, raw := range changedDeps.List() {
+		v := raw.(Vertex)
+		info, ok := w.vertexMap[v]
+		if !ok {
+			// Vertex doesn't exist... shouldn't be possible but ignore.
+			continue
+		}
+
+		// Create a new done channel
+		doneCh := make(chan bool, 1)
+
+		// Create the channel we close for cancellation
+		cancelCh := make(chan struct{})
+
+		// Build a new deps copy
+		deps := make(map[Vertex]<-chan struct{})
+		for k, v := range info.deps {
+			deps[k] = v
+		}
+
+		// Update the update channel
+		info.DepsLock.Lock()
+		if info.DepsUpdateCh != nil {
+			close(info.DepsUpdateCh)
+		}
+		info.DepsCh = doneCh
+		info.DepsUpdateCh = make(chan struct{})
+		info.DepsLock.Unlock()
+
+		// Cancel the older waiter
+		if info.depsCancelCh != nil {
+			close(info.depsCancelCh)
+		}
+		info.depsCancelCh = cancelCh
+
+		log.Printf(
+			"[DEBUG] dag/walk: dependencies changed for %q, sending new deps",
+			VertexName(v))
+
+		// Start the waiter
+		go w.waitDeps(v, deps, doneCh, cancelCh)
+	}
+
+	// Start all the new vertices. We do this at the end so that all
+	// the edge waiters and changes are set up above.
+	for _, raw := range newVerts.List() {
+		v := raw.(Vertex)
+		go w.walkVertex(v, w.vertexMap[v])
+	}
+}
+
+// edgeParts returns the waiter and the dependency, in that order.
+// The waiter is waiting on the dependency.
+func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) {
+	if w.Reverse {
+		return e.Source(), e.Target()
+	}
+
+	return e.Target(), e.Source()
+}
+
+// walkVertex walks a single vertex, waiting for any dependencies before
+// executing the callback.
+func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
+	// When we're done executing, lower the waitgroup count
+	defer w.wait.Done()
+
+	// When we're done, always close our done channel
+	defer close(info.DoneCh)
+
+	// Wait for our dependencies.
We create a [closed] deps channel so + // that we can immediately fall through to load our actual DepsCh. + var depsSuccess bool + var depsUpdateCh chan struct{} + depsCh := make(chan bool, 1) + depsCh <- true + close(depsCh) + for { + select { + case <-info.CancelCh: + // Cancel + return + + case depsSuccess = <-depsCh: + // Deps complete! Mark as nil to trigger completion handling. + depsCh = nil + + case <-depsUpdateCh: + // New deps, reloop + } + + // Check if we have updated dependencies. This can happen if the + // dependencies were satisfied exactly prior to an Update occurring. + // In that case, we'd like to take into account new dependencies + // if possible. + info.DepsLock.Lock() + if info.DepsCh != nil { + depsCh = info.DepsCh + info.DepsCh = nil + } + if info.DepsUpdateCh != nil { + depsUpdateCh = info.DepsUpdateCh + } + info.DepsLock.Unlock() + + // If we still have no deps channel set, then we're done! + if depsCh == nil { + break + } + } + + // If we passed dependencies, we just want to check once more that + // we're not cancelled, since this can happen just as dependencies pass. + select { + case <-info.CancelCh: + // Cancelled during an update while dependencies completed. + return + default: + } + + // Run our callback or note that our upstream failed + var err error + if depsSuccess { + log.Printf("[DEBUG] dag/walk: walking %q", VertexName(v)) + err = w.Callback(v) + } else { + log.Printf("[DEBUG] dag/walk: upstream errored, not walking %q", VertexName(v)) + err = errWalkUpstream + } + + // Record the error + if err != nil { + w.errLock.Lock() + defer w.errLock.Unlock() + + if w.errMap == nil { + w.errMap = make(map[Vertex]error) + } + w.errMap[v] = err + } +} + +func (w *Walker) waitDeps( + v Vertex, + deps map[Vertex]<-chan struct{}, + doneCh chan<- bool, + cancelCh <-chan struct{}) { + // For each dependency given to us, wait for it to complete + for dep, depCh := range deps { + DepSatisfied: + for { + select { + case <-depCh: + // Dependency satisfied! + break DepSatisfied + + case <-cancelCh: + // Wait cancelled. Note that we didn't satisfy dependencies + // so that anything waiting on us also doesn't run. + doneCh <- false + return + + case <-time.After(time.Second * 5): + log.Printf("[DEBUG] dag/walk: vertex %q, waiting for: %q", + VertexName(v), VertexName(dep)) + } + } + } + + // Dependencies satisfied! We need to check if any errored + w.errLock.Lock() + defer w.errLock.Unlock() + for dep, _ := range deps { + if w.errMap[dep] != nil { + // One of our dependencies failed, so return false + doneCh <- false + return + } + } + + // All dependencies satisfied and successful + doneCh <- true +} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go new file mode 100644 index 0000000000..e0b81b6410 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/flatmap/expand.go @@ -0,0 +1,147 @@ +package flatmap + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/hil" +) + +// Expand takes a map and a key (prefix) and expands that value into +// a more complex structure. This is the reverse of the Flatten operation. 
+func Expand(m map[string]string, key string) interface{} {
+	// If the key is exactly a key in the map, just return it
+	if v, ok := m[key]; ok {
+		if v == "true" {
+			return true
+		} else if v == "false" {
+			return false
+		}
+
+		return v
+	}
+
+	// Check if the key is an array, and if so, expand the array
+	if v, ok := m[key+".#"]; ok {
+		// If the count of the key is unknown, then just put the unknown
+		// value in the value itself. This will be detected by Terraform
+		// core later.
+		if v == hil.UnknownValue {
+			return v
+		}
+
+		return expandArray(m, key)
+	}
+
+	// Check if this is a prefix in the map
+	prefix := key + "."
+	for k := range m {
+		if strings.HasPrefix(k, prefix) {
+			return expandMap(m, prefix)
+		}
+	}
+
+	return nil
+}
+
+func expandArray(m map[string]string, prefix string) []interface{} {
+	num, err := strconv.ParseInt(m[prefix+".#"], 0, 0)
+	if err != nil {
+		panic(err)
+	}
+
+	// If the number of elements in this array is 0, then return an
+	// empty slice as there is nothing to expand. Trying to expand it
+	// anyway could lead to crashes as any child maps, arrays or sets
+	// that no longer exist are still shown as empty with a count of 0.
+	if num == 0 {
+		return []interface{}{}
+	}
+
+	// The Schema "Set" type stores its values in an array format, but
+	// using numeric hash values instead of ordinal keys. Take the set
+	// of keys regardless of value, and expand them in numeric order.
+	// See GH-11042 for more details.
+	keySet := map[int]bool{}
+	computed := map[string]bool{}
+	for k := range m {
+		if !strings.HasPrefix(k, prefix+".") {
+			continue
+		}
+
+		key := k[len(prefix)+1:]
+		idx := strings.Index(key, ".")
+		if idx != -1 {
+			key = key[:idx]
+		}
+
+		// skip the count value
+		if key == "#" {
+			continue
+		}
+
+		// strip the computed flag if there is one
+		if strings.HasPrefix(key, "~") {
+			key = key[1:]
+			computed[key] = true
+		}
+
+		k, err := strconv.Atoi(key)
+		if err != nil {
+			panic(err)
+		}
+		keySet[int(k)] = true
+	}
+
+	keysList := make([]int, 0, num)
+	for key := range keySet {
+		keysList = append(keysList, key)
+	}
+	sort.Ints(keysList)
+
+	result := make([]interface{}, num)
+	for i, key := range keysList {
+		keyString := strconv.Itoa(key)
+		if computed[keyString] {
+			keyString = "~" + keyString
+		}
+		result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
+	}
+
+	return result
+}
+
+func expandMap(m map[string]string, prefix string) map[string]interface{} {
+	// Submaps may not have a '%' key, so we can't count on this value being
+	// here. If we don't have a count, just proceed as if we have a map.
+	if count, ok := m[prefix+"%"]; ok && count == "0" {
+		return map[string]interface{}{}
+	}
+
+	result := make(map[string]interface{})
+	for k := range m {
+		if !strings.HasPrefix(k, prefix) {
+			continue
+		}
+
+		key := k[len(prefix):]
+		idx := strings.Index(key, ".")
+		if idx != -1 {
+			key = key[:idx]
+		}
+		if _, ok := result[key]; ok {
+			continue
+		}
+
+		// skip the map count value
+		if key == "%" {
+			continue
+		}
+
+		result[key] = Expand(m, k[:len(prefix)+len(key)])
+	}
+
+	return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
new file mode 100644
index 0000000000..9ff6e42652
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
@@ -0,0 +1,71 @@
+package flatmap
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Flatten takes a structure and turns it into a flat map[string]string.
+// +// Within the "thing" parameter, only primitive values are allowed. Structs are +// not supported. Therefore, it can only be slices, maps, primitives, and +// any combination of those together. +// +// See the tests for examples of what inputs are turned into. +func Flatten(thing map[string]interface{}) Map { + result := make(map[string]string) + + for k, raw := range thing { + flatten(result, k, reflect.ValueOf(raw)) + } + + return Map(result) +} + +func flatten(result map[string]string, prefix string, v reflect.Value) { + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Bool: + if v.Bool() { + result[prefix] = "true" + } else { + result[prefix] = "false" + } + case reflect.Int: + result[prefix] = fmt.Sprintf("%d", v.Int()) + case reflect.Map: + flattenMap(result, prefix, v) + case reflect.Slice: + flattenSlice(result, prefix, v) + case reflect.String: + result[prefix] = v.String() + default: + panic(fmt.Sprintf("Unknown: %s", v)) + } +} + +func flattenMap(result map[string]string, prefix string, v reflect.Value) { + for _, k := range v.MapKeys() { + if k.Kind() == reflect.Interface { + k = k.Elem() + } + + if k.Kind() != reflect.String { + panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) + } + + flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) + } +} + +func flattenSlice(result map[string]string, prefix string, v reflect.Value) { + prefix = prefix + "." + + result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) + for i := 0; i < v.Len(); i++ { + flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go new file mode 100644 index 0000000000..46b72c4014 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/flatmap/map.go @@ -0,0 +1,82 @@ +package flatmap + +import ( + "strings" +) + +// Map is a wrapper around map[string]string that provides some helpers +// above it that assume the map is in the format that flatmap expects +// (the result of Flatten). +// +// All modifying functions such as Delete are done in-place unless +// otherwise noted. +type Map map[string]string + +// Contains returns true if the map contains the given key. +func (m Map) Contains(key string) bool { + for _, k := range m.Keys() { + if k == key { + return true + } + } + + return false +} + +// Delete deletes a key out of the map with the given prefix. +func (m Map) Delete(prefix string) { + for k, _ := range m { + match := k == prefix + if !match { + if !strings.HasPrefix(k, prefix) { + continue + } + + if k[len(prefix):len(prefix)+1] != "." { + continue + } + } + + delete(m, k) + } +} + +// Keys returns all of the top-level keys in this map +func (m Map) Keys() []string { + ks := make(map[string]struct{}) + for k, _ := range m { + idx := strings.Index(k, ".") + if idx == -1 { + idx = len(k) + } + + ks[k[:idx]] = struct{}{} + } + + result := make([]string, 0, len(ks)) + for k, _ := range ks { + result = append(result, k) + } + + return result +} + +// Merge merges the contents of the other Map into this one. +// +// This merge is smarter than a simple map iteration because it +// will fully replace arrays and other complex structures that +// are present in this map with the other map's. For example, if +// this map has a 3 element "foo" list, and m2 has a 2 element "foo" +// list, then the result will be that m has a 2 element "foo" +// list. 
+func (m Map) Merge(m2 Map) {
+	for _, prefix := range m2.Keys() {
+		m.Delete(prefix)
+
+		for k, v := range m2 {
+			if strings.HasPrefix(k, prefix) {
+				m[k] = v
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go b/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go
new file mode 100644
index 0000000000..9d31031a47
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go
@@ -0,0 +1,2 @@
+// Package acctest contains helpers for Terraform Acceptance Tests
+package acctest
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform/helper/acctest/random.go
new file mode 100644
index 0000000000..76fbf99ded
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/acctest/random.go
@@ -0,0 +1,142 @@
+package acctest
+
+import (
+	"bytes"
+	crand "crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"math/big"
+	"math/rand"
+	"strings"
+	"time"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// Helpers for generating random tidbits for use in identifiers to prevent
+// collisions in acceptance tests.
+
+// RandInt generates a random integer
+func RandInt() int {
+	reseed()
+	return rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+}
+
+// RandomWithPrefix is used to generate a unique name with a prefix, for
+// randomizing names in acceptance tests
+func RandomWithPrefix(name string) string {
+	reseed()
+	return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
+}
+
+// RandIntRange returns a random integer in the half-open interval [min, max).
+func RandIntRange(min int, max int) int {
+	reseed()
+	source := rand.New(rand.NewSource(time.Now().UnixNano()))
+	rangeMax := max - min
+
+	return min + int(source.Int31n(int32(rangeMax)))
+}
+
+// RandString generates a random alphanumeric string of the length specified
+func RandString(strlen int) string {
+	return RandStringFromCharSet(strlen, CharSetAlphaNum)
+}
+
+// RandStringFromCharSet generates a random string by selecting characters from
+// the charset provided
+func RandStringFromCharSet(strlen int, charSet string) string {
+	reseed()
+	result := make([]byte, strlen)
+	for i := 0; i < strlen; i++ {
+		result[i] = charSet[rand.Intn(len(charSet))]
+	}
+	return string(result)
+}
+
+// RandSSHKeyPair generates a public and private SSH key pair. The public key is
+// returned in OpenSSH format, and the private key is PEM encoded.
+func RandSSHKeyPair(comment string) (string, string, error) {
+	privateKey, privateKeyPEM, err := genPrivateKey()
+	if err != nil {
+		return "", "", err
+	}
+
+	publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey)
+	if err != nil {
+		return "", "", err
+	}
+	keyMaterial := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(publicKey)))
+	return fmt.Sprintf("%s %s", keyMaterial, comment), privateKeyPEM, nil
+}
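+// Editor's note: an illustrative sketch (editorial addition, not upstream
+// code) of how the helpers above are typically combined in acceptance tests;
+// the prefix and lengths are hypothetical:
+//
+//	name := acctest.RandomWithPrefix("tf-acc-test") // prefix plus a random int
+//	id := acctest.RandStringFromCharSet(10, acctest.CharSetAlpha)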
+// RandTLSCert generates a self-signed TLS certificate with a newly created
+// private key, and returns both the cert and the private key PEM encoded.
+func RandTLSCert(orgName string) (string, string, error) {
+	template := &x509.Certificate{
+		SerialNumber: big.NewInt(int64(RandInt())),
+		Subject: pkix.Name{
+			Organization: []string{orgName},
+		},
+		NotBefore:             time.Now(),
+		NotAfter:              time.Now().Add(24 * time.Hour),
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+
+	privateKey, privateKeyPEM, err := genPrivateKey()
+	if err != nil {
+		return "", "", err
+	}
+
+	cert, err := x509.CreateCertificate(crand.Reader, template, template, &privateKey.PublicKey, privateKey)
+	if err != nil {
+		return "", "", err
+	}
+
+	certPEM, err := pemEncode(cert, "CERTIFICATE")
+	if err != nil {
+		return "", "", err
+	}
+
+	return certPEM, privateKeyPEM, nil
+}
+
+func genPrivateKey() (*rsa.PrivateKey, string, error) {
+	privateKey, err := rsa.GenerateKey(crand.Reader, 1024)
+	if err != nil {
+		return nil, "", err
+	}
+
+	privateKeyPEM, err := pemEncode(x509.MarshalPKCS1PrivateKey(privateKey), "RSA PRIVATE KEY")
+	if err != nil {
+		return nil, "", err
+	}
+
+	return privateKey, privateKeyPEM, nil
+}
+
+func pemEncode(b []byte, block string) (string, error) {
+	var buf bytes.Buffer
+	pb := &pem.Block{Type: block, Bytes: b}
+	if err := pem.Encode(&buf, pb); err != nil {
+		return "", err
+	}
+
+	return buf.String(), nil
+}
+
+// Seeds random with current timestamp
+func reseed() {
+	rand.Seed(time.Now().UTC().UnixNano())
+}
+
+const (
+	// CharSetAlphaNum is the alphanumeric character set for use with
+	// RandStringFromCharSet
+	CharSetAlphaNum = "abcdefghijklmnopqrstuvwxyz0123456789"
+
+	// CharSetAlpha is the alphabetical character set for use with
+	// RandStringFromCharSet
+	CharSetAlpha = "abcdefghijklmnopqrstuvwxyz"
+)
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go b/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go
new file mode 100644
index 0000000000..87c60b8be4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go
@@ -0,0 +1,27 @@
+package acctest
+
+import (
+	"net/http"
+	"os"
+	"testing"
+)
+
+// SkipRemoteTestsEnvVar is an environment variable that can be set by a user
+// running the tests in an environment with limited network connectivity. By
+// default, tests requiring internet connectivity make an effort to skip if no
+// internet is available, but in some cases the smoke test will pass even
+// though the test should still be skipped.
+const SkipRemoteTestsEnvVar = "TF_SKIP_REMOTE_TESTS"
+
+// RemoteTestPrecheck is meant to be run by any unit test that requires
+// outbound internet connectivity. The test will be skipped if it's
+// unavailable.
+func RemoteTestPrecheck(t *testing.T) { + if os.Getenv(SkipRemoteTestsEnvVar) != "" { + t.Skipf("skipping test, %s was set", SkipRemoteTestsEnvVar) + } + + if _, err := http.Get("http://google.com"); err != nil { + t.Skipf("skipping, internet seems to not be available: %s", err) + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go new file mode 100644 index 0000000000..f470c9b4be --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/config/decode.go @@ -0,0 +1,28 @@ +package config + +import ( + "github.com/mitchellh/mapstructure" +) + +func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) { + var md mapstructure.Metadata + decoderConfig := &mapstructure.DecoderConfig{ + Metadata: &md, + Result: target, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decoderConfig) + if err != nil { + return nil, err + } + + for _, raw := range raws { + err := decoder.Decode(raw) + if err != nil { + return nil, err + } + } + + return &md, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go new file mode 100644 index 0000000000..1a6e023b60 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/config/validator.go @@ -0,0 +1,214 @@ +package config + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform/flatmap" + "github.com/hashicorp/terraform/terraform" +) + +// Validator is a helper that helps you validate the configuration +// of your resource, resource provider, etc. +// +// At the most basic level, set the Required and Optional lists to be +// specifiers of keys that are required or optional. If a key shows up +// that isn't in one of these two lists, then an error is generated. +// +// The "specifiers" allowed in this is a fairly rich syntax to help +// describe the format of your configuration: +// +// * Basic keys are just strings. For example: "foo" will match the +// "foo" key. +// +// * Nested structure keys can be matched by doing +// "listener.*.foo". This will verify that there is at least one +// listener element that has the "foo" key set. +// +// * The existence of a nested structure can be checked by simply +// doing "listener.*" which will verify that there is at least +// one element in the "listener" structure. This is NOT +// validating that "listener" is an array. It is validating +// that it is a nested structure in the configuration. +// +type Validator struct { + Required []string + Optional []string +} + +func (v *Validator) Validate( + c *terraform.ResourceConfig) (ws []string, es []error) { + // Flatten the configuration so it is easier to reason about + flat := flatmap.Flatten(c.Raw) + + keySet := make(map[string]validatorKey) + for i, vs := range [][]string{v.Required, v.Optional} { + req := i == 0 + for _, k := range vs { + vk, err := newValidatorKey(k, req) + if err != nil { + es = append(es, err) + continue + } + + keySet[k] = vk + } + } + + purged := make([]string, 0) + for _, kv := range keySet { + p, w, e := kv.Validate(flat) + if len(w) > 0 { + ws = append(ws, w...) + } + if len(e) > 0 { + es = append(es, e...) + } + + purged = append(purged, p...) + } + + // Delete all the keys we processed in order to find + // the unknown keys. 
+ for _, p := range purged { + delete(flat, p) + } + + // The rest are unknown + for k, _ := range flat { + es = append(es, fmt.Errorf("Unknown configuration: %s", k)) + } + + return +} + +type validatorKey interface { + // Validate validates the given configuration and returns viewed keys, + // warnings, and errors. + Validate(map[string]string) ([]string, []string, []error) +} + +func newValidatorKey(k string, req bool) (validatorKey, error) { + var result validatorKey + + parts := strings.Split(k, ".") + if len(parts) > 1 && parts[1] == "*" { + result = &nestedValidatorKey{ + Parts: parts, + Required: req, + } + } else { + result = &basicValidatorKey{ + Key: k, + Required: req, + } + } + + return result, nil +} + +// basicValidatorKey validates keys that are basic such as "foo" +type basicValidatorKey struct { + Key string + Required bool +} + +func (v *basicValidatorKey) Validate( + m map[string]string) ([]string, []string, []error) { + for k, _ := range m { + // If we have the exact key its a match + if k == v.Key { + return []string{k}, nil, nil + } + } + + if !v.Required { + return nil, nil, nil + } + + return nil, nil, []error{fmt.Errorf( + "Key not found: %s", v.Key)} +} + +type nestedValidatorKey struct { + Parts []string + Required bool +} + +func (v *nestedValidatorKey) validate( + m map[string]string, + prefix string, + offset int) ([]string, []string, []error) { + if offset >= len(v.Parts) { + // We're at the end. Look for a specific key. + v2 := &basicValidatorKey{Key: prefix, Required: v.Required} + return v2.Validate(m) + } + + current := v.Parts[offset] + + // If we're at offset 0, special case to start at the next one. + if offset == 0 { + return v.validate(m, current, offset+1) + } + + // Determine if we're doing a "for all" or a specific key + if current != "*" { + // We're looking at a specific key, continue on. + return v.validate(m, prefix+"."+current, offset+1) + } + + // We're doing a "for all", so we loop over. + countStr, ok := m[prefix+".#"] + if !ok { + if !v.Required { + // It wasn't required, so its no problem. + return nil, nil, nil + } + + return nil, nil, []error{fmt.Errorf( + "Key not found: %s", prefix)} + } + + count, err := strconv.ParseInt(countStr, 0, 0) + if err != nil { + // This shouldn't happen if flatmap works properly + panic("invalid flatmap array") + } + + var e []error + var w []string + u := make([]string, 1, count+1) + u[0] = prefix + ".#" + for i := 0; i < int(count); i++ { + prefix := fmt.Sprintf("%s.%d", prefix, i) + + // Mark that we saw this specific key + u = append(u, prefix) + + // Mark all prefixes of this + for k, _ := range m { + if !strings.HasPrefix(k, prefix+".") { + continue + } + u = append(u, k) + } + + // If we have more parts, then validate deeper + if offset+1 < len(v.Parts) { + u2, w2, e2 := v.validate(m, prefix, offset+1) + + u = append(u, u2...) + w = append(w, w2...) + e = append(e, e2...) + } + } + + return u, w, e +} + +func (v *nestedValidatorKey) Validate( + m map[string]string) ([]string, []string, []error) { + return v.validate(m, "", 0) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go new file mode 100644 index 0000000000..18b8837cc5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go @@ -0,0 +1,154 @@ +// experiment package contains helper functions for tracking experimental +// features throughout Terraform. 
+//
+// This package should be used for creating, enabling, querying, and deleting
+// experimental features. By unifying all of that onto a single interface,
+// we can have the Go compiler help us by enforcing every place we touch
+// an experimental feature.
+//
+// To create a new experiment:
+//
+// 1. Add the experiment to the global vars list below, prefixed with X_
+//
+// 2. Add the experiment variable to the All list in the init() function
+//
+// 3. Use it!
+//
+// To remove an experiment:
+//
+// 1. Delete the experiment global var.
+//
+// 2. Try to compile and fix all the places where the var was referenced.
+//
+// To use an experiment:
+//
+// 1. Use Flag() if you want the experiment to be available from the CLI.
+//
+// 2. Use Enabled() to check whether it is enabled.
+//
+// As a general user:
+//
+// 1. The `-Xexperiment-name` flag
+// 2. The `TF_X_` env var.
+// 3. The `TF_X_FORCE` env var can be set to force an experimental feature
+//    without human verification.
+//
+package experiment
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+// The experiments that are available are listed below. Any package in
+// Terraform defining an experiment should define the experiments below.
+// By keeping them all within the experiment package we force a single point
+// of definition and use. This allows the compiler to enforce references
+// so it becomes easy to remove the features.
+var (
+	// Shadow graph. This is already on by default. Disabling it will be
+	// allowed for a while in order for it to not block operations.
+	X_shadow = newBasicID("shadow", "SHADOW", false)
+)
+
+// Global variables this package uses because we are a package
+// with global state.
+var (
+	// All is the list of all experiments. Do not modify this.
+	All []ID
+
+	// enabled keeps track of what flags have been enabled
+	enabled     map[string]bool
+	enabledLock sync.Mutex
+
+	// Hidden "experiment" that forces all others to be on without verification
+	x_force = newBasicID("force", "FORCE", false)
+)
+
+func init() {
+	// The list of all experiments, update this when an experiment is added.
+	All = []ID{
+		X_shadow,
+		x_force,
+	}
+
+	// Load
+	reload()
+}
+
+// reload is used by tests to reload the global state; it is also called
+// publicly by init.
+func reload() {
+	// Initialize
+	enabledLock.Lock()
+	enabled = make(map[string]bool)
+	enabledLock.Unlock()
+
+	// Set defaults and check env vars
+	for _, id := range All {
+		// Get the default value
+		def := id.Default()
+
+		// The env var, if set, overrides the default (any value other
+		// than "0" enables the experiment)
+		key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env()))
+		if v := os.Getenv(key); v != "" {
+			def = v != "0"
+		}
+
+		// Set the default
+		SetEnabled(id, def)
+	}
+}
+
+// Enabled returns whether an experiment has been enabled or not.
+func Enabled(id ID) bool {
+	enabledLock.Lock()
+	defer enabledLock.Unlock()
+	return enabled[id.Flag()]
+}
+
+// SetEnabled sets an experiment to enabled/disabled. Please check with
+// the experiment docs for when calling this actually affects the experiment.
+func SetEnabled(id ID, v bool) {
+	enabledLock.Lock()
+	defer enabledLock.Unlock()
+	enabled[id.Flag()] = v
+}
+
+// Force returns true if the -Xforce or TF_X_FORCE flag is present, which
+// advises users of this package to not verify with the user that they want
+// experimental behavior and to just continue with it.
+func Force() bool {
+	return Enabled(x_force)
+}
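+// Editor's note: an illustrative sketch (editorial addition, not upstream
+// code) of querying an experiment from calling code; X_shadow is the ID
+// defined above:
+//
+//	if experiment.Enabled(experiment.X_shadow) {
+//		// take the experimental code path
+//	}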
+// Flag configures the given FlagSet with the flags to configure
+// all active experiments.
+func Flag(fs *flag.FlagSet) {
+	for _, id := range All {
+		desc := id.Flag()
+		key := fmt.Sprintf("X%s", id.Flag())
+		fs.Var(&idValue{X: id}, key, desc)
+	}
+}
+
+// idValue implements flag.Value for setting the enabled/disabled state
+// of an experiment from the CLI.
+type idValue struct {
+	X ID
+}
+
+func (v *idValue) IsBoolFlag() bool { return true }
+func (v *idValue) String() string   { return strconv.FormatBool(Enabled(v.X)) }
+func (v *idValue) Set(raw string) error {
+	b, err := strconv.ParseBool(raw)
+	if err == nil {
+		SetEnabled(v.X, b)
+	}
+
+	return err
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
new file mode 100644
index 0000000000..8e2f707322
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
@@ -0,0 +1,34 @@
+package experiment
+
+// ID represents an experimental feature.
+//
+// The global vars defined on this package should be used as ID values.
+// This interface is purposely not implementable outside of this package
+// so that we can rely on the Go compiler to enforce all experiment references.
+type ID interface {
+	Env() string
+	Flag() string
+	Default() bool
+
+	unexported() // So the ID can't be implemented externally.
+}
+
+// basicID implements ID.
+type basicID struct {
+	EnvValue     string
+	FlagValue    string
+	DefaultValue bool
+}
+
+func newBasicID(flag, env string, def bool) ID {
+	return &basicID{
+		EnvValue:     env,
+		FlagValue:    flag,
+		DefaultValue: def,
+	}
+}
+
+func (id *basicID) Env() string   { return id.EnvValue }
+func (id *basicID) Flag() string  { return id.FlagValue }
+func (id *basicID) Default() bool { return id.DefaultValue }
+func (id *basicID) unexported()   {}
diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
new file mode 100644
index 0000000000..64d8263e60
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
@@ -0,0 +1,22 @@
+package hashcode
+
+import (
+	"hash/crc32"
+)
+
+// String hashes a string to a unique hashcode.
+//
+// crc32 returns a uint32, but for our use we need
+// a non-negative integer. Here we cast to an integer
+// and invert it if the result is negative.
+func String(s string) int {
+	v := int(crc32.ChecksumIEEE([]byte(s)))
+	if v >= 0 {
+		return v
+	}
+	if -v >= 0 {
+		return -v
+	}
+	// v == MinInt
+	return 0
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
new file mode 100644
index 0000000000..67be1df1f9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
@@ -0,0 +1,41 @@
+package hilmapstructure
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+var hilMapstructureDecodeHookEmptySlice []interface{}
+var hilMapstructureDecodeHookStringSlice []string
+var hilMapstructureDecodeHookEmptyMap map[string]interface{}
+
+// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a
+// DecodeHook which defeats the backward compatibility mode of mapstructure
+// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This
+// allows us to use WeakDecode (desirable), but not fail on empty lists.
+func WeakDecode(m interface{}, rawVal interface{}) error {
+	config := &mapstructure.DecoderConfig{
+		DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
+			sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice)
+			stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
+			mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap)
+
+			if (source == sliceType || source == stringSliceType) && target == mapType {
+				return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}")
+			}
+
+			return val, nil
+		},
+		WeaklyTypedInput: true,
+		Result:           rawVal,
+	}
+
+	decoder, err := mapstructure.NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(m)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
new file mode 100644
index 0000000000..433cd77d3b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
@@ -0,0 +1,100 @@
+package logging
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+	"syscall"
+
+	"github.com/hashicorp/logutils"
+)
+
+// These are the environmental variables that determine if we log, and if
+// we log whether or not the log should go to a file.
+const (
+	EnvLog     = "TF_LOG"      // Set to True
+	EnvLogFile = "TF_LOG_PATH" // Set to a file
+)
+
+var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}
+
+// LogOutput determines where we should send logs (if anywhere) and the log level.
+func LogOutput() (logOutput io.Writer, err error) {
+	logOutput = ioutil.Discard
+
+	logLevel := LogLevel()
+	if logLevel == "" {
+		return
+	}
+
+	logOutput = os.Stderr
+	if logPath := os.Getenv(EnvLogFile); logPath != "" {
+		var err error
+		logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// This was the default since the beginning
+	logOutput = &logutils.LevelFilter{
+		Levels:   validLevels,
+		MinLevel: logutils.LogLevel(logLevel),
+		Writer:   logOutput,
+	}
+
+	return
+}
+
+// SetOutput checks for a log destination with LogOutput, and calls
+// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses
+// ioutil.Discard. Any error from LogOutput is fatal.
+func SetOutput() {
+	out, err := LogOutput()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if out == nil {
+		out = ioutil.Discard
+	}
+
+	log.SetOutput(out)
+}
+
+// LogLevel returns the current log level string based on the environment vars
+func LogLevel() string {
+	envLevel := os.Getenv(EnvLog)
+	if envLevel == "" {
+		return ""
+	}
+
+	logLevel := "TRACE"
+	if isValidLogLevel(envLevel) {
+		// allow following for better ux: info, Info or INFO
+		logLevel = strings.ToUpper(envLevel)
+	} else {
+		log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE.
Valid levels are: %+v", + envLevel, validLevels) + } + + return logLevel +} + +// IsDebugOrHigher returns whether or not the current log level is debug or trace +func IsDebugOrHigher() bool { + level := string(LogLevel()) + return level == "DEBUG" || level == "TRACE" +} + +func isValidLogLevel(level string) bool { + for _, l := range validLevels { + if strings.ToUpper(level) == string(l) { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go new file mode 100644 index 0000000000..4477924879 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go @@ -0,0 +1,53 @@ +package logging + +import ( + "log" + "net/http" + "net/http/httputil" +) + +type transport struct { + name string + transport http.RoundTripper +} + +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + if IsDebugOrHigher() { + reqData, err := httputil.DumpRequestOut(req, true) + if err == nil { + log.Printf("[DEBUG] "+logReqMsg, t.name, string(reqData)) + } else { + log.Printf("[ERROR] %s API Request error: %#v", t.name, err) + } + } + + resp, err := t.transport.RoundTrip(req) + if err != nil { + return resp, err + } + + if IsDebugOrHigher() { + respData, err := httputil.DumpResponse(resp, true) + if err == nil { + log.Printf("[DEBUG] "+logRespMsg, t.name, string(respData)) + } else { + log.Printf("[ERROR] %s API Response error: %#v", t.name, err) + } + } + + return resp, nil +} + +func NewTransport(name string, t http.RoundTripper) *transport { + return &transport{name, t} +} + +const logReqMsg = `%s API Request Details: +---[ REQUEST ]--------------------------------------- +%s +-----------------------------------------------------` + +const logRespMsg = `%s API Response Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go new file mode 100644 index 0000000000..7ee21614b9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/error.go @@ -0,0 +1,79 @@ +package resource + +import ( + "fmt" + "strings" + "time" +) + +type NotFoundError struct { + LastError error + LastRequest interface{} + LastResponse interface{} + Message string + Retries int +} + +func (e *NotFoundError) Error() string { + if e.Message != "" { + return e.Message + } + + if e.Retries > 0 { + return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) + } + + return "couldn't find resource" +} + +// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending +type UnexpectedStateError struct { + LastError error + State string + ExpectedState []string +} + +func (e *UnexpectedStateError) Error() string { + return fmt.Sprintf( + "unexpected state '%s', wanted target '%s'. 
last error: %s", + e.State, + strings.Join(e.ExpectedState, ", "), + e.LastError, + ) +} + +// TimeoutError is returned when WaitForState times out +type TimeoutError struct { + LastError error + LastState string + Timeout time.Duration + ExpectedState []string +} + +func (e *TimeoutError) Error() string { + expectedState := "resource to be gone" + if len(e.ExpectedState) > 0 { + expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) + } + + extraInfo := make([]string, 0) + if e.LastState != "" { + extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) + } + if e.Timeout > 0 { + extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) + } + + suffix := "" + if len(extraInfo) > 0 { + suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) + } + + if e.LastError != nil { + return fmt.Sprintf("timeout while waiting for %s%s: %s", + expectedState, suffix, e.LastError) + } + + return fmt.Sprintf("timeout while waiting for %s%s", + expectedState, suffix) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go new file mode 100644 index 0000000000..629582b3a2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/id.go @@ -0,0 +1,39 @@ +package resource + +import ( + "crypto/rand" + "fmt" + "math/big" + "sync" +) + +const UniqueIdPrefix = `terraform-` + +// idCounter is a randomly seeded monotonic counter for generating ordered +// unique ids. It uses a big.Int so we can easily increment a long numeric +// string. The max possible hex value here with 12 random bytes is +// "01000000000000000000000000", so there's no chance of rollover during +// operation. +var idMutex sync.Mutex +var idCounter = big.NewInt(0).SetBytes(randomBytes(12)) + +// Helper for a resource to generate a unique identifier w/ default prefix +func UniqueId() string { + return PrefixedUniqueId(UniqueIdPrefix) +} + +// Helper for a resource to generate a unique identifier w/ given prefix +// +// After the prefix, the ID consists of an incrementing 26 digit value (to match +// previous timestamp output). +func PrefixedUniqueId(prefix string) string { + idMutex.Lock() + defer idMutex.Unlock() + return fmt.Sprintf("%s%026x", prefix, idCounter.Add(idCounter, big.NewInt(1))) +} + +func randomBytes(n int) []byte { + b := make([]byte, n) + rand.Read(b) + return b +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go new file mode 100644 index 0000000000..a465136f77 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/map.go @@ -0,0 +1,140 @@ +package resource + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform/terraform" +) + +// Map is a map of resources that are supported, and provides helpers for +// more easily implementing a ResourceProvider. +type Map struct { + Mapping map[string]Resource +} + +func (m *Map) Validate( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := m.Mapping[t] + if !ok { + return nil, []error{fmt.Errorf("Unknown resource type: %s", t)} + } + + // If there is no validator set, then it is valid + if r.ConfigValidator == nil { + return nil, nil + } + + return r.ConfigValidator.Validate(c) +} + +// Apply performs a create or update depending on the diff, and calls +// the proper function on the matching Resource. 
+func (m *Map) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff, + meta interface{}) (*terraform.InstanceState, error) { + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource if it is created + err := r.Destroy(s, meta) + if err != nil { + return s, err + } + + s.ID = "" + } + + // If we're only destroying, and not creating, then return now. + // Otherwise, we continue so that we can create a new resource. + if !d.RequiresNew() { + return nil, nil + } + } + + var result *terraform.InstanceState + var err error + if s.ID == "" { + result, err = r.Create(s, d, meta) + } else { + if r.Update == nil { + return s, fmt.Errorf( + "Resource type '%s' doesn't support update", + info.Type) + } + + result, err = r.Update(s, d, meta) + } + if result != nil { + if result.Attributes == nil { + result.Attributes = make(map[string]string) + } + + result.Attributes["id"] = result.ID + } + + return result, err +} + +// Diff performs a diff on the proper resource type. +func (m *Map) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + return r.Diff(s, c, meta) +} + +// Refresh performs a Refresh on the proper resource type. +// +// Refresh on the Resource won't be called if the state represents a +// non-created resource (ID is blank). +// +// An error is returned if the resource isn't registered. +func (m *Map) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the resource isn't created, don't refresh. + if s.ID == "" { + return s, nil + } + + r, ok := m.Mapping[info.Type] + if !ok { + return nil, fmt.Errorf("Unknown resource type: %s", info.Type) + } + + return r.Refresh(s, meta) +} + +// Resources returns all the resources that are supported by this +// resource map and can be used to satisfy the Resources method of +// a ResourceProvider. +func (m *Map) Resources() []terraform.ResourceType { + ks := make([]string, 0, len(m.Mapping)) + for k, _ := range m.Mapping { + ks = append(ks, k) + } + sort.Strings(ks) + + rs := make([]terraform.ResourceType, 0, len(m.Mapping)) + for _, k := range ks { + rs = append(rs, terraform.ResourceType{ + Name: k, + }) + } + + return rs +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go new file mode 100644 index 0000000000..0d9c831a65 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go @@ -0,0 +1,49 @@ +package resource + +import ( + "github.com/hashicorp/terraform/helper/config" + "github.com/hashicorp/terraform/terraform" +) + +type Resource struct { + ConfigValidator *config.Validator + Create CreateFunc + Destroy DestroyFunc + Diff DiffFunc + Refresh RefreshFunc + Update UpdateFunc +} + +// CreateFunc is a function that creates a resource that didn't previously +// exist. +type CreateFunc func( + *terraform.InstanceState, + *terraform.InstanceDiff, + interface{}) (*terraform.InstanceState, error) + +// DestroyFunc is a function that destroys a resource that previously +// exists using the state. 
+type DestroyFunc func(
+	*terraform.InstanceState,
+	interface{}) error
+
+// DiffFunc is a function that performs a diff of a resource.
+type DiffFunc func(
+	*terraform.InstanceState,
+	*terraform.ResourceConfig,
+	interface{}) (*terraform.InstanceDiff, error)
+
+// RefreshFunc is a function that performs a refresh of a specific type
+// of resource.
+type RefreshFunc func(
+	*terraform.InstanceState,
+	interface{}) (*terraform.InstanceState, error)
+
+// UpdateFunc is a function that is called to update a resource that
+// previously existed. The difference between this and CreateFunc is that
+// the diff is guaranteed to only contain attributes that don't require
+// a new resource.
+type UpdateFunc func(
+	*terraform.InstanceState,
+	*terraform.InstanceDiff,
+	interface{}) (*terraform.InstanceState, error)
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
new file mode 100644
index 0000000000..37c586a11a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
@@ -0,0 +1,259 @@
+package resource
+
+import (
+	"log"
+	"time"
+)
+
+var refreshGracePeriod = 30 * time.Second
+
+// StateRefreshFunc is a function type used for StateChangeConf that is
+// responsible for refreshing the item being watched for a state change.
+//
+// It returns three results. `result` is any object that will be returned
+// as the final object after waiting for state change. This allows you to
+// return the final updated object, for example an EC2 instance after refreshing
+// it.
+//
+// `state` is the latest state of that object. And `err` is any error that
+// may have happened while refreshing the state.
+type StateRefreshFunc func() (result interface{}, state string, err error)
+
+// StateChangeConf is the configuration struct used for `WaitForState`.
+type StateChangeConf struct {
+	Delay          time.Duration    // Wait this time before starting checks
+	Pending        []string         // States that are "allowed" and will continue trying
+	Refresh        StateRefreshFunc // Refreshes the current state
+	Target         []string         // Target state
+	Timeout        time.Duration    // The amount of time to wait before timeout
+	MinTimeout     time.Duration    // Smallest time to wait before refreshes
+	PollInterval   time.Duration    // Override MinTimeout/backoff and only poll this often
+	NotFoundChecks int              // Number of times to allow not found
+
+	// This is to work around inconsistent APIs
+	ContinuousTargetOccurence int // Number of times the Target state has to occur continuously
+}
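+// Editor's note: an illustrative sketch (editorial addition, not upstream
+// code) of a typical StateChangeConf as seen from calling code;
+// describeInstance, id, and the state names are hypothetical:
+//
+//	conf := &resource.StateChangeConf{
+//		Pending:    []string{"pending"},
+//		Target:     []string{"running"},
+//		Timeout:    10 * time.Minute,
+//		MinTimeout: 3 * time.Second,
+//		Refresh: func() (interface{}, string, error) {
+//			inst, err := describeInstance(id) // hypothetical API call
+//			if err != nil {
+//				return nil, "", err
+//			}
+//			return inst, inst.State, nil
+//		},
+//	}
+//	raw, err := conf.WaitForState()
+
+// WaitForState watches an object and waits for it to achieve the state
+// specified in the configuration using the specified Refresh() func,
+// waiting the number of seconds specified in the timeout configuration.
+//
+// If the Refresh function returns an error, exit immediately with that error.
+//
+// If the Refresh function returns a state other than the Target state or one
+// listed in Pending, return immediately with an error.
+//
+// If the Timeout is exceeded before reaching the Target state, return an
+// error.
+//
+// Otherwise, the result is that of the first call to the Refresh function to
+// reach the target state.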
+func (conf *StateChangeConf) WaitForState() (interface{}, error) { + log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) + + notfoundTick := 0 + targetOccurence := 0 + + // Set a default for times to check for not found + if conf.NotFoundChecks == 0 { + conf.NotFoundChecks = 20 + } + + if conf.ContinuousTargetOccurence == 0 { + conf.ContinuousTargetOccurence = 1 + } + + type Result struct { + Result interface{} + State string + Error error + Done bool + } + + // Read every result from the refresh loop, waiting for a positive result.Done. + resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + + result := Result{} + + go func() { + defer close(resCh) + + time.Sleep(conf.Delay) + + // start with 0 delay for the first loop + var wait time.Duration + + for { + // store the last result + resCh <- result + + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + + res, currentState, err := conf.Refresh() + result = Result{ + Result: res, + State: currentState, + Error: err, + } + + if err != nil { + resCh <- result + return + } + + // If we're waiting for the absence of a thing, then return + if res == nil && len(conf.Target) == 0 { + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + + if res == nil { + // If we didn't find the resource, check if we have been + // not finding it for awhile, and if so, report an error. + notfoundTick++ + if notfoundTick > conf.NotFoundChecks { + result.Error = &NotFoundError{ + LastError: err, + Retries: notfoundTick, + } + resCh <- result + return + } + } else { + // Reset the counter for when a resource isn't found + notfoundTick = 0 + found := false + + for _, allowed := range conf.Target { + if currentState == allowed { + found = true + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + } + + for _, allowed := range conf.Pending { + if currentState == allowed { + found = true + targetOccurence = 0 + break + } + } + + if !found && len(conf.Pending) > 0 { + result.Error = &UnexpectedStateError{ + LastError: err, + State: result.State, + ExpectedState: conf.Target, + } + resCh <- result + return + } + } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + + // If a poll interval has been specified, choose that interval. + // Otherwise bound the default value. 
+			if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
+				wait = conf.PollInterval
+			} else {
+				if wait < conf.MinTimeout {
+					wait = conf.MinTimeout
+				} else if wait > 10*time.Second {
+					wait = 10 * time.Second
+				}
+			}
+
+			log.Printf("[TRACE] Waiting %s before next try", wait)
+		}
+	}()
+
+	// store the last value result from the refresh loop
+	lastResult := Result{}
+
+	timeout := time.After(conf.Timeout)
+	for {
+		select {
+		case r, ok := <-resCh:
+			// channel closed, so return the last result
+			if !ok {
+				return lastResult.Result, lastResult.Error
+			}
+
+			// we reached the intended state
+			if r.Done {
+				return r.Result, r.Error
+			}
+
+			// still waiting, store the last result
+			lastResult = r
+
+		case <-timeout:
+			log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout)
+			log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
+
+			// cancel the goroutine and start our grace period timer
+			close(cancelCh)
+			timeout := time.After(refreshGracePeriod)
+
+			// we need a for loop and a label to break on, because we may have
+			// an extra response value to read, but still want to wait for the
+			// channel to close.
+		forSelect:
+			for {
+				select {
+				case r, ok := <-resCh:
+					if r.Done {
+						// the last refresh loop reached the desired state
+						return r.Result, r.Error
+					}

+					if !ok {
+						// the goroutine returned
+						break forSelect
+					}
+
+					// target state not reached, save the result for the
+					// TimeoutError and wait for the channel to close
+					lastResult = r
+				case <-timeout:
+					log.Println("[ERROR] WaitForState exceeded refresh grace period")
+					break forSelect
+				}
+			}
+
+			return nil, &TimeoutError{
+				LastError:     lastResult.Error,
+				LastState:     lastResult.State,
+				Timeout:       conf.Timeout,
+				ExpectedState: conf.Target,
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
new file mode 100644
index 0000000000..ebdbde2b5d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
@@ -0,0 +1,938 @@
+package resource
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"strings"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/hashicorp/go-getter"
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/helper/logging"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+// flagSweep is a flag available when running tests on the command line. It
+// contains a comma-separated list of regions for the sweeper functions to
+// run in. This flag bypasses the normal Test path and instead runs functions designed to
+// clean up any leaked resources a testing environment could have created. It is
+// a best-effort attempt, and relies on Provider authors to implement "Sweeper"
+// methods for resources.
+
+// Adding Sweeper methods with AddTestSweepers will
+// construct a list of sweeper funcs to be called here. We iterate through
+// regions provided by the sweep flag, and for each region we iterate through the
+// tests, and exit on any errors. At time of writing, sweepers are run
+// sequentially, however they can list dependencies to be run first. We track
+// the sweepers that have been run, so as to not run a sweeper twice for a given
+// region.
+//
+// WARNING:
+// Sweepers are designed to be destructive.
+// You should not use the -sweep flag
+// in any environment that is not strictly a test environment. Resources will be
+// destroyed.
+
+var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers")
+var flagSweepRun = flag.String("sweep-run", "", "Comma-separated list of Sweeper Tests to run")
+var sweeperFuncs map[string]*Sweeper
+
+// map of sweepers that have run, and the success/fail status based on any error
+// raised
+var sweeperRunList map[string]bool
+
+// SweeperFunc is a signature for a function that acts as a sweeper. It
+// accepts a string for the region that the sweeper is to be run in. This
+// function must be able to construct a valid client for that region.
+type SweeperFunc func(r string) error
+
+type Sweeper struct {
+	// Name for sweeper. Must be unique to be run by the Sweeper Runner
+	Name string
+
+	// Dependencies list the const names of other Sweeper functions that must be run
+	// prior to running this Sweeper. This is an ordered list that will be invoked
+	// recursively at the helper/resource level
+	Dependencies []string
+
+	// Sweeper function that when invoked sweeps the Provider of specific
+	// resources
+	F SweeperFunc
+}
+
+func init() {
+	sweeperFuncs = make(map[string]*Sweeper)
+}
+
+// AddTestSweepers adds a given name and Sweeper configuration
+// pair to the internal sweeperFuncs map. Invoke this function to register a
+// resource sweeper to be available for running when the -sweep flag is used
+// with `go test`. Sweeper names must be unique to help ensure a given sweeper
+// is only run once per run.
+func AddTestSweepers(name string, s *Sweeper) {
+	if _, ok := sweeperFuncs[name]; ok {
+		log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name)
+	}
+
+	sweeperFuncs[name] = s
+}
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+	if *flagSweep != "" {
+		// parse flagSweep contents for regions to run
+		regions := strings.Split(*flagSweep, ",")
+
+		// get filtered list of sweepers to run based on sweep-run flag
+		sweepers := filterSweepers(*flagSweepRun, sweeperFuncs)
+		for _, region := range regions {
+			region = strings.TrimSpace(region)
+			// reset sweeperRunList for each region
+			sweeperRunList = map[string]bool{}
+
+			log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region)
+			for _, sweeper := range sweepers {
+				if err := runSweeperWithRegion(region, sweeper); err != nil {
+					log.Fatalf("[ERR] error running (%s): %s", sweeper.Name, err)
+				}
+			}
+
+			log.Printf("Sweeper Tests ran:\n")
+			for s, _ := range sweeperRunList {
+				fmt.Printf("\t- %s\n", s)
+			}
+		}
+	} else {
+		os.Exit(m.Run())
+	}
+}
+
+// filterSweepers takes a comma-separated string listing the names of sweepers
+// to be run, and returns a filtered set from the list of all sweepers to
+// run based on the names given.
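+//
+// For example, `go test ./... -sweep=us-south -sweep-run=ibm_compute`
+// (region and sweeper names hypothetical) would run only the registered
+// sweepers whose names contain the substring "ibm_compute", in the
+// us-south region.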
+func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper {
+	filterSlice := strings.Split(strings.ToLower(f), ",")
+	if len(filterSlice) == 1 && filterSlice[0] == "" {
+		// if the filter slice is a single element of "" then no sweeper list was
+		// given, so just return the full list
+		return source
+	}
+
+	sweepers := make(map[string]*Sweeper)
+	for name, sweeper := range source {
+		for _, s := range filterSlice {
+			if strings.Contains(strings.ToLower(name), s) {
+				sweepers[name] = sweeper
+			}
+		}
+	}
+	return sweepers
+}
+
+// runSweeperWithRegion receives a sweeper and a region, and recursively calls
+// itself with that region for every dependency found for that sweeper. If there
+// are no dependencies, invoke the contained sweeper func with the region, and
+// add the success/fail status to the sweeperRunList.
+func runSweeperWithRegion(region string, s *Sweeper) error {
+	for _, dep := range s.Dependencies {
+		if depSweeper, ok := sweeperFuncs[dep]; ok {
+			log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep)
+			if err := runSweeperWithRegion(region, depSweeper); err != nil {
+				return err
+			}
+		} else {
+			log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep)
+		}
+	}
+
+	if _, ok := sweeperRunList[s.Name]; ok {
+		log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region)
+		return nil
+	}
+
+	runE := s.F(region)
+	if runE == nil {
+		sweeperRunList[s.Name] = true
+	} else {
+		sweeperRunList[s.Name] = false
+	}
+
+	return runE
+}
+
+const TestEnvVar = "TF_ACC"
+
+// TestProvider can be implemented by any ResourceProvider to provide custom
+// reset functionality at the start of an acceptance test.
+// The helper/schema Provider implements this interface.
+type TestProvider interface {
+	TestReset() error
+}
+
+// TestCheckFunc is the callback type used with acceptance tests to check
+// the state of a resource. The state passed in is the latest state known,
+// or in the case of being after a destroy, it is the last known state when
+// it was created.
+type TestCheckFunc func(*terraform.State) error
+
+// ImportStateCheckFunc is the check function for ImportState tests
+type ImportStateCheckFunc func([]*terraform.InstanceState) error
+
+// TestCase is a single acceptance test case used to test the apply/destroy
+// lifecycle of a resource in a specific configuration.
+//
+// When the destroy plan is executed, the config from the last TestStep
+// is used to plan it.
+type TestCase struct {
+	// IsUnitTest allows a test to run regardless of the TF_ACC
+	// environment variable. This should be used with care - only for
+	// fast tests on local resources (e.g. remote state with a local
+	// backend) but can be used to increase confidence in correct
+	// operation of Terraform without waiting for a full acctest run.
+	IsUnitTest bool
+
+	// PreCheck, if non-nil, will be called before any test steps are
+	// executed. It will only be executed in the case that the steps
+	// would run, so it can be used for some validation before running
+	// acceptance tests, such as verifying that keys are set up.
+	PreCheck func()
+
+	// Providers is the ResourceProvider that will be under test.
+	//
+	// Alternately, ProviderFactories can be specified for the providers
+	// that are valid. This takes priority over Providers.
+	//
+	// The end effect of each is the same: specifying the providers that
+	// are used within the tests.
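+	//
+	// For example (a sketch; the Provider constructor is assumed to exist
+	// in the provider package under test):
+	//
+	//	Providers: map[string]terraform.ResourceProvider{
+	//		"ibm": Provider(),
+	//	},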
+ Providers map[string]terraform.ResourceProvider + ProviderFactories map[string]terraform.ResourceProviderFactory + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // CheckDestroy is called after the resource is finally destroyed + // to allow the tester to test that the resource is truly gone. + CheckDestroy TestCheckFunc + + // Steps are the apply sequences done within the context of the + // same state. Each step can have its own check to verify correctness. + Steps []TestStep + + // The settings below control the "ID-only refresh test." This is + // an enabled-by-default test that tests that a refresh can be + // refreshed with only an ID to result in the same attributes. + // This validates completeness of Refresh. + // + // IDRefreshName is the name of the resource to check. This will + // default to the first non-nil primary resource in the state. + // + // IDRefreshIgnore is a list of configuration keys that will be ignored. + IDRefreshName string + IDRefreshIgnore []string +} + +// TestStep is a single apply sequence of a test, done within the +// context of a state. +// +// Multiple TestSteps can be sequenced in a Test to allow testing +// potentially complex update logic. In general, simply create/destroy +// tests will only need one step. +type TestStep struct { + // ResourceName should be set to the name of the resource + // that is being tested. Example: "aws_instance.foo". Various test + // modes use this to auto-detect state information. + // + // This is only required if the test mode settings below say it is + // for the mode you're using. + ResourceName string + + // PreConfig is called before the Config is applied to perform any per-step + // setup that needs to happen. This is called regardless of "test mode" + // below. + PreConfig func() + + //--------------------------------------------------------------- + // Test modes. One of the following groups of settings must be + // set to determine what the test step will do. Ideally we would've + // used Go interfaces here but there are now hundreds of tests we don't + // want to re-type so instead we just determine which step logic + // to run based on what settings below are set. + //--------------------------------------------------------------- + + //--------------------------------------------------------------- + // Plan, Apply testing + //--------------------------------------------------------------- + + // Config a string of the configuration to give to Terraform. If this + // is set, then the TestCase will execute this step with the same logic + // as a `terraform apply`. + Config string + + // Check is called after the Config is applied. Use this step to + // make your own API calls to check the status of things, and to + // inspect the format of the ResourceState itself. + // + // If an error is returned, the test will fail. In this case, a + // destroy plan will still be attempted. + // + // If this is nil, no check is done on this step. + Check TestCheckFunc + + // Destroy will create a destroy plan if set to true. + Destroy bool + + // ExpectNonEmptyPlan can be set to true for specific types of tests that are + // looking to verify that a diff occurs + ExpectNonEmptyPlan bool + + // ExpectError allows the construction of test cases that we expect to fail + // with an error. The specified regexp must match against the error for the + // test to pass. 
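+	//
+	// For example: ExpectError: regexp.MustCompile(`not found`)
+	// (pattern illustrative only).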
+ ExpectError *regexp.Regexp + + // PlanOnly can be set to only run `plan` with this configuration, and not + // actually apply it. This is useful for ensuring config changes result in + // no-op plans + PlanOnly bool + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + //--------------------------------------------------------------- + // ImportState testing + //--------------------------------------------------------------- + + // ImportState, if true, will test the functionality of ImportState + // by importing the resource with ResourceName (must be set) and the + // ID of that resource. + ImportState bool + + // ImportStateId is the ID to perform an ImportState operation with. + // This is optional. If it isn't set, then the resource ID is automatically + // determined by inspecting the state for ResourceName's ID. + ImportStateId string + + // ImportStateIdPrefix is the prefix added in front of ImportStateId. + // This can be useful in complex import cases, where more than one + // attribute needs to be passed on as the Import ID. Mainly in cases + // where the ID is not known, and a known prefix needs to be added to + // the unset ImportStateId field. + ImportStateIdPrefix string + + // ImportStateCheck checks the results of ImportState. It should be + // used to verify that the resulting value of ImportState has the + // proper resources, IDs, and attributes. + ImportStateCheck ImportStateCheckFunc + + // ImportStateVerify, if true, will also check that the state values + // that are finally put into the state after import match for all the + // IDs returned by the Import. + // + // ImportStateVerifyIgnore are fields that should not be verified to + // be equal. These can be set to ephemeral fields or fields that can't + // be refreshed and don't matter. + ImportStateVerify bool + ImportStateVerifyIgnore []string +} + +// Test performs an acceptance test on a resource. +// +// Tests are not run unless an environmental variable "TF_ACC" is +// set to some non-empty value. This is to avoid test cases surprising +// a user by creating real resources. +// +// Tests will fail unless the verbose flag (`go test -v`, or explicitly +// the "-test.v" flag) is set. Because some acceptance tests take quite +// long, we require the verbose flag so users are able to see progress +// output. +func Test(t TestT, c TestCase) { + // We only run acceptance tests if an env var is set because they're + // slow and generally require some outside configuration. You can opt out + // of this with OverrideEnvVar on individual TestCases. + if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest { + t.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env '%s' set", + TestEnvVar)) + return + } + + logWriter, err := logging.LogOutput() + if err != nil { + t.Error(fmt.Errorf("error setting up logging: %s", err)) + } + log.SetOutput(logWriter) + + // We require verbose mode so that the user knows what is going on. 
+ if !testTesting && !testing.Verbose() && !c.IsUnitTest { + t.Fatal("Acceptance tests must be run with the -v flag on tests") + return + } + + // Run the PreCheck if we have it + if c.PreCheck != nil { + c.PreCheck() + } + + ctxProviders, err := testProviderFactories(c) + if err != nil { + t.Fatal(err) + } + opts := terraform.ContextOpts{Providers: ctxProviders} + + // A single state variable to track the lifecycle, starting with no state + var state *terraform.State + + // Go through each step and run it + var idRefreshCheck *terraform.ResourceState + idRefresh := c.IDRefreshName != "" + errored := false + for i, step := range c.Steps { + var err error + log.Printf("[WARN] Test: Executing step %d", i) + + // Determine the test mode to execute + if step.Config != "" { + state, err = testStepConfig(opts, state, step) + } else if step.ImportState { + state, err = testStepImportState(opts, state, step) + } else { + err = fmt.Errorf( + "unknown test mode for step. Please see TestStep docs\n\n%#v", + step) + } + + // If there was an error, exit + if err != nil { + // Perhaps we expected an error? Check if it matches + if step.ExpectError != nil { + if !step.ExpectError.MatchString(err.Error()) { + errored = true + t.Error(fmt.Sprintf( + "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n", + i, err, step.ExpectError)) + break + } + } else { + errored = true + t.Error(fmt.Sprintf( + "Step %d error: %s", i, err)) + break + } + } + + // If we've never checked an id-only refresh and our state isn't + // empty, find the first resource and test it. + if idRefresh && idRefreshCheck == nil && !state.Empty() { + // Find the first non-nil resource in the state + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.IDRefreshName]; ok { + idRefreshCheck = v + } + + break + } + } + + // If we have an instance to check for refreshes, do it + // immediately. We do it in the middle of another test + // because it shouldn't affect the overall state (refresh + // is read-only semantically) and we want to fail early if + // this fails. If refresh isn't read-only, then this will have + // caught a different bug. + if idRefreshCheck != nil { + log.Printf( + "[WARN] Test: Running ID-only refresh check on %s", + idRefreshCheck.Primary.ID) + if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil { + log.Printf("[ERROR] Test: ID-only test failed: %s", err) + t.Error(fmt.Sprintf( + "[ERROR] Test: ID-only test failed: %s", err)) + break + } + } + } + } + + // If we never checked an id-only refresh, it is a failure. + if idRefresh { + if !errored && len(c.Steps) > 0 && idRefreshCheck == nil { + t.Error("ID-only refresh check never ran.") + } + } + + // If we have a state, then run the destroy + if state != nil { + lastStep := c.Steps[len(c.Steps)-1] + destroyStep := TestStep{ + Config: lastStep.Config, + Check: c.CheckDestroy, + Destroy: true, + PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, + } + + log.Printf("[WARN] Test: Executing destroy step") + state, err := testStep(opts, state, destroyStep) + if err != nil { + t.Error(fmt.Sprintf( + "Error destroying resource! WARNING: Dangling resources\n"+ + "may exist. 
The full state and error are shown below.\n\n"+
+					"Error: %s\n\nState: %s",
+				err,
+				state))
+		}
+	} else {
+		log.Printf("[WARN] Skipping destroy test since there is no state.")
+	}
+}
+
+// testProviderFactories is a helper to build the ResourceProviderFactory map
+// with pre-instantiated ResourceProviders, so that we can reset them for the
+// test, while only calling the factory function once.
+// Any errors are stored so that they can be returned by the factory in
+// terraform to match non-test behavior.
+func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFactory, error) {
+	ctxProviders := c.ProviderFactories // make(map[string]terraform.ResourceProviderFactory)
+	if ctxProviders == nil {
+		ctxProviders = make(map[string]terraform.ResourceProviderFactory)
+	}
+	// add any fixed providers
+	for k, p := range c.Providers {
+		ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)
+	}
+
+	// reset the providers if needed
+	for k, pf := range ctxProviders {
+		// we can ignore any errors here, if we don't have a provider to reset
+		// the error will be handled later
+		p, err := pf()
+		if err != nil {
+			return nil, err
+		}
+		if p, ok := p.(TestProvider); ok {
+			err := p.TestReset()
+			if err != nil {
+				return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err)
+			}
+		}
+	}
+
+	return ctxProviders, nil
+}
+
+// UnitTest is a helper to force the acceptance testing harness to run in the
+// normal unit test suite. This should only be used for resources that don't
+// have any external dependencies.
+func UnitTest(t TestT, c TestCase) {
+	c.IsUnitTest = true
+	Test(t, c)
+}
+
+func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error {
+	// TODO: We guard by this right now so master doesn't explode. We
+	// need to remove this eventually to make this part of the normal tests.
+	if os.Getenv("TF_ACC_IDONLY") == "" {
+		return nil
+	}
+
+	name := fmt.Sprintf("%s.foo", r.Type)
+
+	// Build the state. The state is just the resource with an ID. There
+	// are no attributes. We only set what is needed to perform a refresh.
+	state := terraform.NewState()
+	state.RootModule().Resources[name] = &terraform.ResourceState{
+		Type: r.Type,
+		Primary: &terraform.InstanceState{
+			ID: r.Primary.ID,
+		},
+	}
+
+	// Create the config module. We use the full config because Refresh
+	// doesn't have access to it and we may need things like provider
+	// configurations. The initial implementation of id-only checks used
+	// an empty config module, but that caused the aforementioned problems.
+	mod, err := testModule(opts, step)
+	if err != nil {
+		return err
+	}
+
+	// Initialize the context
+	opts.Module = mod
+	opts.State = state
+	ctx, err := terraform.NewContext(&opts)
+	if err != nil {
+		return err
+	}
+	if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
+		if len(es) > 0 {
+			estrs := make([]string, len(es))
+			for i, e := range es {
+				estrs[i] = e.Error()
+			}
+			return fmt.Errorf(
+				"Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
+				ws, estrs)
+		}
+
+		log.Printf("[WARN] Config warnings: %#v", ws)
+	}
+
+	// Refresh!
+	state, err = ctx.Refresh()
+	if err != nil {
+		return fmt.Errorf("Error refreshing: %s", err)
+	}
+
+	// Verify attribute equivalence.
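+	// (Prefixes listed in IDRefreshIgnore are removed from both attribute
+	// maps before the deep comparison below.)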
+	actualR := state.RootModule().Resources[name]
+	if actualR == nil {
+		return fmt.Errorf("Resource gone!")
+	}
+	if actualR.Primary == nil {
+		return fmt.Errorf("Resource has no primary instance")
+	}
+	actual := actualR.Primary.Attributes
+	expected := r.Primary.Attributes
+	// Remove fields we're ignoring
+	for _, v := range c.IDRefreshIgnore {
+		for k, _ := range actual {
+			if strings.HasPrefix(k, v) {
+				delete(actual, k)
+			}
+		}
+		for k, _ := range expected {
+			if strings.HasPrefix(k, v) {
+				delete(expected, k)
+			}
+		}
+	}
+
+	if !reflect.DeepEqual(actual, expected) {
+		// Determine only the different attributes
+		for k, v := range expected {
+			if av, ok := actual[k]; ok && v == av {
+				delete(expected, k)
+				delete(actual, k)
+			}
+		}
+
+		spewConf := spew.NewDefaultConfig()
+		spewConf.SortKeys = true
+		return fmt.Errorf(
+			"Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
+				"\n\n%s\n\n%s",
+			spewConf.Sdump(actual), spewConf.Sdump(expected))
+	}
+
+	return nil
+}
+
+func testModule(
+	opts terraform.ContextOpts,
+	step TestStep) (*module.Tree, error) {
+	if step.PreConfig != nil {
+		step.PreConfig()
+	}
+
+	cfgPath, err := ioutil.TempDir("", "tf-test")
+	if err != nil {
+		return nil, fmt.Errorf(
+			"Error creating temporary directory for config: %s", err)
+	}
+	defer os.RemoveAll(cfgPath)
+
+	// Write the configuration
+	cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf"))
+	if err != nil {
+		return nil, fmt.Errorf(
+			"Error creating temporary file for config: %s", err)
+	}
+
+	_, err = io.Copy(cfgF, strings.NewReader(step.Config))
+	cfgF.Close()
+	if err != nil {
+		return nil, fmt.Errorf(
+			"Error creating temporary file for config: %s", err)
+	}
+
+	// Parse the configuration
+	mod, err := module.NewTreeModule("", cfgPath)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"Error loading configuration: %s", err)
+	}
+
+	// Load the modules
+	modStorage := &getter.FolderStorage{
+		StorageDir: filepath.Join(cfgPath, ".tfmodules"),
+	}
+	err = mod.Load(modStorage, module.GetModeGet)
+	if err != nil {
+		return nil, fmt.Errorf("Error downloading modules: %s", err)
+	}
+
+	return mod, nil
+}
+
+func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
+	if c.ResourceName == "" {
+		return nil, fmt.Errorf("ResourceName must be set in TestStep")
+	}
+
+	for _, m := range state.Modules {
+		if len(m.Resources) > 0 {
+			if v, ok := m.Resources[c.ResourceName]; ok {
+				return v, nil
+			}
+		}
+	}
+
+	return nil, fmt.Errorf(
+		"Resource specified by ResourceName couldn't be found: %s", c.ResourceName)
+}
+
+// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+	return func(s *terraform.State) error {
+		for i, f := range fs {
+			if err := f(s); err != nil {
+				return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)
+			}
+		}
+
+		return nil
+	}
+}
+
+// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+//
+// Unlike ComposeTestCheckFunc, ComposeAggregateTestCheckFunc runs _all_ of the
+// TestCheckFuncs and aggregates failures.
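+//
+// For example, aggregating several checks in a TestStep (the resource
+// address and attribute names are illustrative):
+//
+//	Check: ComposeAggregateTestCheckFunc(
+//		TestCheckResourceAttrSet("ibm_compute_instance.foo", "id"),
+//		TestCheckResourceAttr("ibm_compute_instance.foo", "name", "web1"),
+//	),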
+func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + var result *multierror.Error + + for i, f := range fs { + if err := f(s); err != nil { + result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)) + } + } + + return result.ErrorOrNil() + } +} + +// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value +// exists in state for the given name/key combination. It is useful when +// testing that computed values were set, when it is not possible to +// know ahead of time what the values will be. +func TestCheckResourceAttrSet(name, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + if val, ok := is.Attributes[key]; ok && val != "" { + return nil + } + + return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) + } +} + +// TestCheckResourceAttr is a TestCheckFunc which validates +// the value in state for the given name/key combination. +func TestCheckResourceAttr(name, key, value string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + if v, ok := is.Attributes[key]; !ok || v != value { + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + name, + key, + value, + v) + } + + return nil + } +} + +// TestCheckNoResourceAttr is a TestCheckFunc which ensures that +// NO value exists in state for the given name/key combination. +func TestCheckNoResourceAttr(name, key string) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + if _, ok := is.Attributes[key]; ok { + return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + } + + return nil + } +} + +// TestMatchResourceAttr is a TestCheckFunc which checks that the value +// in state for the given name/key combination matches the given regex. +func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + if !r.MatchString(is.Attributes[key]) { + return fmt.Errorf( + "%s: Attribute '%s' didn't match %q, got %#v", + name, + key, + r.String(), + is.Attributes[key]) + } + + return nil + } +} + +// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the +// value is a pointer so that it can be updated while the test is running. +// It will only be dereferenced at the point this step is run. +func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckResourceAttr(name, key, *value)(s) + } +} + +// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values +// in state for a pair of name/key combinations are equal. 
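+//
+// For example (addresses and keys hypothetical):
+//
+//	TestCheckResourceAttrPair(
+//		"ibm_compute_instance.foo", "datacenter",
+//		"data.ibm_datacenter.dc", "name",
+//	)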
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { + return func(s *terraform.State) error { + isFirst, err := primaryInstanceState(s, nameFirst) + if err != nil { + return err + } + vFirst, ok := isFirst.Attributes[keyFirst] + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst) + } + + isSecond, err := primaryInstanceState(s, nameSecond) + if err != nil { + return err + } + vSecond, ok := isSecond.Attributes[keySecond] + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) + } + + if vFirst != vSecond { + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + nameFirst, + keyFirst, + vSecond, + vFirst) + } + + return nil + } +} + +// TestCheckOutput checks an output in the Terraform configuration +func TestCheckOutput(name, value string) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Value != value { + return fmt.Errorf( + "Output '%s': expected %#v, got %#v", + name, + value, + rs) + } + + return nil + } +} + +func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if !r.MatchString(rs.Value.(string)) { + return fmt.Errorf( + "Output '%s': %#v didn't match %q", + name, + rs, + r.String()) + } + + return nil + } +} + +// TestT is the interface used to handle the test lifecycle of a test. +// +// Users should just use a *testing.T object, which implements this. +type TestT interface { + Error(args ...interface{}) + Fatal(args ...interface{}) + Skip(args ...interface{}) +} + +// This is set to true by unit tests to alter some behavior +var testTesting = false + +// primaryInstanceState returns the primary instance state for the given resource name. 
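+// The name is the full resource address within the root module, for
+// example "ibm_compute_instance.foo" (address illustrative).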
+func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { + ms := s.RootModule() + rs, ok := ms.Resources[name] + if !ok { + return nil, fmt.Errorf("Not found: %s", name) + } + + is := rs.Primary + if is == nil { + return nil, fmt.Errorf("No primary instance: %s", name) + } + + return is, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go new file mode 100644 index 0000000000..537a11c34a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go @@ -0,0 +1,160 @@ +package resource + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/terraform" +) + +// testStepConfig runs a config-mode test step +func testStepConfig( + opts terraform.ContextOpts, + state *terraform.State, + step TestStep) (*terraform.State, error) { + return testStep(opts, state, step) +} + +func testStep( + opts terraform.ContextOpts, + state *terraform.State, + step TestStep) (*terraform.State, error) { + mod, err := testModule(opts, step) + if err != nil { + return state, err + } + + // Build the context + opts.Module = mod + opts.State = state + opts.Destroy = step.Destroy + ctx, err := terraform.NewContext(&opts) + if err != nil { + return state, fmt.Errorf("Error initializing context: %s", err) + } + if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { + if len(es) > 0 { + estrs := make([]string, len(es)) + for i, e := range es { + estrs[i] = e.Error() + } + return state, fmt.Errorf( + "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v", + ws, estrs) + } + log.Printf("[WARN] Config warnings: %#v", ws) + } + + // Refresh! + state, err = ctx.Refresh() + if err != nil { + return state, fmt.Errorf( + "Error refreshing: %s", err) + } + + // If this step is a PlanOnly step, skip over this first Plan and subsequent + // Apply, and use the follow up Plan that checks for perpetual diffs + if !step.PlanOnly { + // Plan! + if p, err := ctx.Plan(); err != nil { + return state, fmt.Errorf( + "Error planning: %s", err) + } else { + log.Printf("[WARN] Test: Step plan: %s", p) + } + + // We need to keep a copy of the state prior to destroying + // such that destroy steps can verify their behaviour in the check + // function + stateBeforeApplication := state.DeepCopy() + + // Apply! + state, err = ctx.Apply() + if err != nil { + return state, fmt.Errorf("Error applying: %s", err) + } + + // Check! Excitement! + if step.Check != nil { + if step.Destroy { + if err := step.Check(stateBeforeApplication); err != nil { + return state, fmt.Errorf("Check failed: %s", err) + } + } else { + if err := step.Check(state); err != nil { + return state, fmt.Errorf("Check failed: %s", err) + } + } + } + } + + // Now, verify that Plan is now empty and we don't have a perpetual diff issue + // We do this with TWO plans. One without a refresh. + var p *terraform.Plan + if p, err = ctx.Plan(); err != nil { + return state, fmt.Errorf("Error on follow-up plan: %s", err) + } + if p.Diff != nil && !p.Diff.Empty() { + if step.ExpectNonEmptyPlan { + log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) + } else { + return state, fmt.Errorf( + "After applying this step, the plan was not empty:\n\n%s", p) + } + } + + // And another after a Refresh. 
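+	// (The refresh is skipped only for destroy steps that set
+	// PreventPostDestroyRefresh, since refreshing would re-read data sources
+	// alongside the destroyed resources.)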
+	if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
+		state, err = ctx.Refresh()
+		if err != nil {
+			return state, fmt.Errorf(
+				"Error on follow-up refresh: %s", err)
+		}
+	}
+	if p, err = ctx.Plan(); err != nil {
+		return state, fmt.Errorf("Error on second follow-up plan: %s", err)
+	}
+	empty := p.Diff == nil || p.Diff.Empty()
+
+	// Data resources are tricky because they legitimately get instantiated
+	// during refresh so that they will be already populated during the
+	// plan walk. Because of this, if we have any data resources in the
+	// config we'll end up wanting to destroy them again here. This is
+	// acceptable and expected, and we'll treat it as "empty" for the
+	// sake of this testing.
+	if step.Destroy {
+		empty = true
+
+		for _, moduleDiff := range p.Diff.Modules {
+			for k, instanceDiff := range moduleDiff.Resources {
+				if !strings.HasPrefix(k, "data.") {
+					empty = false
+					break
+				}
+
+				if !instanceDiff.Destroy {
+					empty = false
+				}
+			}
+		}
+	}
+
+	if !empty {
+		if step.ExpectNonEmptyPlan {
+			log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
+		} else {
+			return state, fmt.Errorf(
+				"After applying this step and refreshing, "+
+					"the plan was not empty:\n\n%s", p)
+		}
+	}
+
+	// Made it here, but expected a non-empty plan, fail!
+	if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) {
+		return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
+	}
+
+	// Made it here? Good job test step!
+	return state, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
new file mode 100644
index 0000000000..28ad105267
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
@@ -0,0 +1,141 @@
+package resource
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+// testStepImportState runs an import state test step
+func testStepImportState(
+	opts terraform.ContextOpts,
+	state *terraform.State,
+	step TestStep) (*terraform.State, error) {
+	// Determine the ID to import
+	importId := step.ImportStateId
+	if importId == "" {
+		resource, err := testResource(step, state)
+		if err != nil {
+			return state, err
+		}
+
+		importId = resource.Primary.ID
+	}
+	importPrefix := step.ImportStateIdPrefix
+	if importPrefix != "" {
+		importId = fmt.Sprintf("%s%s", importPrefix, importId)
+	}
+
+	// Set up the context. We initialize with an empty state. We use the
+	// full config for provider configurations.
+	mod, err := testModule(opts, step)
+	if err != nil {
+		return state, err
+	}
+
+	opts.Module = mod
+	opts.State = terraform.NewState()
+	ctx, err := terraform.NewContext(&opts)
+	if err != nil {
+		return state, err
+	}
+
+	// Do the import!
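+	// (The import below runs against the empty state built above. The
+	// resulting newState is only inspected by the checks that follow; the
+	// original pre-import state is returned at the end of this function.)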
+ newState, err := ctx.Import(&terraform.ImportOpts{ + // Set the module so that any provider config is loaded + Module: mod, + + Targets: []*terraform.ImportTarget{ + &terraform.ImportTarget{ + Addr: step.ResourceName, + ID: importId, + }, + }, + }) + if err != nil { + log.Printf("[ERROR] Test: ImportState failure: %s", err) + return state, err + } + + // Go through the new state and verify + if step.ImportStateCheck != nil { + var states []*terraform.InstanceState + for _, r := range newState.RootModule().Resources { + if r.Primary != nil { + states = append(states, r.Primary) + } + } + if err := step.ImportStateCheck(states); err != nil { + return state, err + } + } + + // Verify that all the states match + if step.ImportStateVerify { + new := newState.RootModule().Resources + old := state.RootModule().Resources + for _, r := range new { + // Find the existing resource + var oldR *terraform.ResourceState + for _, r2 := range old { + if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type { + oldR = r2 + break + } + } + if oldR == nil { + return state, fmt.Errorf( + "Failed state verification, resource with ID %s not found", + r.Primary.ID) + } + + // Compare their attributes + actual := make(map[string]string) + for k, v := range r.Primary.Attributes { + actual[k] = v + } + expected := make(map[string]string) + for k, v := range oldR.Primary.Attributes { + expected[k] = v + } + + // Remove fields we're ignoring + for _, v := range step.ImportStateVerifyIgnore { + for k, _ := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k, _ := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + return state, fmt.Errorf( + "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + } + } + + // Return the old state (non-imported) so we don't change anything. + return state, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go new file mode 100644 index 0000000000..ca50e292fc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go @@ -0,0 +1,84 @@ +package resource + +import ( + "sync" + "time" +) + +// Retry is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +func Retry(timeout time.Duration, f RetryFunc) error { + // These are used to pull the error out of the function; need a mutex to + // avoid a data race. 
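+	// (WaitForState invokes the Refresh func below on a separate goroutine,
+	// so resultErr is written there and read here after the wait returns.)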
+	var resultErr error
+	var resultErrMu sync.Mutex
+
+	c := &StateChangeConf{
+		Pending:    []string{"retryableerror"},
+		Target:     []string{"success"},
+		Timeout:    timeout,
+		MinTimeout: 500 * time.Millisecond,
+		Refresh: func() (interface{}, string, error) {
+			rerr := f()
+
+			resultErrMu.Lock()
+			defer resultErrMu.Unlock()
+
+			if rerr == nil {
+				resultErr = nil
+				return 42, "success", nil
+			}
+
+			resultErr = rerr.Err
+
+			if rerr.Retryable {
+				return 42, "retryableerror", nil
+			}
+			return nil, "quit", rerr.Err
+		},
+	}
+
+	_, waitErr := c.WaitForState()
+
+	// Need to acquire the lock here to avoid a race on resultErr as
+	// the return value
+	resultErrMu.Lock()
+	defer resultErrMu.Unlock()
+
+	// resultErr may be nil because the wait timed out and resultErr was never
+	// set; this is still an error
+	if resultErr == nil {
+		return waitErr
+	}
+	// resultErr takes precedence over waitErr if both are set because it is
+	// more likely to be useful
+	return resultErr
+}
+
+// RetryFunc is the function retried until it succeeds.
+type RetryFunc func() *RetryError
+
+// RetryError is the required return type of RetryFunc. It forces client code
+// to choose whether or not a given error is retryable.
+type RetryError struct {
+	Err       error
+	Retryable bool
+}
+
+// RetryableError is a helper to create a RetryError that's retryable from a
+// given error.
+func RetryableError(err error) *RetryError {
+	if err == nil {
+		return nil
+	}
+	return &RetryError{Err: err, Retryable: true}
+}
+
+// NonRetryableError is a helper to create a RetryError that's _not_ retryable
+// from a given error.
+func NonRetryableError(err error) *RetryError {
+	if err == nil {
+		return nil
+	}
+	return &RetryError{Err: err, Retryable: false}
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/README.md b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
new file mode 100644
index 0000000000..28c83628ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
@@ -0,0 +1,11 @@
+# Terraform Helper Lib: schema
+
+The `schema` package provides a high-level interface for writing resource
+providers for Terraform.
+
+If you're writing a resource provider, we recommend you use this package.
+
+The interface exposed by this package is much friendlier than trying to
+write to the Terraform API directly. The core Terraform API is low-level
+and built for maximum flexibility and control, whereas this library is built
+as a framework around that to more easily write common providers.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
new file mode 100644
index 0000000000..a0729c02c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
@@ -0,0 +1,94 @@
+package schema
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform/terraform"
+)
+
+// Backend represents a partial backend.Backend implementation and simplifies
+// the creation of configuration loading and validation.
+//
+// Unlike other schema structs such as Provider, this struct is meant to be
+// embedded within your actual implementation. It provides implementations
+// only for Input and Configure and gives you a method for accessing the
+// configuration in the form of a ResourceData that you're expected to call
+// from the other implementation funcs.
+type Backend struct {
+	// Schema is the schema for the configuration of this backend.
If this + // Backend has no configuration this can be omitted. + Schema map[string]*Schema + + // ConfigureFunc is called to configure the backend. Use the + // FromContext* methods to extract information from the context. + // This can be nil, in which case nothing will be called but the + // config will still be stored. + ConfigureFunc func(context.Context) error + + config *ResourceData +} + +var ( + backendConfigKey = contextKey("backend config") +) + +// FromContextBackendConfig extracts a ResourceData with the configuration +// from the context. This should only be called by Backend functions. +func FromContextBackendConfig(ctx context.Context) *ResourceData { + return ctx.Value(backendConfigKey).(*ResourceData) +} + +func (b *Backend) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + if b == nil { + return c, nil + } + + return schemaMap(b.Schema).Input(input, c) +} + +func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) { + if b == nil { + return nil, nil + } + + return schemaMap(b.Schema).Validate(c) +} + +func (b *Backend) Configure(c *terraform.ResourceConfig) error { + if b == nil { + return nil + } + + sm := schemaMap(b.Schema) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(nil, c) + if err != nil { + return err + } + + data, err := sm.Data(nil, diff) + if err != nil { + return err + } + b.config = data + + if b.ConfigureFunc != nil { + err = b.ConfigureFunc(context.WithValue( + context.Background(), backendConfigKey, data)) + if err != nil { + return err + } + } + + return nil +} + +// Config returns the configuration. This is available after Configure is +// called. +func (b *Backend) Config() *ResourceData { + return b.config +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go new file mode 100644 index 0000000000..5a03d2d801 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go @@ -0,0 +1,59 @@ +package schema + +import ( + "fmt" +) + +// DataSourceResourceShim takes a Resource instance describing a data source +// (with a Read implementation and a Schema, at least) and returns a new +// Resource instance with additional Create and Delete implementations that +// allow the data source to be used as a resource. +// +// This is a backward-compatibility layer for data sources that were formerly +// read-only resources before the data source concept was added. It should not +// be used for any *new* data sources. +// +// The Read function for the data source *must* call d.SetId with a non-empty +// id in order for this shim to function as expected. +// +// The provided Resource instance, and its schema, will be modified in-place +// to make it suitable for use as a full resource. +func DataSourceResourceShim(name string, dataSource *Resource) *Resource { + // Recursively, in-place adjust the schema so that it has ForceNew + // on any user-settable resource. 
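+	// (With no Update implementation, ForceNew means any configuration
+	// change destroys and re-creates the shimmed resource, which for a data
+	// source amounts to a fresh read.)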
+ dataSourceResourceShimAdjustSchema(dataSource.Schema) + + dataSource.Create = CreateFunc(dataSource.Read) + dataSource.Delete = func(d *ResourceData, meta interface{}) error { + d.SetId("") + return nil + } + dataSource.Update = nil // should already be nil, but let's make sure + + // FIXME: Link to some further docs either on the website or in the + // changelog, once such a thing exists. + dataSource.deprecationMessage = fmt.Sprintf( + "using %s as a resource is deprecated; consider using the data source instead", + name, + ) + + return dataSource +} + +func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) { + for _, s := range schema { + // If the attribute is configurable then it must be ForceNew, + // since we have no Update implementation. + if s.Required || s.Optional { + s.ForceNew = true + } + + // If the attribute is a nested resource, we need to recursively + // apply these same adjustments to it. + if s.Elem != nil { + if r, ok := s.Elem.(*Resource); ok { + dataSourceResourceShimAdjustSchema(r.Schema) + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go new file mode 100644 index 0000000000..d5e20e0388 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go @@ -0,0 +1,6 @@ +package schema + +// Equal is an interface that checks for deep equality between two objects. +type Equal interface { + Equal(interface{}) bool +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go new file mode 100644 index 0000000000..1660a67027 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go @@ -0,0 +1,334 @@ +package schema + +import ( + "fmt" + "strconv" +) + +// FieldReaders are responsible for decoding fields out of data into +// the proper typed representation. ResourceData uses this to query data +// out of multiple sources: config, state, diffs, etc. +type FieldReader interface { + ReadField([]string) (FieldReadResult, error) +} + +// FieldReadResult encapsulates all the resulting data from reading +// a field. +type FieldReadResult struct { + // Value is the actual read value. NegValue is the _negative_ value + // or the items that should be removed (if they existed). NegValue + // doesn't make sense for primitives but is important for any + // container types such as maps, sets, lists. + Value interface{} + ValueProcessed interface{} + + // Exists is true if the field was found in the data. False means + // it wasn't found if there was no error. + Exists bool + + // Computed is true if the field was found but the value + // is computed. + Computed bool +} + +// ValueOrZero returns the value of this result or the zero value of the +// schema type, ensuring a consistent non-nil return value. +func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} { + if r.Value != nil { + return r.Value + } + + return s.ZeroValue() +} + +// addrToSchema finds the final element schema for the given address +// and the given schema. It returns all the schemas that led to the final +// schema. These are in order of the address (out to in). +func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { + current := &Schema{ + Type: typeObject, + Elem: schemaMap, + } + + // If we aren't given an address, then the user is requesting the + // full object, so we return the special value which is the full object. 
+	if len(addr) == 0 {
+		return []*Schema{current}
+	}
+
+	result := make([]*Schema, 0, len(addr))
+	for len(addr) > 0 {
+		k := addr[0]
+		addr = addr[1:]
+
+	REPEAT:
+		// We want to trim off the first "typeObject" since it's not a
+		// real lookup that people do. i.e. []string{"foo"} in a structure
+		// isn't {typeObject, typeString}, it's just a {typeString}.
+		if len(result) > 0 || current.Type != typeObject {
+			result = append(result, current)
+		}
+
+		switch t := current.Type; t {
+		case TypeBool, TypeInt, TypeFloat, TypeString:
+			if len(addr) > 0 {
+				return nil
+			}
+		case TypeList, TypeSet:
+			isIndex := len(addr) > 0 && addr[0] == "#"
+
+			switch v := current.Elem.(type) {
+			case *Resource:
+				current = &Schema{
+					Type: typeObject,
+					Elem: v.Schema,
+				}
+			case *Schema:
+				current = v
+			case ValueType:
+				current = &Schema{Type: v}
+			default:
+				// we may not know the Elem type and are just looking for the
+				// index
+				if isIndex {
+					break
+				}
+
+				if len(addr) == 0 {
+					// we've processed the address, so return what we've
+					// collected
+					return result
+				}
+
+				if len(addr) == 1 {
+					if _, err := strconv.Atoi(addr[0]); err == nil {
+						// we're indexing a value without a schema. This can
+						// happen if the list is nested in another schema type.
+						// Default to a TypeString like we do with a map
+						current = &Schema{Type: TypeString}
+						break
+					}
+				}
+
+				return nil
+			}
+
+			// If we only have one more thing and the next thing
+			// is a #, then we're accessing the index which is always
+			// an int.
+			if isIndex {
+				current = &Schema{Type: TypeInt}
+				break
+			}
+
+		case TypeMap:
+			if len(addr) > 0 {
+				switch v := current.Elem.(type) {
+				case ValueType:
+					current = &Schema{Type: v}
+				default:
+					// maps default to string values. This is all we can have
+					// if this is nested in another list or map.
+					current = &Schema{Type: TypeString}
+				}
+			}
+		case typeObject:
+			// If we're already in the object, then we want to handle Sets
+			// and Lists specially. Basically, their next key is the lookup
+			// key (the set value or the list element). For these scenarios,
+			// we just want to skip it and move to the next element if there
+			// is one.
+			if len(result) > 0 {
+				lastType := result[len(result)-2].Type
+				if lastType == TypeSet || lastType == TypeList {
+					if len(addr) == 0 {
+						break
+					}
+
+					k = addr[0]
+					addr = addr[1:]
+				}
+			}
+
+			m := current.Elem.(map[string]*Schema)
+			val, ok := m[k]
+			if !ok {
+				return nil
+			}
+
+			current = val
+			goto REPEAT
+		}
+	}
+
+	return result
+}
+
+// readListField is a generic method for reading a list field out of a
+// FieldReader. It does this based on the assumption that there is a key
+// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc.
+// after that point.
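+//
+// For example, a three-element list "foo" appears to a reader as:
+//
+//	foo.# = "3"
+//	foo.0 = "one"
+//	foo.1 = "two"
+//	foo.2 = "three"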
+func readListField( + r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) { + addrPadded := make([]string, len(addr)+1) + copy(addrPadded, addr) + addrPadded[len(addrPadded)-1] = "#" + + // Get the number of elements in the list + countResult, err := r.ReadField(addrPadded) + if err != nil { + return FieldReadResult{}, err + } + if !countResult.Exists { + // No count, means we have no list + countResult.Value = 0 + } + + // If we have an empty list, then return an empty list + if countResult.Computed || countResult.Value.(int) == 0 { + return FieldReadResult{ + Value: []interface{}{}, + Exists: countResult.Exists, + Computed: countResult.Computed, + }, nil + } + + // Go through each count, and get the item value out of it + result := make([]interface{}, countResult.Value.(int)) + for i, _ := range result { + is := strconv.FormatInt(int64(i), 10) + addrPadded[len(addrPadded)-1] = is + rawResult, err := r.ReadField(addrPadded) + if err != nil { + return FieldReadResult{}, err + } + if !rawResult.Exists { + // This should never happen, because by the time the data + // gets to the FieldReaders, all the defaults should be set by + // Schema. + rawResult.Value = nil + } + + result[i] = rawResult.Value + } + + return FieldReadResult{ + Value: result, + Exists: true, + }, nil +} + +// readObjectField is a generic method for reading objects out of FieldReaders +// based on the assumption that building an address of []string{k, FIELD} +// will result in the proper field data. +func readObjectField( + r FieldReader, + addr []string, + schema map[string]*Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + exists := false + for field, s := range schema { + addrRead := make([]string, len(addr), len(addr)+1) + copy(addrRead, addr) + addrRead = append(addrRead, field) + rawResult, err := r.ReadField(addrRead) + if err != nil { + return FieldReadResult{}, err + } + if rawResult.Exists { + exists = true + } + + result[field] = rawResult.ValueOrZero(s) + } + + return FieldReadResult{ + Value: result, + Exists: exists, + }, nil +} + +// convert map values to the proper primitive type based on schema.Elem +func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error { + + elemType := TypeString + if et, ok := schema.Elem.(ValueType); ok { + elemType = et + } + + switch elemType { + case TypeInt, TypeFloat, TypeBool: + for k, v := range m { + vs, ok := v.(string) + if !ok { + continue + } + + v, err := stringToPrimitive(vs, false, &Schema{Type: elemType}) + if err != nil { + return err + } + + m[k] = v + } + } + return nil +} + +func stringToPrimitive( + value string, computed bool, schema *Schema) (interface{}, error) { + var returnVal interface{} + switch schema.Type { + case TypeBool: + if value == "" { + returnVal = false + break + } + if computed { + break + } + + v, err := strconv.ParseBool(value) + if err != nil { + return nil, err + } + + returnVal = v + case TypeFloat: + if value == "" { + returnVal = 0.0 + break + } + if computed { + break + } + + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return nil, err + } + + returnVal = v + case TypeInt: + if value == "" { + returnVal = 0 + break + } + if computed { + break + } + + v, err := strconv.ParseInt(value, 0, 0) + if err != nil { + return nil, err + } + + returnVal = int(v) + case TypeString: + returnVal = value + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } + + return returnVal, nil +} diff --git 
a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go new file mode 100644 index 0000000000..f958bbcb12 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go @@ -0,0 +1,333 @@ +package schema + +import ( + "fmt" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/mapstructure" +) + +// ConfigFieldReader reads fields out of an untyped map[string]string to the +// best of its ability. It also applies defaults from the Schema. (The other +// field readers do not need default handling because they source fully +// populated data structures.) +type ConfigFieldReader struct { + Config *terraform.ResourceConfig + Schema map[string]*Schema + + indexMaps map[string]map[string]int + once sync.Once +} + +func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) { + r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) }) + return r.readField(address, false) +} + +func (r *ConfigFieldReader) readField( + address []string, nested bool) (FieldReadResult, error) { + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + return FieldReadResult{}, nil + } + + if !nested { + // If we have a set anywhere in the address, then we need to + // read that set out in order and actually replace that part of + // the address with the real list index. i.e. set.50 might actually + // map to set.12 in the config, since it is in list order in the + // config, not indexed by set value. + for i, v := range schemaList { + // Sets are the only thing that cause this issue. + if v.Type != TypeSet { + continue + } + + // If we're at the end of the list, then we don't have to worry + // about this because we're just requesting the whole set. + if i == len(schemaList)-1 { + continue + } + + // If we're looking for the count, then ignore... + if address[i+1] == "#" { + continue + } + + indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")] + if !ok { + // Get the set so we can get the index map that tells us the + // mapping of the hash code to the list index + _, err := r.readSet(address[:i+1], v) + if err != nil { + return FieldReadResult{}, err + } + indexMap = r.indexMaps[strings.Join(address[:i+1], ".")] + } + + index, ok := indexMap[address[i+1]] + if !ok { + return FieldReadResult{}, nil + } + + address[i+1] = strconv.FormatInt(int64(index), 10) + } + } + + k := strings.Join(address, ".") + schema := schemaList[len(schemaList)-1] + + // If we're getting the single element of a promoted list, then + // check to see if we have a single element we need to promote. + if address[len(address)-1] == "0" && len(schemaList) > 1 { + lastSchema := schemaList[len(schemaList)-2] + if lastSchema.Type == TypeList && lastSchema.PromoteSingle { + k := strings.Join(address[:len(address)-1], ".") + result, err := r.readPrimitive(k, schema) + if err == nil { + return result, nil + } + } + } + + switch schema.Type { + case TypeBool, TypeFloat, TypeInt, TypeString: + return r.readPrimitive(k, schema) + case TypeList: + // If we support promotion then we first check if we have a lone + // value that we must promote. + // a value that is alone. 
+ if schema.PromoteSingle { + result, err := r.readPrimitive(k, schema.Elem.(*Schema)) + if err == nil && result.Exists { + result.Value = []interface{}{result.Value} + return result, nil + } + } + + return readListField(&nestedConfigFieldReader{r}, address, schema) + case TypeMap: + return r.readMap(k, schema) + case TypeSet: + return r.readSet(address, schema) + case typeObject: + return readObjectField( + &nestedConfigFieldReader{r}, + address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} + +func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) { + // We want both the raw value and the interpolated. We use the interpolated + // to store actual values and we use the raw one to check for + // computed keys. Actual values are obtained in the switch, depending on + // the type of the raw value. + mraw, ok := r.Config.GetRaw(k) + if !ok { + // check if this is from an interpolated field by seeing if it exists + // in the config + _, ok := r.Config.Get(k) + if !ok { + // this really doesn't exist + return FieldReadResult{}, nil + } + + // We couldn't fetch the value from a nested data structure, so treat the + // raw value as an interpolation string. The mraw value is only used + // for the type switch below. + mraw = "${INTERPOLATED}" + } + + result := make(map[string]interface{}) + computed := false + switch m := mraw.(type) { + case string: + // This is a map which has come out of an interpolated variable, so we + // can just get the value directly from config. Values cannot be computed + // currently. + v, _ := r.Config.Get(k) + + // If this isn't a map[string]interface, it must be computed. + mapV, ok := v.(map[string]interface{}) + if !ok { + return FieldReadResult{ + Exists: true, + Computed: true, + }, nil + } + + // Otherwise we can proceed as usual. 
+ for i, iv := range mapV { + result[i] = iv + } + case []interface{}: + for i, innerRaw := range m { + for ik := range innerRaw.(map[string]interface{}) { + key := fmt.Sprintf("%s.%d.%s", k, i, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + } + case []map[string]interface{}: + for i, innerRaw := range m { + for ik := range innerRaw { + key := fmt.Sprintf("%s.%d.%s", k, i, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + } + case map[string]interface{}: + for ik := range m { + key := fmt.Sprintf("%s.%s", k, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + default: + panic(fmt.Sprintf("unknown type: %#v", mraw)) + } + + err := mapValuesToPrimitive(result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var value interface{} + if !computed { + value = result + } + + return FieldReadResult{ + Value: value, + Exists: true, + Computed: computed, + }, nil +} + +func (r *ConfigFieldReader) readPrimitive( + k string, schema *Schema) (FieldReadResult, error) { + raw, ok := r.Config.Get(k) + if !ok { + // Nothing in config, but we might still have a default from the schema + var err error + raw, err = schema.DefaultValue() + if err != nil { + return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err) + } + + if raw == nil { + return FieldReadResult{}, nil + } + } + + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return FieldReadResult{}, err + } + + computed := r.Config.IsComputed(k) + returnVal, err := stringToPrimitive(result, computed, schema) + if err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: returnVal, + Exists: true, + Computed: computed, + }, nil +} + +func (r *ConfigFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + indexMap := make(map[string]int) + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + raw, err := readListField(&nestedConfigFieldReader{r}, address, schema) + if err != nil { + return FieldReadResult{}, err + } + if !raw.Exists { + return FieldReadResult{Value: set}, nil + } + + // If the list is computed, the set is necessarilly computed + if raw.Computed { + return FieldReadResult{ + Value: set, + Exists: true, + Computed: raw.Computed, + }, nil + } + + // Build up the set from the list elements + for i, v := range raw.Value.([]interface{}) { + // Check if any of the keys in this item are computed + computed := r.hasComputedSubKeys( + fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema) + + code := set.add(v, computed) + indexMap[code] = i + } + + r.indexMaps[strings.Join(address, ".")] = indexMap + + return FieldReadResult{ + Value: set, + Exists: true, + }, nil +} + +// hasComputedSubKeys walks through a schema and returns whether or not the +// given key contains any subkeys that are computed. +func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool { + prefix := key + "." 
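+
+	// Only a *Resource element introduces named sub-attributes; the switch
+	// below walks each sub-schema and reports true as soon as any nested
+	// key under the prefix is marked computed in the raw config.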
+
+	switch t := schema.Elem.(type) {
+	case *Resource:
+		for k, schema := range t.Schema {
+			if r.Config.IsComputed(prefix + k) {
+				return true
+			}
+
+			if r.hasComputedSubKeys(prefix+k, schema) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// nestedConfigFieldReader is a funny little thing that just wraps a
+// ConfigFieldReader to call readField when ReadField is called so that
+// we don't recalculate the set rewrites in the address, which leads to
+// an infinite loop.
+type nestedConfigFieldReader struct {
+	Reader *ConfigFieldReader
+}
+
+func (r *nestedConfigFieldReader) ReadField(
+	address []string) (FieldReadResult, error) {
+	return r.Reader.readField(address, true)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
new file mode 100644
index 0000000000..16bbae2961
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -0,0 +1,208 @@
+package schema
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/mitchellh/mapstructure"
+)
+
+// DiffFieldReader reads fields out of a diff structure.
+//
+// It also requires access to a Reader that reads fields from the structure
+// that the diff was derived from. This is usually the state. This is required
+// because a diff on its own doesn't have complete data about full objects
+// such as maps.
+//
+// The Source MUST be the data that the diff was derived from. If it isn't,
+// the behavior of this struct is undefined.
+//
+// Reading fields from a DiffFieldReader is identical to reading from
+// Source except the diff will be applied to the end result.
+//
+// The "Exists" field on the result will be set to true if the complete
+// field exists, whether it's from the source, diff, or a combination of both.
+// It cannot be determined whether a retrieved value is composed of
+// diff elements.
+type DiffFieldReader struct {
+	Diff   *terraform.InstanceDiff
+	Source FieldReader
+	Schema map[string]*Schema
+}
+
+func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
+	schemaList := addrToSchema(address, r.Schema)
+	if len(schemaList) == 0 {
+		return FieldReadResult{}, nil
+	}
+
+	schema := schemaList[len(schemaList)-1]
+	switch schema.Type {
+	case TypeBool, TypeInt, TypeFloat, TypeString:
+		return r.readPrimitive(address, schema)
+	case TypeList:
+		return readListField(r, address, schema)
+	case TypeMap:
+		return r.readMap(address, schema)
+	case TypeSet:
+		return r.readSet(address, schema)
+	case typeObject:
+		return readObjectField(r, address, schema.Elem.(map[string]*Schema))
+	default:
+		panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
+	}
+}
+
+func (r *DiffFieldReader) readMap(
+	address []string, schema *Schema) (FieldReadResult, error) {
+	result := make(map[string]interface{})
+	resultSet := false
+
+	// First read the map from the underlying source
+	source, err := r.Source.ReadField(address)
+	if err != nil {
+		return FieldReadResult{}, err
+	}
+	if source.Exists {
+		result = source.Value.(map[string]interface{})
+		resultSet = true
+	}
+
+	// Next, read all the elements we have in our diff, and apply
+	// the diff to our result.
+	prefix := strings.Join(address, ".") + "."
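+	// Attributes under this prefix overwrite keys read from Source,
+	// NewRemoved entries delete them, and the "%" count entry is skipped
+	// as pure bookkeeping.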
+ for k, v := range r.Diff.Attributes { + if !strings.HasPrefix(k, prefix) { + continue + } + if strings.HasPrefix(k, prefix+"%") { + // Ignore the count field + continue + } + + resultSet = true + + k = k[len(prefix):] + if v.NewRemoved { + delete(result, k) + continue + } + + result[k] = v.New + } + + err = mapValuesToPrimitive(result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var resultVal interface{} + if resultSet { + resultVal = result + } + + return FieldReadResult{ + Value: resultVal, + Exists: resultSet, + }, nil +} + +func (r *DiffFieldReader) readPrimitive( + address []string, schema *Schema) (FieldReadResult, error) { + result, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + + attrD, ok := r.Diff.Attributes[strings.Join(address, ".")] + if !ok { + return result, nil + } + + var resultVal string + if !attrD.NewComputed { + resultVal = attrD.New + if attrD.NewExtra != nil { + result.ValueProcessed = resultVal + if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil { + return FieldReadResult{}, err + } + } + } + + result.Computed = attrD.NewComputed + result.Exists = true + result.Value, err = stringToPrimitive(resultVal, false, schema) + if err != nil { + return FieldReadResult{}, err + } + + return result, nil +} + +func (r *DiffFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + prefix := strings.Join(address, ".") + "." + + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + // Go through the map and find all the set items + for k, d := range r.Diff.Attributes { + if d.NewRemoved { + // If the field is removed, we always ignore it + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + if strings.HasSuffix(k, "#") { + // Ignore any count field + continue + } + + // Split the key, since it might be a sub-object like "idx.field" + parts := strings.Split(k[len(prefix):], ".") + idx := parts[0] + + raw, err := r.ReadField(append(address, idx)) + if err != nil { + return FieldReadResult{}, err + } + if !raw.Exists { + // This shouldn't happen because we just verified it does exist + panic("missing field in set: " + k + "." + idx) + } + + set.Add(raw.Value) + } + + // Determine if the set "exists". It exists if there are items or if + // the diff explicitly wanted it empty. + exists := set.Len() > 0 + if !exists { + // We could check if the diff value is "0" here but I think the + // existence of "#" on its own is enough to show it existed. This + // protects us in the future from the zero value changing from + // "0" to "" breaking us (if that were to happen). + if _, ok := r.Diff.Attributes[prefix+"#"]; ok { + exists = true + } + } + + if !exists { + result, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + if result.Exists { + return result, nil + } + } + + return FieldReadResult{ + Value: set, + Exists: exists, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go new file mode 100644 index 0000000000..95339810ba --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go @@ -0,0 +1,232 @@ +package schema + +import ( + "fmt" + "strings" +) + +// MapFieldReader reads fields out of an untyped map[string]string to +// the best of its ability. 
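A companion sketch for maps (again with invented attribute names): a `TypeMap` field is stored flattened with a `tags.%` size entry plus one entry per key, and `readMap` strips the bookkeeping keys on the way back out:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	r := &schema.MapFieldReader{
		Schema: map[string]*schema.Schema{
			"tags": {Type: schema.TypeMap},
		},
		Map: schema.BasicMapReader(map[string]string{
			"tags.%":    "2", // map size, bookkeeping only
			"tags.env":  "prod",
			"tags.team": "infra",
		}),
	}

	res, _ := r.ReadField([]string{"tags"})
	// The "%" key is stripped; values stay strings when Elem is unset.
	fmt.Println(res.Value) // map[env:prod team:infra]
}
```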
+type MapFieldReader struct {
+	Map    MapReader
+	Schema map[string]*Schema
+}
+
+func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) {
+	k := strings.Join(address, ".")
+	schemaList := addrToSchema(address, r.Schema)
+	if len(schemaList) == 0 {
+		return FieldReadResult{}, nil
+	}
+
+	schema := schemaList[len(schemaList)-1]
+	switch schema.Type {
+	case TypeBool, TypeInt, TypeFloat, TypeString:
+		return r.readPrimitive(address, schema)
+	case TypeList:
+		return readListField(r, address, schema)
+	case TypeMap:
+		return r.readMap(k, schema)
+	case TypeSet:
+		return r.readSet(address, schema)
+	case typeObject:
+		return readObjectField(r, address, schema.Elem.(map[string]*Schema))
+	default:
+		panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+	}
+}
+
+func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
+	result := make(map[string]interface{})
+	resultSet := false
+
+	// If the name of the map field is directly in the map with an
+	// empty string, it means that the map is being deleted, so mark
+	// that it is set.
+	if v, ok := r.Map.Access(k); ok && v == "" {
+		resultSet = true
+	}
+
+	prefix := k + "."
+	r.Map.Range(func(k, v string) bool {
+		if strings.HasPrefix(k, prefix) {
+			resultSet = true
+
+			key := k[len(prefix):]
+			if key != "%" && key != "#" {
+				result[key] = v
+			}
+		}
+
+		return true
+	})
+
+	err := mapValuesToPrimitive(result, schema)
+	if err != nil {
+		return FieldReadResult{}, nil
+	}
+
+	var resultVal interface{}
+	if resultSet {
+		resultVal = result
+	}
+
+	return FieldReadResult{
+		Value:  resultVal,
+		Exists: resultSet,
+	}, nil
+}
+
+func (r *MapFieldReader) readPrimitive(
+	address []string, schema *Schema) (FieldReadResult, error) {
+	k := strings.Join(address, ".")
+	result, ok := r.Map.Access(k)
+	if !ok {
+		return FieldReadResult{}, nil
+	}
+
+	returnVal, err := stringToPrimitive(result, false, schema)
+	if err != nil {
+		return FieldReadResult{}, err
+	}
+
+	return FieldReadResult{
+		Value:  returnVal,
+		Exists: true,
+	}, nil
+}
+
+func (r *MapFieldReader) readSet(
+	address []string, schema *Schema) (FieldReadResult, error) {
+	// Get the number of elements in the list
+	countRaw, err := r.readPrimitive(
+		append(address, "#"), &Schema{Type: TypeInt})
+	if err != nil {
+		return FieldReadResult{}, err
+	}
+	if !countRaw.Exists {
+		// No count, means we have no list
+		countRaw.Value = 0
+	}
+
+	// Create the set that will be our result
+	set := schema.ZeroValue().(*Set)
+
+	// If we have an empty list, then return an empty list
+	if countRaw.Computed || countRaw.Value.(int) == 0 {
+		return FieldReadResult{
+			Value:    set,
+			Exists:   countRaw.Exists,
+			Computed: countRaw.Computed,
+		}, nil
+	}
+
+	// Go through the map and find all the set items
+	prefix := strings.Join(address, ".") + "."
+	countExpected := countRaw.Value.(int)
+	countActual := make(map[string]struct{})
+	completed := r.Map.Range(func(k, _ string) bool {
+		if !strings.HasPrefix(k, prefix) {
+			return true
+		}
+		if strings.HasPrefix(k, prefix+"#") {
+			// Ignore the count field
+			return true
+		}
+
+		// Split the key, since it might be a sub-object like "idx.field"
+		parts := strings.Split(k[len(prefix):], ".")
+		idx := parts[0]
+
+		var raw FieldReadResult
+		raw, err = r.ReadField(append(address, idx))
+		if err != nil {
+			return false
+		}
+		if !raw.Exists {
+			// This shouldn't happen because we just verified it does exist
+			panic("missing field in set: " + k + "." + idx)
+		}
+
+		set.Add(raw.Value)
+
+		// Due to the way multimap readers work, if we've seen the number
+		// of fields we expect, then exit so that we don't read later values.
+		// For example: the "set" map might have "ports.#", "ports.0", and
+		// "ports.1", but the "state" map might have those plus "ports.2".
+		// We don't want "ports.2"
+		countActual[idx] = struct{}{}
+		if len(countActual) >= countExpected {
+			return false
+		}
+
+		return true
+	})
+	if !completed && err != nil {
+		return FieldReadResult{}, err
+	}
+
+	return FieldReadResult{
+		Value:  set,
+		Exists: true,
+	}, nil
+}
+
+// MapReader is an interface that is given to MapFieldReader for accessing
+// a "map". This can be used to have alternate implementations. For a basic
+// map[string]string, use BasicMapReader.
+type MapReader interface {
+	Access(string) (string, bool)
+	Range(func(string, string) bool) bool
+}
+
+// BasicMapReader implements MapReader for a single map.
+type BasicMapReader map[string]string
+
+func (r BasicMapReader) Access(k string) (string, bool) {
+	v, ok := r[k]
+	return v, ok
+}
+
+func (r BasicMapReader) Range(f func(string, string) bool) bool {
+	for k, v := range r {
+		if cont := f(k, v); !cont {
+			return false
+		}
+	}
+
+	return true
+}
+
+// MultiMapReader reads over multiple maps, preferring keys that are
+// found earlier (lower index) vs. later (higher index)
+type MultiMapReader []map[string]string
+
+func (r MultiMapReader) Access(k string) (string, bool) {
+	for _, m := range r {
+		if v, ok := m[k]; ok {
+			return v, ok
+		}
+	}
+
+	return "", false
+}
+
+func (r MultiMapReader) Range(f func(string, string) bool) bool {
+	done := make(map[string]struct{})
+	for _, m := range r {
+		for k, v := range m {
+			if _, ok := done[k]; ok {
+				continue
+			}
+
+			if cont := f(k, v); !cont {
+				return false
+			}
+
+			done[k] = struct{}{}
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
new file mode 100644
index 0000000000..89ad3a86f2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
@@ -0,0 +1,63 @@
+package schema
+
+import (
+	"fmt"
+)
+
+// MultiLevelFieldReader reads from other field readers,
+// merging their results along the way in a specific order. You can specify
+// "levels" and name them in order to read only an exact level or up to
+// a specific level.
+//
+// This is useful for saying things such as "read the field from the state
+// and config and merge them" or "read the latest value of the field".
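To make the merge order concrete, a small sketch (level names and values invented) that layers a "config" reader over a "state" reader; the last level where the field exists wins:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	s := map[string]*schema.Schema{"name": {Type: schema.TypeString}}

	r := &schema.MultiLevelFieldReader{
		Levels: []string{"state", "config"},
		Readers: map[string]schema.FieldReader{
			"state": &schema.MapFieldReader{
				Schema: s,
				Map:    schema.BasicMapReader(map[string]string{"name": "old"}),
			},
			"config": &schema.MapFieldReader{
				Schema: s,
				Map:    schema.BasicMapReader(map[string]string{"name": "new"}),
			},
		},
	}

	// Reads every level up to and including "config"; the last level
	// where the field exists provides the value.
	res, err := r.ReadFieldMerge([]string{"name"}, "config")
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Value) // new
}
```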
+type MultiLevelFieldReader struct { + Readers map[string]FieldReader + Levels []string +} + +func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) { + return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1]) +} + +func (r *MultiLevelFieldReader) ReadFieldExact( + address []string, level string) (FieldReadResult, error) { + reader, ok := r.Readers[level] + if !ok { + return FieldReadResult{}, fmt.Errorf( + "Unknown reader level: %s", level) + } + + result, err := reader.ReadField(address) + if err != nil { + return FieldReadResult{}, fmt.Errorf( + "Error reading level %s: %s", level, err) + } + + return result, nil +} + +func (r *MultiLevelFieldReader) ReadFieldMerge( + address []string, level string) (FieldReadResult, error) { + var result FieldReadResult + for _, l := range r.Levels { + if r, ok := r.Readers[l]; ok { + out, err := r.ReadField(address) + if err != nil { + return FieldReadResult{}, fmt.Errorf( + "Error reading level %s: %s", l, err) + } + + // TODO: computed + if out.Exists { + result = out + } + } + + if l == level { + break + } + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go new file mode 100644 index 0000000000..9abc41b54f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go @@ -0,0 +1,8 @@ +package schema + +// FieldWriters are responsible for writing fields by address into +// a proper typed representation. ResourceData uses this to write new data +// into existing sources. +type FieldWriter interface { + WriteField([]string, interface{}) error +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go new file mode 100644 index 0000000000..689ed8d1cd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go @@ -0,0 +1,319 @@ +package schema + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/mitchellh/mapstructure" +) + +// MapFieldWriter writes data into a single map[string]string structure. +type MapFieldWriter struct { + Schema map[string]*Schema + + lock sync.Mutex + result map[string]string +} + +// Map returns the underlying map that is being written to. +func (w *MapFieldWriter) Map() map[string]string { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + return w.result +} + +func (w *MapFieldWriter) unsafeWriteField(addr string, value string) { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + w.result[addr] = value +} + +func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + schemaList := addrToSchema(addr, w.Schema) + if len(schemaList) == 0 { + return fmt.Errorf("Invalid address to set: %#v", addr) + } + + // If we're setting anything other than a list root or set root, + // then disallow it. 
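+	// (Partial writes would leave the ".#" / ".%" count entries and the
+	// set hash bookkeeping inconsistent with the stored elements.)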
+ for _, schema := range schemaList[:len(schemaList)-1] { + if schema.Type == TypeList { + return fmt.Errorf( + "%s: can only set full list", + strings.Join(addr, ".")) + } + + if schema.Type == TypeMap { + return fmt.Errorf( + "%s: can only set full map", + strings.Join(addr, ".")) + } + + if schema.Type == TypeSet { + return fmt.Errorf( + "%s: can only set full set", + strings.Join(addr, ".")) + } + } + + return w.set(addr, value) +} + +func (w *MapFieldWriter) set(addr []string, value interface{}) error { + schemaList := addrToSchema(addr, w.Schema) + if len(schemaList) == 0 { + return fmt.Errorf("Invalid address to set: %#v", addr) + } + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + return w.setPrimitive(addr, value, schema) + case TypeList: + return w.setList(addr, value, schema) + case TypeMap: + return w.setMap(addr, value, schema) + case TypeSet: + return w.setSet(addr, value, schema) + case typeObject: + return w.setObject(addr, value, schema) + default: + panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) + } +} + +func (w *MapFieldWriter) setList( + addr []string, + v interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + setElement := func(idx string, value interface{}) error { + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + return w.set(append(addrCopy, idx), value) + } + + var vs []interface{} + if err := mapstructure.Decode(v, &vs); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Set the entire list. + var err error + for i, elem := range vs { + is := strconv.FormatInt(int64(i), 10) + err = setElement(is, elem) + if err != nil { + break + } + } + if err != nil { + for i, _ := range vs { + is := strconv.FormatInt(int64(i), 10) + setElement(is, nil) + } + + return err + } + + w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10) + return nil +} + +func (w *MapFieldWriter) setMap( + addr []string, + value interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + v := reflect.ValueOf(value) + vs := make(map[string]interface{}) + + if value == nil { + // The empty string here means the map is removed. + w.result[k] = "" + return nil + } + + if v.Kind() != reflect.Map { + return fmt.Errorf("%s: must be a map", k) + } + if v.Type().Key().Kind() != reflect.String { + return fmt.Errorf("%s: keys must strings", k) + } + for _, mk := range v.MapKeys() { + mv := v.MapIndex(mk) + vs[mk.String()] = mv.Interface() + } + + // Remove the pure key since we're setting the full map value + delete(w.result, k) + + // Set each subkey + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + for subKey, v := range vs { + if err := w.set(append(addrCopy, subKey), v); err != nil { + return err + } + } + + // Set the count + w.result[k+".%"] = strconv.Itoa(len(vs)) + + return nil +} + +func (w *MapFieldWriter) setObject( + addr []string, + value interface{}, + schema *Schema) error { + // Set the entire object. 
First decode into a proper structure + var v map[string]interface{} + if err := mapstructure.Decode(value, &v); err != nil { + return fmt.Errorf("%s: %s", strings.Join(addr, "."), err) + } + + // Make space for additional elements in the address + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + + // Set each element in turn + var err error + for k1, v1 := range v { + if err = w.set(append(addrCopy, k1), v1); err != nil { + break + } + } + if err != nil { + for k1, _ := range v { + w.set(append(addrCopy, k1), nil) + } + } + + return err +} + +func (w *MapFieldWriter) setPrimitive( + addr []string, + v interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + + if v == nil { + // The empty string here means the value is removed. + w.result[k] = "" + return nil + } + + var set string + switch schema.Type { + case TypeBool: + var b bool + if err := mapstructure.Decode(v, &b); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + set = strconv.FormatBool(b) + case TypeString: + if err := mapstructure.Decode(v, &set); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + case TypeInt: + var n int + if err := mapstructure.Decode(v, &n); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + set = strconv.FormatInt(int64(n), 10) + case TypeFloat: + var n float64 + if err := mapstructure.Decode(v, &n); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + set = strconv.FormatFloat(float64(n), 'G', -1, 64) + default: + return fmt.Errorf("Unknown type: %#v", schema.Type) + } + + w.result[k] = set + return nil +} + +func (w *MapFieldWriter) setSet( + addr []string, + value interface{}, + schema *Schema) error { + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + k := strings.Join(addr, ".") + + if value == nil { + w.result[k+".#"] = "0" + return nil + } + + // If it is a slice, then we have to turn it into a *Set so that + // we get the proper order back based on the hash code. + if v := reflect.ValueOf(value); v.Kind() == reflect.Slice { + // Build a temp *ResourceData to use for the conversion + tempSchema := *schema + tempSchema.Type = TypeList + tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema} + tempW := &MapFieldWriter{Schema: tempSchemaMap} + + // Set the entire list, this lets us get sane values out of it + if err := tempW.WriteField(addr, value); err != nil { + return err + } + + // Build the set by going over the list items in order and + // hashing them into the set. The reason we go over the list and + // not the `value` directly is because this forces all types + // to become []interface{} (generic) instead of []string, which + // most hash functions are expecting. 
+ s := schema.ZeroValue().(*Set) + tempR := &MapFieldReader{ + Map: BasicMapReader(tempW.Map()), + Schema: tempSchemaMap, + } + for i := 0; i < v.Len(); i++ { + is := strconv.FormatInt(int64(i), 10) + result, err := tempR.ReadField(append(addrCopy, is)) + if err != nil { + return err + } + if !result.Exists { + panic("set item just set doesn't exist") + } + + s.Add(result.Value) + } + + value = s + } + + for code, elem := range value.(*Set).m { + if err := w.set(append(addrCopy, code), elem); err != nil { + return err + } + } + + w.result[k+".#"] = strconv.Itoa(value.(*Set).Len()) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go new file mode 100644 index 0000000000..3a97629394 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go @@ -0,0 +1,36 @@ +// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT. + +package schema + +import "fmt" + +const ( + _getSource_name_0 = "getSourceStategetSourceConfig" + _getSource_name_1 = "getSourceDiff" + _getSource_name_2 = "getSourceSet" + _getSource_name_3 = "getSourceLevelMaskgetSourceExact" +) + +var ( + _getSource_index_0 = [...]uint8{0, 14, 29} + _getSource_index_1 = [...]uint8{0, 13} + _getSource_index_2 = [...]uint8{0, 12} + _getSource_index_3 = [...]uint8{0, 18, 32} +) + +func (i getSource) String() string { + switch { + case 1 <= i && i <= 2: + i -= 1 + return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]] + case i == 4: + return _getSource_name_1 + case i == 8: + return _getSource_name_2 + case 15 <= i && i <= 16: + i -= 15 + return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] + default: + return fmt.Sprintf("getSource(%d)", i) + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go new file mode 100644 index 0000000000..d52d2f5f06 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go @@ -0,0 +1,400 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/terraform" +) + +// Provider represents a resource provider in Terraform, and properly +// implements all of the ResourceProvider API. +// +// By defining a schema for the configuration of the provider, the +// map of supporting resources, and a configuration function, the schema +// framework takes over and handles all the provider operations for you. +// +// After defining the provider structure, it is unlikely that you'll require any +// of the methods on Provider itself. +type Provider struct { + // Schema is the schema for the configuration of this provider. If this + // provider has no configuration, this can be omitted. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ResourcesMap is the list of available resources that this provider + // can manage, along with their Resource structure defining their + // own schemas and CRUD operations. + // + // Provider automatically handles routing operations such as Apply, + // Diff, etc. to the proper resource. 
+	ResourcesMap map[string]*Resource
+
+	// DataSourcesMap is the collection of available data sources that
+	// this provider implements, with a Resource instance defining
+	// the schema and Read operation of each.
+	//
+	// Resource instances for data sources must have a Read function
+	// and must *not* implement Create, Update or Delete.
+	DataSourcesMap map[string]*Resource
+
+	// ConfigureFunc is a function for configuring the provider. If the
+	// provider doesn't need to be configured, this can be omitted.
+	//
+	// See the ConfigureFunc documentation for more information.
+	ConfigureFunc ConfigureFunc
+
+	// MetaReset is called by TestReset to reset any state stored in the meta
+	// interface. This is especially important if the StopContext is stored by
+	// the provider.
+	MetaReset func() error
+
+	meta interface{}
+
+	// a mutex is required because TestReset can directly replace the stopCtx
+	stopMu        sync.Mutex
+	stopCtx       context.Context
+	stopCtxCancel context.CancelFunc
+	stopOnce      sync.Once
+}
+
+// ConfigureFunc is the function used to configure a Provider.
+//
+// The interface{} value returned by this function is stored and passed into
+// the subsequent resources as the meta parameter. This return value is
+// usually used to pass along a configured API client, a configuration
+// structure, etc.
+type ConfigureFunc func(*ResourceData) (interface{}, error)
+
+// InternalValidate should be called to validate the structure
+// of the provider.
+//
+// This should be called in a unit test for any provider to verify
+// before release that a provider is properly configured for use with
+// this library.
+func (p *Provider) InternalValidate() error {
+	if p == nil {
+		return errors.New("provider is nil")
+	}
+
+	var validationErrors error
+	sm := schemaMap(p.Schema)
+	if err := sm.InternalValidate(sm); err != nil {
+		validationErrors = multierror.Append(validationErrors, err)
+	}
+
+	for k, r := range p.ResourcesMap {
+		if err := r.InternalValidate(nil, true); err != nil {
+			validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err))
+		}
+	}
+
+	for k, r := range p.DataSourcesMap {
+		if err := r.InternalValidate(nil, false); err != nil {
+			validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err))
+		}
+	}
+
+	return validationErrors
+}
+
+// Meta returns the metadata associated with this provider that was
+// returned by the Configure call. It will be nil until Configure is called.
+func (p *Provider) Meta() interface{} {
+	return p.meta
+}
+
+// SetMeta can be used to forcefully set the Meta object of the provider.
+// Note that if Configure is called the return value will override anything
+// set here.
+func (p *Provider) SetMeta(v interface{}) {
+	p.meta = v
+}
+
+// Stopped reports whether the provider has been stopped or not.
+func (p *Provider) Stopped() bool {
+	ctx := p.StopContext()
+	select {
+	case <-ctx.Done():
+		return true
+	default:
+		return false
+	}
+}
+
+// StopContext returns a context that is canceled once the provider
+// is stopped.
+func (p *Provider) StopContext() context.Context {
+	p.stopOnce.Do(p.stopInit)
+
+	p.stopMu.Lock()
+	defer p.stopMu.Unlock()
+
+	return p.stopCtx
+}
+
+func (p *Provider) stopInit() {
+	p.stopMu.Lock()
+	defer p.stopMu.Unlock()
+
+	p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
+}
+
+// Stop implementation of terraform.ResourceProvider interface.
+func (p *Provider) Stop() error { + p.stopOnce.Do(p.stopInit) + + p.stopMu.Lock() + defer p.stopMu.Unlock() + + p.stopCtxCancel() + return nil +} + +// TestReset resets any state stored in the Provider, and will call TestReset +// on Meta if it implements the TestProvider interface. +// This may be used to reset the schema.Provider at the start of a test, and is +// automatically called by resource.Test. +func (p *Provider) TestReset() error { + p.stopInit() + if p.MetaReset != nil { + return p.MetaReset() + } + return nil +} + +// Input implementation of terraform.ResourceProvider interface. +func (p *Provider) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + return schemaMap(p.Schema).Input(input, c) +} + +// Validate implementation of terraform.ResourceProvider interface. +func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) { + if err := p.InternalValidate(); err != nil { + return nil, []error{fmt.Errorf( + "Internal validation of the provider failed! This is always a bug\n"+ + "with the provider itself, and not a user issue. Please report\n"+ + "this bug:\n\n%s", err)} + } + + return schemaMap(p.Schema).Validate(c) +} + +// ValidateResource implementation of terraform.ResourceProvider interface. +func (p *Provider) ValidateResource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := p.ResourcesMap[t] + if !ok { + return nil, []error{fmt.Errorf( + "Provider doesn't support resource: %s", t)} + } + + return r.Validate(c) +} + +// Configure implementation of terraform.ResourceProvider interface. +func (p *Provider) Configure(c *terraform.ResourceConfig) error { + // No configuration + if p.ConfigureFunc == nil { + return nil + } + + sm := schemaMap(p.Schema) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(nil, c) + if err != nil { + return err + } + + data, err := sm.Data(nil, diff) + if err != nil { + return err + } + + meta, err := p.ConfigureFunc(data) + if err != nil { + return err + } + + p.meta = meta + return nil +} + +// Apply implementation of terraform.ResourceProvider interface. +func (p *Provider) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Apply(s, d, p.meta) +} + +// Diff implementation of terraform.ResourceProvider interface. +func (p *Provider) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Diff(s, c) +} + +// Refresh implementation of terraform.ResourceProvider interface. +func (p *Provider) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState) (*terraform.InstanceState, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.Refresh(s, p.meta) +} + +// Resources implementation of terraform.ResourceProvider interface. 
+func (p *Provider) Resources() []terraform.ResourceType { + keys := make([]string, 0, len(p.ResourcesMap)) + for k, _ := range p.ResourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.ResourceType, 0, len(keys)) + for _, k := range keys { + resource := p.ResourcesMap[k] + + // This isn't really possible (it'd fail InternalValidate), but + // we do it anyways to avoid a panic. + if resource == nil { + resource = &Resource{} + } + + result = append(result, terraform.ResourceType{ + Name: k, + Importable: resource.Importer != nil, + }) + } + + return result +} + +func (p *Provider) ImportState( + info *terraform.InstanceInfo, + id string) ([]*terraform.InstanceState, error) { + // Find the resource + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + // If it doesn't support import, error + if r.Importer == nil { + return nil, fmt.Errorf("resource %s doesn't support import", info.Type) + } + + // Create the data + data := r.Data(nil) + data.SetId(id) + data.SetType(info.Type) + + // Call the import function + results := []*ResourceData{data} + if r.Importer.State != nil { + var err error + results, err = r.Importer.State(data, p.meta) + if err != nil { + return nil, err + } + } + + // Convert the results to InstanceState values and return it + states := make([]*terraform.InstanceState, len(results)) + for i, r := range results { + states[i] = r.State() + } + + // Verify that all are non-nil. If there are any nil the error + // isn't obvious so we circumvent that with a friendlier error. + for _, s := range states { + if s == nil { + return nil, fmt.Errorf( + "nil entry in ImportState results. This is always a bug with\n" + + "the resource that is being imported. Please report this as\n" + + "a bug to Terraform.") + } + } + + return states, nil +} + +// ValidateDataSource implementation of terraform.ResourceProvider interface. +func (p *Provider) ValidateDataSource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + r, ok := p.DataSourcesMap[t] + if !ok { + return nil, []error{fmt.Errorf( + "Provider doesn't support data source: %s", t)} + } + + return r.Validate(c) +} + +// ReadDataDiff implementation of terraform.ResourceProvider interface. +func (p *Provider) ReadDataDiff( + info *terraform.InstanceInfo, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + + r, ok := p.DataSourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown data source: %s", info.Type) + } + + return r.Diff(nil, c) +} + +// RefreshData implementation of terraform.ResourceProvider interface. +func (p *Provider) ReadDataApply( + info *terraform.InstanceInfo, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + + r, ok := p.DataSourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown data source: %s", info.Type) + } + + return r.ReadDataApply(d, p.meta) +} + +// DataSources implementation of terraform.ResourceProvider interface. 
+func (p *Provider) DataSources() []terraform.DataSource { + keys := make([]string, 0, len(p.DataSourcesMap)) + for k, _ := range p.DataSourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.DataSource, 0, len(keys)) + for _, k := range keys { + result = append(result, terraform.DataSource{ + Name: k, + }) + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go new file mode 100644 index 0000000000..856c6758a8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go @@ -0,0 +1,206 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/terraform" +) + +// Provisioner represents a resource provisioner in Terraform and properly +// implements all of the ResourceProvisioner API. +// +// This higher level structure makes it much easier to implement a new or +// custom provisioner for Terraform. +// +// The function callbacks for this structure are all passed a context object. +// This context object has a number of pre-defined values that can be accessed +// via the global functions defined in context.go. +type Provisioner struct { + // ConnSchema is the schema for the connection settings for this + // provisioner. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + // + // NOTE: The value of connection keys can only be strings for now. + ConnSchema map[string]*Schema + + // Schema is the schema for the usage of this provisioner. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ApplyFunc is the function for executing the provisioner. This is required. + // It is given a context. See the Provisioner struct docs for more + // information. + ApplyFunc func(ctx context.Context) error + + // ValidateFunc is a function for extended validation. This is optional + // and should be used when individual field validation is not enough. + ValidateFunc func(*ResourceData) ([]string, []error) + + stopCtx context.Context + stopCtxCancel context.CancelFunc + stopOnce sync.Once +} + +// Keys that can be used to access data in the context parameters for +// Provisioners. +var ( + connDataInvalid = contextKey("data invalid") + + // This returns a *ResourceData for the connection information. + // Guaranteed to never be nil. + ProvConnDataKey = contextKey("provider conn data") + + // This returns a *ResourceData for the config information. + // Guaranteed to never be nil. + ProvConfigDataKey = contextKey("provider config data") + + // This returns a terraform.UIOutput. Guaranteed to never be nil. + ProvOutputKey = contextKey("provider output") + + // This returns the raw InstanceState passed to Apply. Guaranteed to + // be set, but may be nil. + ProvRawStateKey = contextKey("provider raw state") +) + +// InternalValidate should be called to validate the structure +// of the provisioner. +// +// This should be called in a unit test to verify before release that this +// structure is properly configured for use. 
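Before the validation helper itself, a hedged sketch of how the pieces fit together (the `command` argument is invented): a minimal provisioner supplies a `Schema` plus an `ApplyFunc` that recovers its typed configuration and UI output from the context keys defined above:

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	p := &schema.Provisioner{
		Schema: map[string]*schema.Schema{
			"command": {Type: schema.TypeString, Required: true},
		},
		ApplyFunc: func(ctx context.Context) error {
			// Both context values are guaranteed non-nil by Apply.
			data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData)
			out := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput)
			out.Output(fmt.Sprintf("would run: %s", data.Get("command").(string)))
			return nil
		},
	}

	if err := p.InternalValidate(); err != nil {
		panic(err)
	}
	fmt.Println("provisioner validates")
}
```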
+func (p *Provisioner) InternalValidate() error { + if p == nil { + return errors.New("provisioner is nil") + } + + var validationErrors error + { + sm := schemaMap(p.ConnSchema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + } + + { + sm := schemaMap(p.Schema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + } + + if p.ApplyFunc == nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf( + "ApplyFunc must not be nil")) + } + + return validationErrors +} + +// StopContext returns a context that checks whether a provisioner is stopped. +func (p *Provisioner) StopContext() context.Context { + p.stopOnce.Do(p.stopInit) + return p.stopCtx +} + +func (p *Provisioner) stopInit() { + p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background()) +} + +// Stop implementation of terraform.ResourceProvisioner interface. +func (p *Provisioner) Stop() error { + p.stopOnce.Do(p.stopInit) + p.stopCtxCancel() + return nil +} + +func (p *Provisioner) Validate(config *terraform.ResourceConfig) ([]string, []error) { + if err := p.InternalValidate(); err != nil { + return nil, []error{fmt.Errorf( + "Internal validation of the provisioner failed! This is always a bug\n"+ + "with the provisioner itself, and not a user issue. Please report\n"+ + "this bug:\n\n%s", err)} + } + w := []string{} + e := []error{} + if p.Schema != nil { + w2, e2 := schemaMap(p.Schema).Validate(config) + w = append(w, w2...) + e = append(e, e2...) + } + if p.ValidateFunc != nil { + data := &ResourceData{ + schema: p.Schema, + config: config, + } + w2, e2 := p.ValidateFunc(data) + w = append(w, w2...) + e = append(e, e2...) + } + return w, e +} + +// Apply implementation of terraform.ResourceProvisioner interface. +func (p *Provisioner) Apply( + o terraform.UIOutput, + s *terraform.InstanceState, + c *terraform.ResourceConfig) error { + var connData, configData *ResourceData + + { + // We first need to turn the connection information into a + // terraform.ResourceConfig so that we can use that type to more + // easily build a ResourceData structure. We do this by simply treating + // the conn info as configuration input. + raw := make(map[string]interface{}) + if s != nil { + for k, v := range s.Ephemeral.ConnInfo { + raw[k] = v + } + } + + c, err := config.NewRawConfig(raw) + if err != nil { + return err + } + + sm := schemaMap(p.ConnSchema) + diff, err := sm.Diff(nil, terraform.NewResourceConfig(c)) + if err != nil { + return err + } + connData, err = sm.Data(nil, diff) + if err != nil { + return err + } + } + + { + // Build the configuration data. Doing this requires making a "diff" + // even though that's never used. We use that just to get the correct types. 
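+		// As with connData above, diffing the raw config against a nil
+		// state is only a vehicle for materializing schema-typed values.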
+ configMap := schemaMap(p.Schema) + diff, err := configMap.Diff(nil, c) + if err != nil { + return err + } + configData, err = configMap.Data(nil, diff) + if err != nil { + return err + } + } + + // Build the context and call the function + ctx := p.StopContext() + ctx = context.WithValue(ctx, ProvConnDataKey, connData) + ctx = context.WithValue(ctx, ProvConfigDataKey, configData) + ctx = context.WithValue(ctx, ProvOutputKey, o) + ctx = context.WithValue(ctx, ProvRawStateKey, s) + return p.ApplyFunc(ctx) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go new file mode 100644 index 0000000000..c8105588c8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go @@ -0,0 +1,478 @@ +package schema + +import ( + "errors" + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/terraform" +) + +// Resource represents a thing in Terraform that has a set of configurable +// attributes and a lifecycle (create, read, update, delete). +// +// The Resource schema is an abstraction that allows provider writers to +// worry only about CRUD operations while off-loading validation, diff +// generation, etc. to this higher level library. +// +// In spite of the name, this struct is not used only for terraform resources, +// but also for data sources. In the case of data sources, the Create, +// Update and Delete functions must not be provided. +type Resource struct { + // Schema is the schema for the configuration of this resource. + // + // The keys of this map are the configuration keys, and the values + // describe the schema of the configuration value. + // + // The schema is used to represent both configurable data as well + // as data that might be computed in the process of creating this + // resource. + Schema map[string]*Schema + + // SchemaVersion is the version number for this resource's Schema + // definition. The current SchemaVersion stored in the state for each + // resource. Provider authors can increment this version number + // when Schema semantics change. If the State's SchemaVersion is less than + // the current SchemaVersion, the InstanceState is yielded to the + // MigrateState callback, where the provider can make whatever changes it + // needs to update the state to be compatible to the latest version of the + // Schema. + // + // When unset, SchemaVersion defaults to 0, so provider authors can start + // their Versioning at any integer >= 1 + SchemaVersion int + + // MigrateState is responsible for updating an InstanceState with an old + // version to the format expected by the current version of the Schema. + // + // It is called during Refresh if the State's stored SchemaVersion is less + // than the current SchemaVersion of the Resource. + // + // The function is yielded the state's stored SchemaVersion and a pointer to + // the InstanceState that needs updating, as well as the configured + // provider's configured meta interface{}, in case the migration process + // needs to make any remote API calls. + MigrateState StateMigrateFunc + + // The functions below are the CRUD operations for this resource. + // + // The only optional operation is Update. If Update is not implemented, + // then updates will not be supported for this resource. + // + // The ResourceData parameter in the functions below are used to + // query configuration and changes for the resource as well as to set + // the ID, computed data, etc. 
+	//
+	// The interface{} parameter is the result of the ConfigureFunc in
+	// the provider for this resource. If the provider does not define
+	// a ConfigureFunc, this will be nil. This parameter should be used
+	// to store API clients, configuration structures, etc.
+	//
+	// If any errors occur during any of the operations, an error should be
+	// returned. If a resource was partially updated, be careful to enable
+	// partial state mode for ResourceData and use it accordingly.
+	//
+	// Exists is a function that is called to check if a resource still
+	// exists. If this returns false, then this will affect the diff
+	// accordingly. If this function isn't set, it will not be called. It
+	// is highly recommended to set it. The *ResourceData passed to Exists
+	// should _not_ be modified.
+	Create CreateFunc
+	Read   ReadFunc
+	Update UpdateFunc
+	Delete DeleteFunc
+	Exists ExistsFunc
+
+	// Importer is the ResourceImporter implementation for this resource.
+	// If this is nil, then this resource does not support importing. If
+	// this is non-nil, then it supports importing and ResourceImporter
+	// must be validated. The validity of ResourceImporter is verified
+	// by InternalValidate on Resource.
+	Importer *ResourceImporter
+
+	// If non-empty, this string is emitted as a warning during Validate.
+	// This is a private interface for now, for use by DataSourceResourceShim,
+	// and not for general use. (But maybe later...)
+	deprecationMessage string
+
+	// Timeouts allow users to specify specific time durations in which an
+	// operation should time out, to allow them to extend an action to suit their
+	// usage. For example, a user may specify a large Creation timeout for their
+	// AWS RDS Instance due to its size, or restoring from a snapshot.
+	// Resource implementors must enable Timeout support by adding the allowed
+	// actions (Create, Read, Update, Delete, Default) to the Resource struct, and
+	// accessing them in the matching methods.
+	Timeouts *ResourceTimeout
+}
+
+// See Resource documentation.
+type CreateFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type ReadFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type UpdateFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type DeleteFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type ExistsFunc func(*ResourceData, interface{}) (bool, error)
+
+// See Resource documentation.
+type StateMigrateFunc func(
+	int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
+
+// Apply creates, updates, and/or deletes a resource.
+func (r *Resource) Apply(
+	s *terraform.InstanceState,
+	d *terraform.InstanceDiff,
+	meta interface{}) (*terraform.InstanceState, error) {
+	data, err := schemaMap(r.Schema).Data(s, d)
+	if err != nil {
+		return s, err
+	}
+
+	// Instance Diff should have the timeout info, need to copy it over to the
+	// ResourceData meta
+	rt := ResourceTimeout{}
+	if _, ok := d.Meta[TimeoutKey]; ok {
+		if err := rt.DiffDecode(d); err != nil {
+			log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
+		}
+	} else {
+		log.Printf("[DEBUG] No meta timeoutkey found in Apply()")
+	}
+	data.timeouts = &rt
+
+	if s == nil {
+		// The Terraform API dictates that this should never happen, but
+		// it doesn't hurt to be safe in this case.
+ s = new(terraform.InstanceState) + } + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource since it is created + if err := r.Delete(data, meta); err != nil { + return r.recordCurrentSchemaVersion(data.State()), err + } + + // Make sure the ID is gone. + data.SetId("") + } + + // If we're only destroying, and not creating, then return + // now since we're done! + if !d.RequiresNew() { + return nil, nil + } + + // Reset the data to be stateless since we just destroyed + data, err = schemaMap(r.Schema).Data(nil, d) + // data was reset, need to re-apply the parsed timeouts + data.timeouts = &rt + if err != nil { + return nil, err + } + } + + err = nil + if data.Id() == "" { + // We're creating, it is a new resource. + data.MarkNewResource() + err = r.Create(data, meta) + } else { + if r.Update == nil { + return s, fmt.Errorf("doesn't support update") + } + + err = r.Update(data, meta) + } + + return r.recordCurrentSchemaVersion(data.State()), err +} + +// Diff returns a diff of this resource and is API compatible with the +// ResourceProvider interface. +func (r *Resource) Diff( + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + + t := &ResourceTimeout{} + err := t.ConfigDecode(r, c) + + if err != nil { + return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) + } + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c) + if err != nil { + return instanceDiff, err + } + + if instanceDiff != nil { + if err := t.DiffEncode(instanceDiff); err != nil { + log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) + } + } else { + log.Printf("[DEBUG] Instance Diff is nil in Diff()") + } + + return instanceDiff, err +} + +// Validate validates the resource configuration against the schema. +func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { + warns, errs := schemaMap(r.Schema).Validate(c) + + if r.deprecationMessage != "" { + warns = append(warns, r.deprecationMessage) + } + + return warns, errs +} + +// ReadDataApply loads the data for a data source, given a diff that +// describes the configuration arguments and desired computed attributes. +func (r *Resource) ReadDataApply( + d *terraform.InstanceDiff, + meta interface{}, +) (*terraform.InstanceState, error) { + + // Data sources are always built completely from scratch + // on each read, so the source state is always nil. + data, err := schemaMap(r.Schema).Data(nil, d) + if err != nil { + return nil, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + // Data sources can set an ID if they want, but they aren't + // required to; we'll provide a placeholder if they don't, + // to preserve the invariant that all resources have non-empty + // ids. + state.ID = "-" + } + + return r.recordCurrentSchemaVersion(state), err +} + +// Refresh refreshes the state of the resource. +func (r *Resource) Refresh( + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. 
+		data, err := schemaMap(r.Schema).Data(s, nil)
+		if err != nil {
+			return s, err
+		}
+		data.timeouts = &rt
+
+		exists, err := r.Exists(data, meta)
+		if err != nil {
+			return s, err
+		}
+		if !exists {
+			return nil, nil
+		}
+	}
+
+	needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
+	if needsMigration && r.MigrateState != nil {
+		s, err := r.MigrateState(stateSchemaVersion, s, meta)
+		if err != nil {
+			return s, err
+		}
+	}
+
+	data, err := schemaMap(r.Schema).Data(s, nil)
+	if err != nil {
+		return s, err
+	}
+	data.timeouts = &rt
+
+	err = r.Read(data, meta)
+	state := data.State()
+	if state != nil && state.ID == "" {
+		state = nil
+	}
+
+	return r.recordCurrentSchemaVersion(state), err
+}
+
+// InternalValidate should be called to validate the structure
+// of the resource.
+//
+// This should be called in a unit test for any resource to verify
+// before release that a resource is properly configured for use with
+// this library.
+//
+// Provider.InternalValidate() will automatically call this for all of
+// the resources it manages, so you don't need to call this manually if it
+// is part of a Provider.
+func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error {
+	if r == nil {
+		return errors.New("resource is nil")
+	}
+
+	if !writable {
+		if r.Create != nil || r.Update != nil || r.Delete != nil {
+			return fmt.Errorf("must not implement Create, Update or Delete")
+		}
+	}
+
+	tsm := topSchemaMap
+
+	if r.isTopLevel() && writable {
+		// All non-Computed attributes must be ForceNew if Update is not defined
+		if r.Update == nil {
+			nonForceNewAttrs := make([]string, 0)
+			for k, v := range r.Schema {
+				if !v.ForceNew && !v.Computed {
+					nonForceNewAttrs = append(nonForceNewAttrs, k)
+				}
+			}
+			if len(nonForceNewAttrs) > 0 {
+				return fmt.Errorf(
+					"No Update defined, must set ForceNew on: %#v", nonForceNewAttrs)
+			}
+		} else {
+			nonUpdateableAttrs := make([]string, 0)
+			for k, v := range r.Schema {
+				if v.ForceNew || v.Computed && !v.Optional {
+					nonUpdateableAttrs = append(nonUpdateableAttrs, k)
+				}
+			}
+			updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs)
+			if updateableAttrs == 0 {
+				return fmt.Errorf(
+					"All fields are ForceNew or Computed w/out Optional, Update is superfluous")
+			}
+		}
+
+		tsm = schemaMap(r.Schema)
+
+		// Read and Delete are required
+		if r.Read == nil {
+			return fmt.Errorf("Read must be implemented")
+		}
+		if r.Delete == nil {
+			return fmt.Errorf("Delete must be implemented")
+		}
+
+		// If we have an importer, we need to verify the importer.
+		if r.Importer != nil {
+			if err := r.Importer.InternalValidate(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return schemaMap(r.Schema).InternalValidate(tsm)
+}
+
+// Data returns a ResourceData struct for this Resource. Each return value
+// is a separate copy and can be safely modified differently.
+//
+// The data returned from this function has no actual effect on the Resource
+// itself (including the state given to this function).
+//
+// This function is useful for unit tests and ResourceImporter functions.
+func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
+	result, err := schemaMap(r.Schema).Data(s, nil)
+	if err != nil {
+		// At the time of writing, this isn't possible (Data never returns
+		// non-nil errors). We panic to find this in the future if we have to.
+		// I don't see a reason for Data to ever return an error.
+		panic(err)
+	}
+
+	// Set the schema version to latest by default
+	result.meta = map[string]interface{}{
+		"schema_version": strconv.Itoa(r.SchemaVersion),
+	}
+
+	return result
+}
+
+// TestResourceData yields a ResourceData filled with this resource's schema
+// for use in unit testing.
+//
+// TODO: May be able to be removed with the above ResourceData function.
+func (r *Resource) TestResourceData() *ResourceData {
+	return &ResourceData{
+		schema: r.Schema,
+	}
+}
+
+// Returns true if the resource is "top level", i.e. not a sub-resource.
+func (r *Resource) isTopLevel() bool {
+	// TODO: This is a heuristic; replace with a definitive attribute?
+	return r.Create != nil
+}
+
+// Determines if a given InstanceState needs to be migrated by checking the
+// stored version number against the current SchemaVersion
+func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
+	// Get the raw interface{} value for the schema version. If it doesn't
+	// exist or is nil then set it to zero.
+	raw := is.Meta["schema_version"]
+	if raw == nil {
+		raw = "0"
+	}
+
+	// Try to convert it to a string. If it isn't a string then we pretend
+	// that it isn't set at all. It should never not be a string unless it
+	// was manually tampered with.
+	rawString, ok := raw.(string)
+	if !ok {
+		rawString = "0"
+	}
+
+	stateSchemaVersion, _ := strconv.Atoi(rawString)
+	return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion
+}
+
+func (r *Resource) recordCurrentSchemaVersion(
+	state *terraform.InstanceState) *terraform.InstanceState {
+	if state != nil && r.SchemaVersion > 0 {
+		if state.Meta == nil {
+			state.Meta = make(map[string]interface{})
+		}
+		state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion)
+	}
+	return state
+}
+
+// Noop is a convenience implementation of a resource function which takes
+// no action and returns no error.
+func Noop(*ResourceData, interface{}) error {
+	return nil
+}
+
+// RemoveFromState is a convenience implementation of a resource function
+// which sets the resource ID to the empty string (to remove it from state)
+// and returns no error.
+func RemoveFromState(d *ResourceData, _ interface{}) error {
+	d.SetId("")
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
new file mode 100644
index 0000000000..b2bc8f6c7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
@@ -0,0 +1,502 @@
+package schema
+
+import (
+	"log"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceData is used to query and set the attributes of a resource.
+//
+// ResourceData is the primary argument received for CRUD operations on
+// a resource as well as configuration of a provider. It is a powerful
+// structure that can be used to not only query data, but check for changes,
+// define partial state updates, etc.
+//
+// The most relevant methods to take a look at are Get, Set, and Partial.
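+//
+// As an illustrative sketch (not part of the original source; the "name"
+// attribute is an assumption), a typical CRUD function reads and writes
+// attributes like so:
+//
+//	name := d.Get("name").(string)    // zero value ("") if unset
+//	if v, ok := d.GetOk("name"); ok { // distinguishes unset from zero value
+//		name = v.(string)
+//	}
+//	if err := d.Set("name", name); err != nil { // checked against the schema
+//		return err
+//	}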
+type ResourceData struct {
+	// Settable (internally)
+	schema   map[string]*Schema
+	config   *terraform.ResourceConfig
+	state    *terraform.InstanceState
+	diff     *terraform.InstanceDiff
+	meta     map[string]interface{}
+	timeouts *ResourceTimeout
+
+	// Don't set
+	multiReader *MultiLevelFieldReader
+	setWriter   *MapFieldWriter
+	newState    *terraform.InstanceState
+	partial     bool
+	partialMap  map[string]struct{}
+	once        sync.Once
+	isNew       bool
+}
+
+// getResult is the internal structure that is generated when a Get
+// is called that contains some extra data that might be used.
+type getResult struct {
+	Value          interface{}
+	ValueProcessed interface{}
+	Computed       bool
+	Exists         bool
+	Schema         *Schema
+}
+
+// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary
+// values, bypassing schema. This MUST NOT be used in normal circumstances -
+// it exists only to support the remote_state data source.
+func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
+	d.once.Do(d.init)
+
+	d.setWriter.unsafeWriteField(key, value)
+}
+
+// Get returns the data for the given key, or nil if the key doesn't exist
+// in the schema.
+//
+// If the key does exist in the schema but doesn't exist in the configuration,
+// then the default value for that type will be returned. For strings, this is
+// "", for numbers it is 0, etc.
+//
+// If you want to test if something is set at all in the configuration,
+// use GetOk.
+func (d *ResourceData) Get(key string) interface{} {
+	v, _ := d.GetOk(key)
+	return v
+}
+
+// GetChange returns the old and new value for a given key.
+//
+// HasChange should be used to check if a change exists. It is possible
+// that both the old and new value are the same if the old value was not
+// set and the new value is. This is common, for example, for boolean
+// fields which have a zero value of false.
+func (d *ResourceData) GetChange(key string) (interface{}, interface{}) {
+	o, n := d.getChange(key, getSourceState, getSourceDiff)
+	return o.Value, n.Value
+}
+
+// GetOk returns the data for the given key and whether or not the key
+// has been set to a non-zero value at some point.
+//
+// The first result will not necessarily be nil if the value doesn't exist.
+// The second result should be checked to determine this information.
+func (d *ResourceData) GetOk(key string) (interface{}, bool) {
+	r := d.getRaw(key, getSourceSet)
+	exists := r.Exists && !r.Computed
+	if exists {
+		// If it exists, we also want to verify it is not the zero-value.
+		value := r.Value
+		zero := r.Schema.Type.Zero()
+
+		if eq, ok := value.(Equal); ok {
+			exists = !eq.Equal(zero)
+		} else {
+			exists = !reflect.DeepEqual(value, zero)
+		}
+	}
+
+	return r.Value, exists
+}
+
+func (d *ResourceData) getRaw(key string, level getSource) getResult {
+	var parts []string
+	if key != "" {
+		parts = strings.Split(key, ".")
+	}
+
+	return d.get(parts, level)
+}
+
+// HasChange returns whether or not the given key has been changed.
+func (d *ResourceData) HasChange(key string) bool {
+	o, n := d.GetChange(key)
+
+	// If the type implements the Equal interface, then call that
+	// instead of just doing a reflect.DeepEqual. An example where this is
+	// needed is *Set
+	if eq, ok := o.(Equal); ok {
+		return !eq.Equal(n)
+	}
+
+	return !reflect.DeepEqual(o, n)
+}
+
+// Partial turns partial state mode on/off.
+//
+// When partial state mode is enabled, then only key prefixes specified
+// by SetPartial will be in the final state. This allows providers to return
+// partial states for partially applied resources (when errors occur).
+func (d *ResourceData) Partial(on bool) {
+	d.partial = on
+	if on {
+		if d.partialMap == nil {
+			d.partialMap = make(map[string]struct{})
+		}
+	} else {
+		d.partialMap = nil
+	}
+}
+
+// Set sets the value for the given key.
+//
+// If the key is invalid or the value is not a correct type, an error
+// will be returned.
+func (d *ResourceData) Set(key string, value interface{}) error {
+	d.once.Do(d.init)
+
+	// If the value is a pointer to a non-struct, get its value and
+	// use that. This allows Set to take a pointer to primitives to
+	// simplify the interface.
+	reflectVal := reflect.ValueOf(value)
+	if reflectVal.Kind() == reflect.Ptr {
+		if reflectVal.IsNil() {
+			// If the pointer is nil, then the value is just nil
+			value = nil
+		} else {
+			// Otherwise, we dereference the pointer as long as it's not
+			// a pointer to a struct, since struct pointers are allowed.
+			reflectVal = reflect.Indirect(reflectVal)
+			if reflectVal.Kind() != reflect.Struct {
+				value = reflectVal.Interface()
+			}
+		}
+	}
+
+	return d.setWriter.WriteField(strings.Split(key, "."), value)
+}
+
+// SetPartial adds the key to the final state output while
+// in partial state mode. The key must be a root key in the schema (i.e.
+// it cannot be "list.0").
+//
+// If partial state mode is disabled, then this has no effect. Additionally,
+// whenever partial state mode is toggled, the partial data is cleared.
+func (d *ResourceData) SetPartial(k string) {
+	if d.partial {
+		d.partialMap[k] = struct{}{}
+	}
+}
+
+func (d *ResourceData) MarkNewResource() {
+	d.isNew = true
+}
+
+func (d *ResourceData) IsNewResource() bool {
+	return d.isNew
+}
+
+// Id returns the ID of the resource.
+func (d *ResourceData) Id() string {
+	var result string
+
+	if d.state != nil {
+		result = d.state.ID
+	}
+
+	if d.newState != nil {
+		result = d.newState.ID
+	}
+
+	return result
+}
+
+// ConnInfo returns the connection info for this resource.
+func (d *ResourceData) ConnInfo() map[string]string {
+	if d.newState != nil {
+		return d.newState.Ephemeral.ConnInfo
+	}
+
+	if d.state != nil {
+		return d.state.Ephemeral.ConnInfo
+	}
+
+	return nil
+}
+
+// SetId sets the ID of the resource. If the value is blank, then the
+// resource is destroyed.
+func (d *ResourceData) SetId(v string) {
+	d.once.Do(d.init)
+	d.newState.ID = v
+}
+
+// SetConnInfo sets the connection info for a resource.
+func (d *ResourceData) SetConnInfo(v map[string]string) {
+	d.once.Do(d.init)
+	d.newState.Ephemeral.ConnInfo = v
+}
+
+// SetType sets the ephemeral type for the data. This is only required
+// for importing.
+func (d *ResourceData) SetType(t string) {
+	d.once.Do(d.init)
+	d.newState.Ephemeral.Type = t
+}
+
+// State returns the new InstanceState after the diff and any Set
+// calls.
+func (d *ResourceData) State() *terraform.InstanceState {
+	var result terraform.InstanceState
+	result.ID = d.Id()
+	result.Meta = d.meta
+
+	// If we have no ID, then this resource doesn't exist and we just
+	// return nil.
+	if result.ID == "" {
+		return nil
+	}
+
+	if d.timeouts != nil {
+		if err := d.timeouts.StateEncode(&result); err != nil {
+			log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err)
+		}
+	}
+
+	// Look for a magic key in the schema that determines we skip the
+	// integrity check of fields existing in the schema, allowing dynamic
+	// keys to be created.
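+	// For illustration (the field settings below are an assumption; only the
+	// magic key name itself is load-bearing), a schema opts in by declaring:
+	//
+	//	"__has_dynamic_attributes": &Schema{Type: TypeString, Optional: true},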
+ hasDynamicAttributes := false + for k, _ := range d.schema { + if k == "__has_dynamic_attributes" { + hasDynamicAttributes = true + log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) + } + } + + // In order to build the final state attributes, we read the full + // attribute set as a map[string]interface{}, write it to a MapFieldWriter, + // and then use that map. + rawMap := make(map[string]interface{}) + for k := range d.schema { + source := getSourceSet + if d.partial { + source = getSourceState + if _, ok := d.partialMap[k]; ok { + source = getSourceSet + } + } + + raw := d.get([]string{k}, source) + if raw.Exists && !raw.Computed { + rawMap[k] = raw.Value + if raw.ValueProcessed != nil { + rawMap[k] = raw.ValueProcessed + } + } + } + + mapW := &MapFieldWriter{Schema: d.schema} + if err := mapW.WriteField(nil, rawMap); err != nil { + return nil + } + + result.Attributes = mapW.Map() + + if hasDynamicAttributes { + // If we have dynamic attributes, just copy the attributes map + // one for one into the result attributes. + for k, v := range d.setWriter.Map() { + // Don't clobber schema values. This limits usage of dynamic + // attributes to names which _do not_ conflict with schema + // keys! + if _, ok := result.Attributes[k]; !ok { + result.Attributes[k] = v + } + } + } + + if d.newState != nil { + result.Ephemeral = d.newState.Ephemeral + } + + // TODO: This is hacky and we can remove this when we have a proper + // state writer. We should instead have a proper StateFieldWriter + // and use that. + for k, schema := range d.schema { + if schema.Type != TypeMap { + continue + } + + if result.Attributes[k] == "" { + delete(result.Attributes, k) + } + } + + if v := d.Id(); v != "" { + result.Attributes["id"] = d.Id() + } + + if d.state != nil { + result.Tainted = d.state.Tainted + } + + return &result +} + +// Timeout returns the data for the given timeout key +// Returns a duration of 20 minutes for any key not found, or not found and no default. +func (d *ResourceData) Timeout(key string) time.Duration { + key = strings.ToLower(key) + + var timeout *time.Duration + switch key { + case TimeoutCreate: + timeout = d.timeouts.Create + case TimeoutRead: + timeout = d.timeouts.Read + case TimeoutUpdate: + timeout = d.timeouts.Update + case TimeoutDelete: + timeout = d.timeouts.Delete + } + + if timeout != nil { + return *timeout + } + + if d.timeouts.Default != nil { + return *d.timeouts.Default + } + + // Return system default of 20 minutes + return 20 * time.Minute +} + +func (d *ResourceData) init() { + // Initialize the field that will store our new state + var copyState terraform.InstanceState + if d.state != nil { + copyState = *d.state.DeepCopy() + } + d.newState = ©State + + // Initialize the map for storing set data + d.setWriter = &MapFieldWriter{Schema: d.schema} + + // Initialize the reader for getting data from the + // underlying sources (config, diff, etc.) 
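+	// The readers built below are layered: "state" is the lowest level,
+	// followed by "config" and "diff", with explicit Set calls ("set") on
+	// top. Merged reads consult them in that order, so later levels
+	// override earlier ones.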
+ readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["set"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.setWriter.Map()), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "set", + }, + + Readers: readers, + } +} + +func (d *ResourceData) diffChange( + k string) (interface{}, interface{}, bool, bool) { + // Get the change between the state and the config. + o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) + if !o.Exists { + o.Value = nil + } + if !n.Exists { + n.Value = nil + } + + // Return the old, new, and whether there is a change + return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed +} + +func (d *ResourceData) getChange( + k string, + oldLevel getSource, + newLevel getSource) (getResult, getResult) { + var parts, parts2 []string + if k != "" { + parts = strings.Split(k, ".") + parts2 = strings.Split(k, ".") + } + + o := d.get(parts, oldLevel) + n := d.get(parts2, newLevel) + return o, n +} + +func (d *ResourceData) get(addr []string, source getSource) getResult { + d.once.Do(d.init) + + level := "set" + flags := source & ^getSourceLevelMask + exact := flags&getSourceExact != 0 + source = source & getSourceLevelMask + if source >= getSourceSet { + level = "set" + } else if source >= getSourceDiff { + level = "diff" + } else if source >= getSourceConfig { + level = "config" + } else { + level = "state" + } + + var result FieldReadResult + var err error + if exact { + result, err = d.multiReader.ReadFieldExact(addr, level) + } else { + result, err = d.multiReader.ReadFieldMerge(addr, level) + } + if err != nil { + panic(err) + } + + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go new file mode 100644 index 0000000000..7dd655de3d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go @@ -0,0 +1,17 @@ +package schema + +//go:generate stringer -type=getSource resource_data_get_source.go + +// getSource represents the level we want to get for a value (internally). +// Any source less than or equal to the level will be loaded (whichever +// has a value first). 
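+//
+// For example (illustrative): a read at getSourceDiff merges the state,
+// config, and diff levels, while getSourceDiff|getSourceExact reads the
+// diff level alone.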
+type getSource byte + +const ( + getSourceState getSource = 1 << iota + getSourceConfig + getSourceDiff + getSourceSet + getSourceExact // Only get from the _exact_ level + getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet +) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go new file mode 100644 index 0000000000..5dada3caf3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go @@ -0,0 +1,52 @@ +package schema + +// ResourceImporter defines how a resource is imported in Terraform. This +// can be set onto a Resource struct to make it Importable. Not all resources +// have to be importable; if a Resource doesn't have a ResourceImporter then +// it won't be importable. +// +// "Importing" in Terraform is the process of taking an already-created +// resource and bringing it under Terraform management. This can include +// updating Terraform state, generating Terraform configuration, etc. +type ResourceImporter struct { + // The functions below must all be implemented for importing to work. + + // State is called to convert an ID to one or more InstanceState to + // insert into the Terraform state. If this isn't specified, then + // the ID is passed straight through. + State StateFunc +} + +// StateFunc is the function called to import a resource into the +// Terraform state. It is given a ResourceData with only ID set. This +// ID is going to be an arbitrary value given by the user and may not map +// directly to the ID format that the resource expects, so that should +// be validated. +// +// This should return a slice of ResourceData that turn into the state +// that was imported. This might be as simple as returning only the argument +// that was given to the function. In other cases (such as AWS security groups), +// an import may fan out to multiple resources and this will have to return +// multiple. +// +// To create the ResourceData structures for other resource types (if +// you have to), instantiate your resource and call the Data function. +type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) + +// InternalValidate should be called to validate the structure of this +// importer. This should be called in a unit test. +// +// Resource.InternalValidate() will automatically call this, so this doesn't +// need to be called manually. Further, Resource.InternalValidate() is +// automatically called by Provider.InternalValidate(), so you only need +// to internal validate the provider. +func (r *ResourceImporter) InternalValidate() error { + return nil +} + +// ImportStatePassthrough is an implementation of StateFunc that can be +// used to simply pass the ID directly through. This should be used only +// in the case that an ID-only refresh is possible. 
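+//
+// A typical wiring, as a sketch (the surrounding resource fields are
+// assumed):
+//
+//	&Resource{
+//		// ...
+//		Importer: &ResourceImporter{
+//			State: ImportStatePassthrough,
+//		},
+//	}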
+func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) {
+	return []*ResourceData{d}, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
new file mode 100644
index 0000000000..445819f0f9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
@@ -0,0 +1,237 @@
+package schema
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/mitchellh/copystructure"
+)
+
+const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0"
+const TimeoutsConfigKey = "timeouts"
+
+const (
+	TimeoutCreate  = "create"
+	TimeoutRead    = "read"
+	TimeoutUpdate  = "update"
+	TimeoutDelete  = "delete"
+	TimeoutDefault = "default"
+)
+
+func timeoutKeys() []string {
+	return []string{
+		TimeoutCreate,
+		TimeoutRead,
+		TimeoutUpdate,
+		TimeoutDelete,
+		TimeoutDefault,
+	}
+}
+
+// DefaultTimeout accepts a time.Duration, int64, or float64 and returns a
+// pointer to the corresponding time.Duration.
+func DefaultTimeout(tx interface{}) *time.Duration {
+	var td time.Duration
+	switch raw := tx.(type) {
+	case time.Duration:
+		return &raw
+	case int64:
+		td = time.Duration(raw)
+	case float64:
+		td = time.Duration(int64(raw))
+	default:
+		log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx)
+	}
+	return &td
+}
+
+type ResourceTimeout struct {
+	Create, Read, Update, Delete, Default *time.Duration
+}
+
+// ConfigDecode takes a schema and the configuration (available in Diff) and
+// validates and parses the timeouts into `t`.
+func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error {
+	if s.Timeouts != nil {
+		raw, err := copystructure.Copy(s.Timeouts)
+		if err != nil {
+			log.Printf("[DEBUG] Error with deep copy: %s", err)
+		}
+		*t = *raw.(*ResourceTimeout)
+	}
+
+	if raw, ok := c.Config[TimeoutsConfigKey]; ok {
+		if configTimeouts, ok := raw.([]map[string]interface{}); ok {
+			for _, timeoutValues := range configTimeouts {
+				// Loop through each timeout given in the configuration and
+				// validate it against the Timeouts defined in the resource
+				for timeKey, timeValue := range timeoutValues {
+					// validate that we're dealing with the normal CRUD actions
+					var found bool
+					for _, key := range timeoutKeys() {
+						if timeKey == key {
+							found = true
+							break
+						}
+					}
+
+					if !found {
+						return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
+					}
+
+					// Get timeout
+					rt, err := time.ParseDuration(timeValue.(string))
+					if err != nil {
+						return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
+					}
+
+					var timeout *time.Duration
+					switch timeKey {
+					case TimeoutCreate:
+						timeout = t.Create
+					case TimeoutUpdate:
+						timeout = t.Update
+					case TimeoutRead:
+						timeout = t.Read
+					case TimeoutDelete:
+						timeout = t.Delete
+					case TimeoutDefault:
+						timeout = t.Default
+					}
+
+					// If the resource has not declared this in the definition,
+					// then error with an unsupported message
+					if timeout == nil {
+						return unsupportedTimeoutKeyError(timeKey)
+					}
+
+					*timeout = rt
+				}
+			}
+		} else {
+			log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
+		}
+	}
+
+	return nil
+}
+
+func unsupportedTimeoutKeyError(key string) error {
+	return fmt.Errorf("Timeout Key (%s) is not supported", key)
+}
+
+// DiffEncode, StateEncode, and their Decode counterparts are analogous to the
+// Go stdlib json Encoder/Decoder pattern: they encode/decode a timeouts
+// struct to and from an instance diff, which is where the timeout data is
+// stored after a diff so that it can be passed into Apply.
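+//
+// As an illustrative note (the concrete duration is an assumption), after
+// encoding, the data lives under Meta[TimeoutKey] as a map of action name
+// to nanoseconds:
+//
+//	Meta[TimeoutKey] = map[string]interface{}{
+//		"create": (10 * time.Minute).Nanoseconds(),
+//	}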
+// +// StateEncode encodes the timeout into the ResourceData's InstanceState for +// saving to state +// +func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error { + return t.metaEncode(id) +} + +func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error { + return t.metaEncode(is) +} + +// metaEncode encodes the ResourceTimeout into a map[string]interface{} format +// and stores it in the Meta field of the interface it's given. +// Assumes the interface is either *terraform.InstanceState or +// *terraform.InstanceDiff, returns an error otherwise +func (t *ResourceTimeout) metaEncode(ids interface{}) error { + m := make(map[string]interface{}) + + if t.Create != nil { + m[TimeoutCreate] = t.Create.Nanoseconds() + } + if t.Read != nil { + m[TimeoutRead] = t.Read.Nanoseconds() + } + if t.Update != nil { + m[TimeoutUpdate] = t.Update.Nanoseconds() + } + if t.Delete != nil { + m[TimeoutDelete] = t.Delete.Nanoseconds() + } + if t.Default != nil { + m[TimeoutDefault] = t.Default.Nanoseconds() + // for any key above that is nil, if default is specified, we need to + // populate it with the default + for _, k := range timeoutKeys() { + if _, ok := m[k]; !ok { + m[k] = t.Default.Nanoseconds() + } + } + } + + // only add the Timeout to the Meta if we have values + if len(m) > 0 { + switch instance := ids.(type) { + case *terraform.InstanceDiff: + if instance.Meta == nil { + instance.Meta = make(map[string]interface{}) + } + instance.Meta[TimeoutKey] = m + case *terraform.InstanceState: + if instance.Meta == nil { + instance.Meta = make(map[string]interface{}) + } + instance.Meta[TimeoutKey] = m + default: + return fmt.Errorf("Error matching type for Diff Encode") + } + } + + return nil +} + +func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error { + return t.metaDecode(id) +} +func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error { + return t.metaDecode(is) +} + +func (t *ResourceTimeout) metaDecode(ids interface{}) error { + var rawMeta interface{} + var ok bool + switch rawInstance := ids.(type) { + case *terraform.InstanceDiff: + rawMeta, ok = rawInstance.Meta[TimeoutKey] + if !ok { + return nil + } + case *terraform.InstanceState: + rawMeta, ok = rawInstance.Meta[TimeoutKey] + if !ok { + return nil + } + default: + return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids) + } + + times := rawMeta.(map[string]interface{}) + if len(times) == 0 { + return nil + } + + if v, ok := times[TimeoutCreate]; ok { + t.Create = DefaultTimeout(v) + } + if v, ok := times[TimeoutRead]; ok { + t.Read = DefaultTimeout(v) + } + if v, ok := times[TimeoutUpdate]; ok { + t.Update = DefaultTimeout(v) + } + if v, ok := times[TimeoutDelete]; ok { + t.Delete = DefaultTimeout(v) + } + if v, ok := times[TimeoutDefault]; ok { + t.Default = DefaultTimeout(v) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go new file mode 100644 index 0000000000..632672ae06 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go @@ -0,0 +1,1537 @@ +// schema is a high-level framework for easily writing new providers +// for Terraform. Usage of schema is recommended over attempting to write +// to the low-level plugin interfaces manually. +// +// schema breaks down provider creation into simple CRUD operations for +// resources. The logic of diffing, destroying before creating, updating +// or creating, etc. 
is all handled by the framework. The plugin author +// only needs to implement a configuration schema and the CRUD operations and +// everything else is meant to just work. +// +// A good starting point is to view the Provider structure. +package schema + +import ( + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/mapstructure" +) + +// type used for schema package context keys +type contextKey string + +// Schema is used to describe the structure of a value. +// +// Read the documentation of the struct elements for important details. +type Schema struct { + // Type is the type of the value and must be one of the ValueType values. + // + // This type not only determines what type is expected/valid in configuring + // this value, but also what type is returned when ResourceData.Get is + // called. The types returned by Get are: + // + // TypeBool - bool + // TypeInt - int + // TypeFloat - float64 + // TypeString - string + // TypeList - []interface{} + // TypeMap - map[string]interface{} + // TypeSet - *schema.Set + // + Type ValueType + + // If one of these is set, then this item can come from the configuration. + // Both cannot be set. If Optional is set, the value is optional. If + // Required is set, the value is required. + // + // One of these must be set if the value is not computed. That is: + // value either comes from the config, is computed, or is both. + Optional bool + Required bool + + // If this is non-nil, the provided function will be used during diff + // of this field. If this is nil, a default diff for the type of the + // schema will be used. + // + // This allows comparison based on something other than primitive, list + // or map equality - for example SSH public keys may be considered + // equivalent regardless of trailing whitespace. + DiffSuppressFunc SchemaDiffSuppressFunc + + // If this is non-nil, then this will be a default value that is used + // when this item is not set in the configuration. + // + // DefaultFunc can be specified to compute a dynamic default. + // Only one of Default or DefaultFunc can be set. If DefaultFunc is + // used then its return value should be stable to avoid generating + // confusing/perpetual diffs. + // + // Changing either Default or the return value of DefaultFunc can be + // a breaking change, especially if the attribute in question has + // ForceNew set. If a default needs to change to align with changing + // assumptions in an upstream API then it may be necessary to also use + // the MigrateState function on the resource to change the state to match, + // or have the Read function adjust the state value to align with the + // new default. + // + // If Required is true above, then Default cannot be set. DefaultFunc + // can be set with Required. If the DefaultFunc returns nil, then there + // will be no default and the user will be asked to fill it in. + // + // If either of these is set, then the user won't be asked for input + // for this key if the default is not nil. + Default interface{} + DefaultFunc SchemaDefaultFunc + + // Description is used as the description for docs or asking for user + // input. It should be relatively short (a few sentences max) and should + // be formatted to fit a CLI. + Description string + + // InputDefault is the default value to use for when inputs are requested. + // This differs from Default in that if Default is set, no input is + // asked for. If Input is asked, this will be the default value offered. 
+	InputDefault string
+
+	// The fields below relate to diffs.
+	//
+	// If Computed is true, then the result of this value is computed
+	// (unless specified by config) on creation.
+	//
+	// If ForceNew is true, then a change in this resource necessitates
+	// the creation of a new resource.
+	//
+	// StateFunc is a function called to change the value of this before
+	// storing it in the state (and likewise before comparing for diffs).
+	// A typical use for this is with large strings: you may want to simply
+	// store the hash of them.
+	Computed  bool
+	ForceNew  bool
+	StateFunc SchemaStateFunc
+
+	// The following fields are only set for a TypeList or TypeSet Type.
+	//
+	// Elem must be either a *Schema or a *Resource only if the Type is
+	// TypeList, and represents what the element type is. If it is *Schema,
+	// the element type is just a simple value. If it is *Resource, the
+	// element type is a complex structure, potentially with its own lifecycle.
+	//
+	// MaxItems defines the maximum number of items that can exist within a
+	// TypeSet or TypeList. A specific use case would be a TypeSet that is
+	// being used to wrap a complex structure, where more than one instance
+	// would cause instability.
+	//
+	// MinItems defines the minimum number of items that can exist within a
+	// TypeSet or TypeList. A specific use case would be a TypeSet that is
+	// being used to wrap a complex structure, where fewer than one instance
+	// would cause instability.
+	//
+	// PromoteSingle, if true, will allow single elements to be standalone
+	// and promote them to a list. For example "foo" would be promoted to
+	// ["foo"] automatically. This is primarily for legacy reasons and the
+	// ambiguity is not recommended for new usage. Promotion is only allowed
+	// for primitive element types.
+	Elem          interface{}
+	MaxItems      int
+	MinItems      int
+	PromoteSingle bool
+
+	// The following fields are only valid for a TypeSet type.
+	//
+	// Set defines a function to determine the unique ID of an item so that
+	// a proper set can be built.
+	Set SchemaSetFunc
+
+	// ComputedWhen is a set of queries on the configuration. Whenever any
+	// of these things is changed, it will require a recompute (this requires
+	// that Computed is set to true).
+	//
+	// NOTE: This currently does not work.
+	ComputedWhen []string
+
+	// ConflictsWith is a set of schema keys that conflict with this schema.
+	// This will only check that they're set in the _config_. This will not
+	// raise an error for a malfunctioning resource that sets a conflicting
+	// key.
+	ConflictsWith []string
+
+	// When Deprecated is set, this attribute is deprecated.
+	//
+	// A deprecated field still works, but will probably stop working in the
+	// near future. This string is the message shown to the user with
+	// instructions on how to address the deprecation.
+	Deprecated string
+
+	// When Removed is set, this attribute has been removed from the schema.
+	//
+	// Removed attributes can be left in the Schema to generate informative error
+	// messages for the user when they show up in resource configurations.
+	// This string is the message shown to the user with instructions on
+	// what to do about the removed attribute.
+	Removed string
+
+	// ValidateFunc allows individual fields to define arbitrary validation
+	// logic. It is yielded the provided config value as an interface{} that is
+	// guaranteed to be of the proper Schema type, and it can yield warnings or
+	// errors based on inspection of that value.
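+	//
+	// A sketch of such a function (the one-word constraint is illustrative,
+	// not from the original source):
+	//
+	//	ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
+	//		if strings.Contains(v.(string), " ") {
+	//			es = append(es, fmt.Errorf("%q must not contain spaces", k))
+	//		}
+	//		return
+	//	},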
+	//
+	// ValidateFunc currently only works for primitive types.
+	ValidateFunc SchemaValidateFunc
+
+	// Sensitive ensures that the attribute's value does not get displayed in
+	// logs or regular output. It should be used for passwords or other
+	// secret fields. Future versions of Terraform may encrypt these
+	// values.
+	Sensitive bool
+}
+
+// SchemaDiffSuppressFunc is a function which can be used to determine
+// whether a detected diff on a schema element is "valid" or not, and
+// suppress it from the plan if necessary.
+//
+// Return true if the diff should be suppressed, false to retain it.
+type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool
+
+// SchemaDefaultFunc is a function called to return a default value for
+// a field.
+type SchemaDefaultFunc func() (interface{}, error)
+
+// EnvDefaultFunc is a helper function that returns the value of the
+// given environment variable, if one exists, or the default value
+// otherwise.
+func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc {
+	return func() (interface{}, error) {
+		if v := os.Getenv(k); v != "" {
+			return v, nil
+		}
+
+		return dv, nil
+	}
+}
+
+// MultiEnvDefaultFunc is a helper function that returns the value of the first
+// environment variable in the given list that is set to a non-empty value. If
+// none of the environment variables is set, the default value is returned.
+func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc {
+	return func() (interface{}, error) {
+		for _, k := range ks {
+			if v := os.Getenv(k); v != "" {
+				return v, nil
+			}
+		}
+		return dv, nil
+	}
+}
+
+// SchemaSetFunc is a function that must return a unique ID for the given
+// element. This unique ID is used to store the element in a hash.
+type SchemaSetFunc func(interface{}) int
+
+// SchemaStateFunc is a function used to convert some type to a string
+// to be stored in the state.
+type SchemaStateFunc func(interface{}) string
+
+// SchemaValidateFunc is a function used to validate a single field in the
+// schema.
+type SchemaValidateFunc func(interface{}, string) ([]string, []error)
+
+func (s *Schema) GoString() string {
+	return fmt.Sprintf("*%#v", *s)
+}
+
+// DefaultValue returns a default value for this schema by either reading
+// Default or evaluating DefaultFunc. If neither of these is defined, it
+// returns nil.
+func (s *Schema) DefaultValue() (interface{}, error) {
+	if s.Default != nil {
+		return s.Default, nil
+	}
+
+	if s.DefaultFunc != nil {
+		defaultValue, err := s.DefaultFunc()
+		if err != nil {
+			return nil, fmt.Errorf("error loading default: %s", err)
+		}
+		return defaultValue, nil
+	}
+
+	return nil, nil
+}
+
+// ZeroValue returns a zero value for the schema.
+func (s *Schema) ZeroValue() interface{} {
+	// If it's a set then we'll do a bit of extra work to provide the
+	// right hashing function in our empty value.
+ if s.Type == TypeSet { + setFunc := s.Set + if setFunc == nil { + // Default set function uses the schema to hash the whole value + elem := s.Elem + switch t := elem.(type) { + case *Schema: + setFunc = HashSchema(t) + case *Resource: + setFunc = HashResource(t) + default: + panic("invalid set element type") + } + } + return &Set{F: setFunc} + } else { + return s.Type.Zero() + } +} + +func (s *Schema) finalizeDiff( + d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff { + if d == nil { + return d + } + + if s.Type == TypeBool { + normalizeBoolString := func(s string) string { + switch s { + case "0": + return "false" + case "1": + return "true" + } + return s + } + d.Old = normalizeBoolString(d.Old) + d.New = normalizeBoolString(d.New) + } + + if s.Computed && !d.NewRemoved && d.New == "" { + // Computed attribute without a new value set + d.NewComputed = true + } + + if s.ForceNew { + // ForceNew, mark that this field is requiring new under the + // following conditions, explained below: + // + // * Old != New - There is a change in value. This field + // is therefore causing a new resource. + // + // * NewComputed - This field is being computed, hence a + // potential change in value, mark as causing a new resource. + d.RequiresNew = d.Old != d.New || d.NewComputed + } + + if d.NewRemoved { + return d + } + + if s.Computed { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + return nil + } + + if d.New == "" { + // Computed attribute without a new value set + d.NewComputed = true + } + } + + if s.Sensitive { + // Set the Sensitive flag so output is hidden in the UI + d.Sensitive = true + } + + return d +} + +// schemaMap is a wrapper that adds nice functions on top of schemas. +type schemaMap map[string]*Schema + +// Data returns a ResourceData for the given schema, state, and diff. +// +// The diff is optional. +func (m schemaMap) Data( + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*ResourceData, error) { + return &ResourceData{ + schema: m, + state: s, + diff: d, + }, nil +} + +// Diff returns the diff for a resource given the schema map, +// state, and configuration. +func (m schemaMap) Diff( + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + result := new(terraform.InstanceDiff) + result.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Make sure to mark if the resource is tainted + if s != nil { + result.DestroyTainted = s.Tainted + } + + d := &ResourceData{ + schema: m, + state: s, + config: c, + } + + for k, schema := range m { + err := m.diff(k, schema, result, d, false) + if err != nil { + return nil, err + } + } + + // If the diff requires a new resource, then we recompute the diff + // so we have the complete new resource diff, and preserve the + // RequiresNew fields where necessary so the user knows exactly what + // caused that. + if result.RequiresNew() { + // Create the new diff + result2 := new(terraform.InstanceDiff) + result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Preserve the DestroyTainted flag + result2.DestroyTainted = result.DestroyTainted + + // Reset the data to not contain state. We have to call init() + // again in order to reset the FieldReaders. 
+ d.state = nil + d.init() + + // Perform the diff again + for k, schema := range m { + err := m.diff(k, schema, result2, d, false) + if err != nil { + return nil, err + } + } + + // Force all the fields to not force a new since we know what we + // want to force new. + for k, attr := range result2.Attributes { + if attr == nil { + continue + } + + if attr.RequiresNew { + attr.RequiresNew = false + } + + if s != nil { + attr.Old = s.Attributes[k] + } + } + + // Now copy in all the requires new diffs... + for k, attr := range result.Attributes { + if attr == nil { + continue + } + + newAttr, ok := result2.Attributes[k] + if !ok { + newAttr = attr + } + + if attr.RequiresNew { + newAttr.RequiresNew = true + } + + result2.Attributes[k] = newAttr + } + + // And set the diff! + result = result2 + } + + // Remove any nil diffs just to keep things clean + for k, v := range result.Attributes { + if v == nil { + delete(result.Attributes, k) + } + } + + // Go through and detect all of the ComputedWhens now that we've + // finished the diff. + // TODO + + if result.Empty() { + // If we don't have any diff elements, just return nil + return nil, nil + } + + return result, nil +} + +// Input implements the terraform.ResourceProvider method by asking +// for input for required configuration keys that don't have a value. +func (m schemaMap) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + keys := make([]string, 0, len(m)) + for k, _ := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := m[k] + + // Skip things that don't require config, if that is even valid + // for a provider schema. + // Required XOR Optional must always be true to validate, so we only + // need to check one. + if v.Optional { + continue + } + + // Deprecated fields should never prompt + if v.Deprecated != "" { + continue + } + + // Skip things that have a value of some sort already + if _, ok := c.Raw[k]; ok { + continue + } + + // Skip if it has a default value + defaultValue, err := v.DefaultValue() + if err != nil { + return nil, fmt.Errorf("%s: error loading default: %s", k, err) + } + if defaultValue != nil { + continue + } + + var value interface{} + switch v.Type { + case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList: + continue + case TypeString: + value, err = m.inputString(input, k, v) + default: + panic(fmt.Sprintf("Unknown type for input: %#v", v.Type)) + } + + if err != nil { + return nil, fmt.Errorf( + "%s: %s", k, err) + } + + c.Config[k] = value + } + + return c, nil +} + +// Validate validates the configuration against this schema mapping. +func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { + return m.validateObject("", m, c) +} + +// InternalValidate validates the format of this schema. This should be called +// from a unit test (and not in user-path code) to verify that a schema +// is properly built. 
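+//
+// In practice this is usually reached from a provider test via
+// Resource.InternalValidate or Provider.InternalValidate; as a sketch
+// (resourceExample is an assumed constructor):
+//
+//	if err := resourceExample().InternalValidate(nil, true); err != nil {
+//		t.Fatal(err)
+//	}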
+func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { + if topSchemaMap == nil { + topSchemaMap = m + } + for k, v := range m { + if v.Type == TypeInvalid { + return fmt.Errorf("%s: Type must be specified", k) + } + + if v.Optional && v.Required { + return fmt.Errorf("%s: Optional or Required must be set, not both", k) + } + + if v.Required && v.Computed { + return fmt.Errorf("%s: Cannot be both Required and Computed", k) + } + + if !v.Required && !v.Optional && !v.Computed { + return fmt.Errorf("%s: One of optional, required, or computed must be set", k) + } + + if v.Computed && v.Default != nil { + return fmt.Errorf("%s: Default must be nil if computed", k) + } + + if v.Required && v.Default != nil { + return fmt.Errorf("%s: Default cannot be set with Required", k) + } + + if len(v.ComputedWhen) > 0 && !v.Computed { + return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) + } + + if len(v.ConflictsWith) > 0 && v.Required { + return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) + } + + if len(v.ConflictsWith) > 0 { + for _, key := range v.ConflictsWith { + parts := strings.Split(key, ".") + sm := topSchemaMap + var target *Schema + for _, part := range parts { + // Skip index fields + if _, err := strconv.Atoi(part); err == nil { + continue + } + + var ok bool + if target, ok = sm[part]; !ok { + return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s)", k, key) + } + + if subResource, ok := target.Elem.(*Resource); ok { + sm = schemaMap(subResource.Schema) + } + } + if target == nil { + return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm) + } + if target.Required { + return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key) + } + + if len(target.ComputedWhen) > 0 { + return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key) + } + } + } + + if v.Type == TypeList || v.Type == TypeSet { + if v.Elem == nil { + return fmt.Errorf("%s: Elem must be set for lists", k) + } + + if v.Default != nil { + return fmt.Errorf("%s: Default is not valid for lists or sets", k) + } + + if v.Type != TypeSet && v.Set != nil { + return fmt.Errorf("%s: Set can only be set for TypeSet", k) + } + + switch t := v.Elem.(type) { + case *Resource: + if err := t.InternalValidate(topSchemaMap, true); err != nil { + return err + } + case *Schema: + bad := t.Computed || t.Optional || t.Required + if bad { + return fmt.Errorf( + "%s: Elem must have only Type set", k) + } + } + } else { + if v.MaxItems > 0 || v.MinItems > 0 { + return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) + } + } + + // Computed-only field + if v.Computed && !v.Optional { + if v.ValidateFunc != nil { + return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + if v.DiffSuppressFunc != nil { + return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ + " between config and state representation. 
"+ + "There is no config for computed-only field, nothing to compare.", k) + } + } + + if v.ValidateFunc != nil { + switch v.Type { + case TypeList, TypeSet: + return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.") + } + } + } + + return nil +} + +func (m schemaMap) diff( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d *ResourceData, + all bool) error { + + unsupressedDiff := new(terraform.InstanceDiff) + unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + var err error + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + err = m.diffString(k, schema, unsupressedDiff, d, all) + case TypeList: + err = m.diffList(k, schema, unsupressedDiff, d, all) + case TypeMap: + err = m.diffMap(k, schema, unsupressedDiff, d, all) + case TypeSet: + err = m.diffSet(k, schema, unsupressedDiff, d, all) + default: + err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) + } + + for attrK, attrV := range unsupressedDiff.Attributes { + if schema.DiffSuppressFunc != nil && + attrV != nil && + schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) { + continue + } + + diff.Attributes[attrK] = attrV + } + + return err +} + +func (m schemaMap) diffList( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d *ResourceData, + all bool) error { + o, n, _, computedList := d.diffChange(k) + if computedList { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedList && schema.Computed { + return nil + } + + if o == nil { + o = []interface{}{} + } + if n == nil { + n = []interface{}{} + } + if s, ok := o.(*Set); ok { + o = s.List() + } + if s, ok := n.(*Set); ok { + n = s.List() + } + os := o.([]interface{}) + vs := n.([]interface{}) + + // If the new value was set, and the two are equal, then we're done. 
+ // We have to do this check here because sets might be NOT + // reflect.DeepEqual so we need to wait until we get the []interface{} + if !all && nSet && reflect.DeepEqual(os, vs) { + return nil + } + + // Get the counts + oldLen := len(os) + newLen := len(vs) + oldStr := strconv.FormatInt(int64(oldLen), 10) + + // If the whole list is computed, then say that the # is computed + if computedList { + diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ + Old: oldStr, + NewComputed: true, + RequiresNew: schema.ForceNew, + } + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + computed := oldLen == 0 && newLen == 0 && schema.Computed + if changed || computed || all { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + newStr := "" + if !computed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }) + } + + // Figure out the maximum + maxLen := oldLen + if newLen > maxLen { + maxLen = newLen + } + + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for i := 0; i < maxLen; i++ { + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%d.%s", k, i, k2) + err := m.diff(subK, schema, diff, d, all) + if err != nil { + return err + } + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeList). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + for i := 0; i < maxLen; i++ { + subK := fmt.Sprintf("%s.%d", k, i) + err := m.diff(subK, &t2, diff, d, all) + if err != nil { + return err + } + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + + return nil +} + +func (m schemaMap) diffMap( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d *ResourceData, + all bool) error { + prefix := k + "." + + // First get all the values from the state + var stateMap, configMap map[string]string + o, n, _, nComputed := d.diffChange(k) + if err := mapstructure.WeakDecode(o, &stateMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(n, &configMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Keep track of whether the state _exists_ at all prior to clearing it + stateExists := o != nil + + // Delete any count values, since we don't use those + delete(configMap, "%") + delete(stateMap, "%") + + // Check if the number of elements has changed. + oldLen, newLen := len(stateMap), len(configMap) + changed := oldLen != newLen + if oldLen != 0 && newLen == 0 && schema.Computed { + changed = false + } + + // It is computed if we have no old value, no new value, the schema + // says it is computed, and it didn't exist in the state before. The + // last point means: if it existed in the state, even empty, then it + // has already been computed. + computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists + + // If the count has changed or we're computed, then add a diff for the + // count. "nComputed" means that the new value _contains_ a value that + // is computed. We don't do granular diffs for this yet, so we mark the + // whole map as computed. 
+ if changed || computed || nComputed { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed || nComputed, + ForceNew: schema.ForceNew, + } + + oldStr := strconv.FormatInt(int64(oldLen), 10) + newStr := "" + if !computed && !nComputed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".%"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + ) + } + + // If the new map is nil and we're computed, then ignore it. + if n == nil && schema.Computed { + return nil + } + + // Now we compare, preferring values from the config map + for k, v := range configMap { + old, ok := stateMap[k] + delete(stateMap, k) + + if old == v && ok && !all { + continue + } + + diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ + Old: old, + New: v, + }) + } + for k, v := range stateMap { + diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }) + } + + return nil +} + +func (m schemaMap) diffSet( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d *ResourceData, + all bool) error { + + o, n, _, computedSet := d.diffChange(k) + if computedSet { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedSet && schema.Computed { + return nil + } + + if o == nil { + o = schema.ZeroValue().(*Set) + } + if n == nil { + n = schema.ZeroValue().(*Set) + } + os := o.(*Set) + ns := n.(*Set) + + // If the new value was set, compare the listCode's to determine if + // the two are equal. Comparing listCode's instead of the actual values + // is needed because there could be computed values in the set which + // would result in false positives while comparing. + if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { + return nil + } + + // Get the counts + oldLen := os.Len() + newLen := ns.Len() + oldStr := strconv.Itoa(oldLen) + newStr := strconv.Itoa(newLen) + + // Build a schema for our count + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + // If the set computed then say that the # is computed + if computedSet || schema.Computed && !nSet { + // If # already exists, equals 0 and no new set is supplied, there + // is nothing to record in the diff + count, ok := d.GetOk(k + ".#") + if ok && count.(int) == 0 && !nSet && !computedSet { + return nil + } + + // Set the count but make sure that if # does not exist, we don't + // use the zeroed value + countStr := strconv.Itoa(count.(int)) + if !ok { + countStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }) + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + if changed || all { + diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }) + } + + // Build the list of codes that will make up our set. This is the + // removed codes as well as all the codes in the new codes. 
+	codes := make([][]string, 2)
+	codes[0] = os.Difference(ns).listCode()
+	codes[1] = ns.listCode()
+	for _, list := range codes {
+		for _, code := range list {
+			switch t := schema.Elem.(type) {
+			case *Resource:
+				// This is a complex resource
+				for k2, schema := range t.Schema {
+					subK := fmt.Sprintf("%s.%s.%s", k, code, k2)
+					err := m.diff(subK, schema, diff, d, true)
+					if err != nil {
+						return err
+					}
+				}
+			case *Schema:
+				// Copy the schema so that we can set Computed/ForceNew from
+				// the parent schema (the TypeSet).
+				t2 := *t
+				t2.ForceNew = schema.ForceNew
+
+				// This is just a primitive element, so go through each and
+				// just diff each.
+				subK := fmt.Sprintf("%s.%s", k, code)
+				err := m.diff(subK, &t2, diff, d, true)
+				if err != nil {
+					return err
+				}
+			default:
+				return fmt.Errorf("%s: unknown element type (internal)", k)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (m schemaMap) diffString(
+	k string,
+	schema *Schema,
+	diff *terraform.InstanceDiff,
+	d *ResourceData,
+	all bool) error {
+	var originalN interface{}
+	var os, ns string
+	o, n, _, computed := d.diffChange(k)
+	if schema.StateFunc != nil && n != nil {
+		originalN = n
+		n = schema.StateFunc(n)
+	}
+	nraw := n
+	if nraw == nil && o != nil {
+		nraw = schema.Type.Zero()
+	}
+	if err := mapstructure.WeakDecode(o, &os); err != nil {
+		return fmt.Errorf("%s: %s", k, err)
+	}
+	if err := mapstructure.WeakDecode(nraw, &ns); err != nil {
+		return fmt.Errorf("%s: %s", k, err)
+	}
+
+	if os == ns && !all {
+		// They're the same value. If the old value is not blank or we
+		// have an ID, then return right away since we're already set up.
+		if os != "" || d.Id() != "" {
+			return nil
+		}
+
+		// Otherwise, only continue if we're computed
+		if !schema.Computed && !computed {
+			return nil
+		}
+	}
+
+	removed := false
+	if o != nil && n == nil {
+		removed = true
+	}
+	if removed && schema.Computed {
+		return nil
+	}
+
+	diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+		Old:         os,
+		New:         ns,
+		NewExtra:    originalN,
+		NewRemoved:  removed,
+		NewComputed: computed,
+	})
+
+	return nil
+}
+
+func (m schemaMap) inputString(
+	input terraform.UIInput,
+	k string,
+	schema *Schema) (interface{}, error) {
+	result, err := input.Input(&terraform.InputOpts{
+		Id:          k,
+		Query:       k,
+		Description: schema.Description,
+		Default:     schema.InputDefault,
+	})
+
+	return result, err
+}
+
+func (m schemaMap) validate(
+	k string,
+	schema *Schema,
+	c *terraform.ResourceConfig) ([]string, []error) {
+	raw, ok := c.Get(k)
+	if !ok && schema.DefaultFunc != nil {
+		// We have a dynamic default. Check if we have a value.
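+		// A DefaultFunc is typically a closure like this (an illustrative
+		// sketch only, not from the original source):
+		//
+		//	DefaultFunc: func() (interface{}, error) {
+		//		return os.Getenv("EXAMPLE_TOKEN"), nil
+		//	},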
+		var err error
+		raw, err = schema.DefaultFunc()
+		if err != nil {
+			return nil, []error{fmt.Errorf(
+				"%q, error loading default: %s", k, err)}
+		}
+
+		// We're okay as long as we had a value set
+		ok = raw != nil
+	}
+	if !ok {
+		if schema.Required {
+			return nil, []error{fmt.Errorf(
+				"%q: required field is not set", k)}
+		}
+
+		return nil, nil
+	}
+
+	if !schema.Required && !schema.Optional {
+		// This is a computed-only field
+		return nil, []error{fmt.Errorf(
+			"%q: this field cannot be set", k)}
+	}
+
+	err := m.validateConflictingAttributes(k, schema, c)
+	if err != nil {
+		return nil, []error{err}
+	}
+
+	return m.validateType(k, raw, schema, c)
+}
+
+func (m schemaMap) validateConflictingAttributes(
+	k string,
+	schema *Schema,
+	c *terraform.ResourceConfig) error {
+
+	if len(schema.ConflictsWith) == 0 {
+		return nil
+	}
+
+	for _, conflicting_key := range schema.ConflictsWith {
+		if value, ok := c.Get(conflicting_key); ok {
+			return fmt.Errorf(
+				"%q: conflicts with %s (%#v)", k, conflicting_key, value)
+		}
+	}
+
+	return nil
+}
+
+func (m schemaMap) validateList(
+	k string,
+	raw interface{},
+	schema *Schema,
+	c *terraform.ResourceConfig) ([]string, []error) {
+	// We use reflection to verify the slice because you can't
+	// cast to []interface{} unless the slice is exactly that type.
+	rawV := reflect.ValueOf(raw)
+
+	// If we support promotion and the raw value isn't a slice, wrap
+	// it in []interface{} and check again.
+	if schema.PromoteSingle && rawV.Kind() != reflect.Slice {
+		raw = []interface{}{raw}
+		rawV = reflect.ValueOf(raw)
+	}
+
+	if rawV.Kind() != reflect.Slice {
+		return nil, []error{fmt.Errorf(
+			"%s: should be a list", k)}
+	}
+
+	// Validate length
+	if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems {
+		return nil, []error{fmt.Errorf(
+			"%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())}
+	}
+
+	if schema.MinItems > 0 && rawV.Len() < schema.MinItems {
+		return nil, []error{fmt.Errorf(
+			"%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())}
+	}
+
+	// Now build the []interface{}
+	raws := make([]interface{}, rawV.Len())
+	for i := range raws {
+		raws[i] = rawV.Index(i).Interface()
+	}
+
+	var ws []string
+	var es []error
+	for i, raw := range raws {
+		key := fmt.Sprintf("%s.%d", k, i)
+
+		// Reify the key value from the ResourceConfig.
+		// If the list was computed we have all raw values, but some of these
+		// may be known in the config, and aren't individually marked as Computed.
+		if r, ok := c.Get(key); ok {
+			raw = r
+		}
+
+		var ws2 []string
+		var es2 []error
+		switch t := schema.Elem.(type) {
+		case *Resource:
+			// This is a sub-resource
+			ws2, es2 = m.validateObject(key, t.Schema, c)
+		case *Schema:
+			ws2, es2 = m.validateType(key, raw, t, c)
+		}
+
+		if len(ws2) > 0 {
+			ws = append(ws, ws2...)
+		}
+		if len(es2) > 0 {
+			es = append(es, es2...)
+		}
+	}
+
+	return ws, es
+}
+
+func (m schemaMap) validateMap(
+	k string,
+	raw interface{},
+	schema *Schema,
+	c *terraform.ResourceConfig) ([]string, []error) {
+	// We use reflection to verify the slice because you can't
+	// cast to []interface{} unless the slice is exactly that type.
+	rawV := reflect.ValueOf(raw)
+	switch rawV.Kind() {
+	case reflect.String:
+		// If raw and reified are equal, this is a string and should
+		// be rejected.
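+		// For example (illustrative): a literal value such as "not-a-map"
+		// reifies to itself and is rejected below, while an unresolved
+		// interpolation such as "${var.tags}" may reify to a map or be
+		// marked computed, and is allowed through.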
+ reified, reifiedOk := c.Get(k) + if reifiedOk && raw == reified && !c.IsComputed(k) { + return nil, []error{fmt.Errorf("%s: should be a map", k)} + } + // Otherwise it's likely raw is an interpolation. + return nil, nil + case reflect.Map: + case reflect.Slice: + default: + return nil, []error{fmt.Errorf("%s: should be a map", k)} + } + + // If it is not a slice, validate directly + if rawV.Kind() != reflect.Slice { + mapIface := rawV.Interface() + if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { + return nil, errs + } + if schema.ValidateFunc != nil { + return schema.ValidateFunc(mapIface, k) + } + return nil, nil + } + + // It is a slice, verify that all the elements are maps + raws := make([]interface{}, rawV.Len()) + for i, _ := range raws { + raws[i] = rawV.Index(i).Interface() + } + + for _, raw := range raws { + v := reflect.ValueOf(raw) + if v.Kind() != reflect.Map { + return nil, []error{fmt.Errorf( + "%s: should be a map", k)} + } + mapIface := v.Interface() + if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 { + return nil, errs + } + } + + if schema.ValidateFunc != nil { + validatableMap := make(map[string]interface{}) + for _, raw := range raws { + for k, v := range raw.(map[string]interface{}) { + validatableMap[k] = v + } + } + + return schema.ValidateFunc(validatableMap, k) + } + + return nil, nil +} + +func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) { + for key, raw := range m { + valueType, err := getValueType(k, schema) + if err != nil { + return nil, []error{err} + } + + switch valueType { + case TypeBool: + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeInt: + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeFloat: + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + case TypeString: + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)} + } + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + } + return nil, nil +} + +func getValueType(k string, schema *Schema) (ValueType, error) { + if schema.Elem == nil { + return TypeString, nil + } + if vt, ok := schema.Elem.(ValueType); ok { + return vt, nil + } + + if s, ok := schema.Elem.(*Schema); ok { + if s.Elem == nil { + return TypeString, nil + } + if vt, ok := s.Elem.(ValueType); ok { + return vt, nil + } + } + + if _, ok := schema.Elem.(*Resource); ok { + // TODO: We don't actually support this (yet) + // but silently pass the validation, until we decide + // how to handle nested structures in maps + return TypeString, nil + } + return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) +} + +func (m schemaMap) validateObject( + k string, + schema map[string]*Schema, + c *terraform.ResourceConfig) ([]string, []error) { + raw, _ := c.Get(k) + if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { + return nil, []error{fmt.Errorf( + "%s: expected object, got %s", + k, reflect.ValueOf(raw).Kind())} + } + + var ws []string + var es []error + for subK, s := range schema { + key := subK + if k != "" { + key = fmt.Sprintf("%s.%s", k, subK) + } + + ws2, es2 := 
m.validate(key, s, c) + if len(ws2) > 0 { + ws = append(ws, ws2...) + } + if len(es2) > 0 { + es = append(es, es2...) + } + } + + // Detect any extra/unknown keys and report those as errors. + if m, ok := raw.(map[string]interface{}); ok { + for subk, _ := range m { + if _, ok := schema[subk]; !ok { + if subk == TimeoutsConfigKey { + continue + } + es = append(es, fmt.Errorf( + "%s: invalid or unknown key: %s", k, subk)) + } + } + } + + return ws, es +} + +func (m schemaMap) validatePrimitive( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + + // Catch if the user gave a complex type where a primitive was + // expected, so we can return a friendly error message that + // doesn't contain Go type system terminology. + switch reflect.ValueOf(raw).Type().Kind() { + case reflect.Slice: + return nil, []error{ + fmt.Errorf("%s must be a single value, not a list", k), + } + case reflect.Map: + return nil, []error{ + fmt.Errorf("%s must be a single value, not a map", k), + } + default: // ok + } + + if c.IsComputed(k) { + // If the key is being computed, then it is not an error as + // long as it's not a slice or map. + return nil, nil + } + + var decoded interface{} + switch schema.Type { + case TypeBool: + // Verify that we can parse this as the correct type + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + case TypeInt: + // Verify that we can parse this as an int + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + case TypeFloat: + // Verify that we can parse this as an int + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + case TypeString: + // Verify that we can parse this as a string + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + + if schema.ValidateFunc != nil { + return schema.ValidateFunc(decoded, k) + } + + return nil, nil +} + +func (m schemaMap) validateType( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig) ([]string, []error) { + var ws []string + var es []error + switch schema.Type { + case TypeSet, TypeList: + ws, es = m.validateList(k, raw, schema, c) + case TypeMap: + ws, es = m.validateMap(k, raw, schema, c) + default: + ws, es = m.validatePrimitive(k, raw, schema, c) + } + + if schema.Deprecated != "" { + ws = append(ws, fmt.Sprintf( + "%q: [DEPRECATED] %s", k, schema.Deprecated)) + } + + if schema.Removed != "" { + es = append(es, fmt.Errorf( + "%q: [REMOVED] %s", k, schema.Removed)) + } + + return ws, es +} + +// Zero returns the zero value for a type. 
+func (t ValueType) Zero() interface{} { + switch t { + case TypeInvalid: + return nil + case TypeBool: + return false + case TypeInt: + return 0 + case TypeFloat: + return 0.0 + case TypeString: + return "" + case TypeList: + return []interface{}{} + case TypeMap: + return map[string]interface{}{} + case TypeSet: + return new(Set) + case typeObject: + return map[string]interface{}{} + default: + panic(fmt.Sprintf("unknown type %s", t)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go new file mode 100644 index 0000000000..fe6d7504c7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go @@ -0,0 +1,125 @@ +package schema + +import ( + "bytes" + "fmt" + "sort" + "strconv" +) + +func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) { + if val == nil { + buf.WriteRune(';') + return + } + + switch schema.Type { + case TypeBool: + if val.(bool) { + buf.WriteRune('1') + } else { + buf.WriteRune('0') + } + case TypeInt: + buf.WriteString(strconv.Itoa(val.(int))) + case TypeFloat: + buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64)) + case TypeString: + buf.WriteString(val.(string)) + case TypeList: + buf.WriteRune('(') + l := val.([]interface{}) + for _, innerVal := range l { + serializeCollectionMemberForHash(buf, innerVal, schema.Elem) + } + buf.WriteRune(')') + case TypeMap: + + m := val.(map[string]interface{}) + var keys []string + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + buf.WriteRune('[') + for _, k := range keys { + innerVal := m[k] + if innerVal == nil { + continue + } + buf.WriteString(k) + buf.WriteRune(':') + + switch innerVal := innerVal.(type) { + case int: + buf.WriteString(strconv.Itoa(innerVal)) + case float64: + buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64)) + case string: + buf.WriteString(innerVal) + default: + panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal)) + } + + buf.WriteRune(';') + } + buf.WriteRune(']') + case TypeSet: + buf.WriteRune('{') + s := val.(*Set) + for _, innerVal := range s.List() { + serializeCollectionMemberForHash(buf, innerVal, schema.Elem) + } + buf.WriteRune('}') + default: + panic("unknown schema type to serialize") + } + buf.WriteRune(';') +} + +// SerializeValueForHash appends a serialization of the given resource config +// to the given buffer, guaranteeing deterministic results given the same value +// and schema. +// +// Its primary purpose is as input into a hashing function in order +// to hash complex substructures when used in sets, and so the serialization +// is not reversible. +func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) { + if val == nil { + return + } + sm := resource.Schema + m := val.(map[string]interface{}) + var keys []string + for k := range sm { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + innerSchema := sm[k] + // Skip attributes that are not user-provided. Computed attributes + // do not contribute to the hash since their ultimate value cannot + // be known at plan/diff time. 
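+		// Illustrative example (names assumed): a resource element with
+		// schema {name: TypeString, port: TypeInt} and value
+		// {"name": "web", "port": 80} serializes as "name:web;port:80;",
+		// so equal values always produce identical hash input.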
+ if !(innerSchema.Required || innerSchema.Optional) { + continue + } + + buf.WriteString(k) + buf.WriteRune(':') + innerVal := m[k] + SerializeValueForHash(buf, innerVal, innerSchema) + } +} + +func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) { + switch tElem := elem.(type) { + case *Schema: + SerializeValueForHash(buf, val, tElem) + case *Resource: + buf.WriteRune('<') + SerializeResourceForHash(buf, val, tElem) + buf.WriteString(">;") + default: + panic(fmt.Sprintf("invalid element type: %T", tElem)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go new file mode 100644 index 0000000000..de05f40eed --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go @@ -0,0 +1,209 @@ +package schema + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" + "sync" + + "github.com/hashicorp/terraform/helper/hashcode" +) + +// HashString hashes strings. If you want a Set of strings, this is the +// SchemaSetFunc you want. +func HashString(v interface{}) int { + return hashcode.String(v.(string)) +} + +// HashResource hashes complex structures that are described using +// a *Resource. This is the default set implementation used when a set's +// element type is a full resource. +func HashResource(resource *Resource) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeResourceForHash(&buf, v, resource) + return hashcode.String(buf.String()) + } +} + +// HashSchema hashes values that are described using a *Schema. This is the +// default set implementation used when a set's element type is a single +// schema. +func HashSchema(schema *Schema) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeValueForHash(&buf, v, schema) + return hashcode.String(buf.String()) + } +} + +// Set is a set data structure that is returned for elements of type +// TypeSet. +type Set struct { + F SchemaSetFunc + + m map[string]interface{} + once sync.Once +} + +// NewSet is a convenience method for creating a new set with the given +// items. +func NewSet(f SchemaSetFunc, items []interface{}) *Set { + s := &Set{F: f} + for _, i := range items { + s.Add(i) + } + + return s +} + +// CopySet returns a copy of another set. +func CopySet(otherSet *Set) *Set { + return NewSet(otherSet.F, otherSet.List()) +} + +// Add adds an item to the set if it isn't already in the set. +func (s *Set) Add(item interface{}) { + s.add(item, false) +} + +// Remove removes an item if it's already in the set. Idempotent. +func (s *Set) Remove(item interface{}) { + s.remove(item) +} + +// Contains checks if the set has the given item. +func (s *Set) Contains(item interface{}) bool { + _, ok := s.m[s.hash(item)] + return ok +} + +// Len returns the amount of items in the set. +func (s *Set) Len() int { + return len(s.m) +} + +// List returns the elements of this set in slice format. +// +// The order of the returned elements is deterministic. Given the same +// set, the order of this will always be the same. +func (s *Set) List() []interface{} { + result := make([]interface{}, len(s.m)) + for i, k := range s.listCode() { + result[i] = s.m[k] + } + + return result +} + +// Difference performs a set difference of the two sets, returning +// a new third set that has only the elements unique to this set. 
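+//
+// For example (illustrative):
+//
+//	a := NewSet(HashString, []interface{}{"x", "y"})
+//	b := NewSet(HashString, []interface{}{"y", "z"})
+//	a.Difference(b).List() // ["x"]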
+func (s *Set) Difference(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; !ok { + result.m[k] = v + } + } + + return result +} + +// Intersection performs the set intersection of the two sets +// and returns a new third set. +func (s *Set) Intersection(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; ok { + result.m[k] = v + } + } + + return result +} + +// Union performs the set union of the two sets and returns a new third +// set. +func (s *Set) Union(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + result.m[k] = v + } + for k, v := range other.m { + result.m[k] = v + } + + return result +} + +func (s *Set) Equal(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + return reflect.DeepEqual(s.m, other.m) +} + +func (s *Set) GoString() string { + return fmt.Sprintf("*Set(%#v)", s.m) +} + +func (s *Set) init() { + s.m = make(map[string]interface{}) +} + +func (s *Set) add(item interface{}, computed bool) string { + s.once.Do(s.init) + + code := s.hash(item) + if computed { + code = "~" + code + } + + if _, ok := s.m[code]; !ok { + s.m[code] = item + } + + return code +} + +func (s *Set) hash(item interface{}) string { + code := s.F(item) + // Always return a nonnegative hashcode. + if code < 0 { + code = -code + } + return strconv.Itoa(code) +} + +func (s *Set) remove(item interface{}) string { + s.once.Do(s.init) + + code := s.hash(item) + delete(s.m, code) + + return code +} + +func (s *Set) index(item interface{}) int { + return sort.SearchStrings(s.listCode(), s.hash(item)) +} + +func (s *Set) listCode() []string { + // Sort the hash codes so the order of the list is deterministic + keys := make([]string, 0, len(s.m)) + for k := range s.m { + keys = append(keys, k) + } + sort.Sort(sort.StringSlice(keys)) + return keys +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go new file mode 100644 index 0000000000..9765bdbc6d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go @@ -0,0 +1,30 @@ +package schema + +import ( + "testing" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/terraform" +) + +// TestResourceDataRaw creates a ResourceData from a raw configuration map. +func TestResourceDataRaw( + t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + c, err := config.NewRawConfig(raw) + if err != nil { + t.Fatalf("err: %s", err) + } + + sm := schemaMap(schema) + diff, err := sm.Diff(nil, terraform.NewResourceConfig(c)) + if err != nil { + t.Fatalf("err: %s", err) + } + + result, err := sm.Data(nil, diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go new file mode 100644 index 0000000000..9286987d57 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go @@ -0,0 +1,21 @@ +package schema + +//go:generate stringer -type=ValueType valuetype.go + +// ValueType is an enum of the type that can be represented by a schema. 
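+//
+// For example (an illustrative sketch), a string attribute is declared as:
+//
+//	"name": &Schema{
+//		Type:     TypeString,
+//		Optional: true,
+//	}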
+type ValueType int + +const ( + TypeInvalid ValueType = iota + TypeBool + TypeInt + TypeFloat + TypeString + TypeList + TypeMap + TypeSet + typeObject +) + +// NOTE: ValueType has more functions defined on it in schema.go. We can't +// put them here because we reference other files. diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go new file mode 100644 index 0000000000..1610cec2d3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT. + +package schema + +import "fmt" + +const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" + +var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} + +func (i ValueType) String() string { + if i < 0 || i >= ValueType(len(_ValueType_index)-1) { + return fmt.Sprintf("ValueType(%d)", i) + } + return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go new file mode 100644 index 0000000000..7edd5e75db --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go @@ -0,0 +1,80 @@ +package shadow + +import ( + "fmt" + "io" + "reflect" + + "github.com/hashicorp/go-multierror" + "github.com/mitchellh/reflectwalk" +) + +// Close will close all shadow values within the given structure. +// +// This uses reflection to walk the structure, find all shadow elements, +// and close them. Currently this will only find struct fields that are +// shadow values, and not slice elements, etc. +func Close(v interface{}) error { + // We require a pointer so we can address the internal fields + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return fmt.Errorf("value must be a pointer") + } + + // Walk and close + var w closeWalker + if err := reflectwalk.Walk(v, &w); err != nil { + return err + } + + return w.Err +} + +type closeWalker struct { + Err error +} + +func (w *closeWalker) Struct(reflect.Value) error { + // Do nothing. 
We implement this for reflectwalk.StructWalker + return nil +} + +func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error { + // Not sure why this would be but lets avoid some panics + if !v.IsValid() { + return nil + } + + // Empty for exported, so don't check unexported fields + if f.PkgPath != "" { + return nil + } + + // Verify the io.Closer is in this package + typ := v.Type() + if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" { + return nil + } + + // We're looking for an io.Closer + raw := v.Interface() + if raw == nil { + return nil + } + + closer, ok := raw.(io.Closer) + if !ok && v.CanAddr() { + closer, ok = v.Addr().Interface().(io.Closer) + } + if !ok { + return reflectwalk.SkipEntry + } + + // Close it + if err := closer.Close(); err != nil { + w.Err = multierror.Append(w.Err, err) + } + + // Don't go into the struct field + return reflectwalk.SkipEntry +} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go new file mode 100644 index 0000000000..4223e9255e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go @@ -0,0 +1,128 @@ +package shadow + +import ( + "sync" +) + +// ComparedValue is a struct that finds a value by comparing some key +// to the list of stored values. This is useful when there is no easy +// uniquely identifying key that works in a map (for that, use KeyedValue). +// +// ComparedValue is very expensive, relative to other Value types. Try to +// limit the number of values stored in a ComparedValue by potentially +// nesting it within a KeyedValue (a keyed value points to a compared value, +// for example). +type ComparedValue struct { + // Func is a function that is given the lookup key and a single + // stored value. If it matches, it returns true. + Func func(k, v interface{}) bool + + lock sync.Mutex + once sync.Once + closed bool + values []interface{} + waiters map[interface{}]*Value +} + +// Close closes the value. This can never fail. For a definition of +// "close" see the ErrClosed docs. +func (w *ComparedValue) Close() error { + w.lock.Lock() + defer w.lock.Unlock() + + // Set closed to true always + w.closed = true + + // For all waiters, complete with ErrClosed + for k, val := range w.waiters { + val.SetValue(ErrClosed) + delete(w.waiters, k) + } + + return nil +} + +// Value returns the value that was set for the given key, or blocks +// until one is available. +func (w *ComparedValue) Value(k interface{}) interface{} { + v, val := w.valueWaiter(k) + if val == nil { + return v + } + + return val.Value() +} + +// ValueOk gets the value for the given key, returning immediately if the +// value doesn't exist. The second return argument is true if the value exists. +func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) { + v, val := w.valueWaiter(k) + return v, val == nil +} + +func (w *ComparedValue) SetValue(v interface{}) { + w.lock.Lock() + defer w.lock.Unlock() + w.once.Do(w.init) + + // Check if we already have this exact value (by simply comparing + // with == directly). If we do, then we don't insert it again. 
+ found := false + for _, v2 := range w.values { + if v == v2 { + found = true + break + } + } + + if !found { + // Set the value, always + w.values = append(w.values, v) + } + + // Go through the waiters + for k, val := range w.waiters { + if w.Func(k, v) { + val.SetValue(v) + delete(w.waiters, k) + } + } +} + +func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) { + w.lock.Lock() + w.once.Do(w.init) + + // Look for a pre-existing value + for _, v := range w.values { + if w.Func(k, v) { + w.lock.Unlock() + return v, nil + } + } + + // If we're closed, return that + if w.closed { + w.lock.Unlock() + return ErrClosed, nil + } + + // Pre-existing value doesn't exist, create a waiter + val := w.waiters[k] + if val == nil { + val = new(Value) + w.waiters[k] = val + } + w.lock.Unlock() + + // Return the waiter + return nil, val +} + +// Must be called with w.lock held. +func (w *ComparedValue) init() { + w.waiters = make(map[interface{}]*Value) + if w.Func == nil { + w.Func = func(k, v interface{}) bool { return k == v } + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go new file mode 100644 index 0000000000..432b03668e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go @@ -0,0 +1,151 @@ +package shadow + +import ( + "sync" +) + +// KeyedValue is a struct that coordinates a value by key. If a value is +// not available for a give key, it'll block until it is available. +type KeyedValue struct { + lock sync.Mutex + once sync.Once + values map[string]interface{} + waiters map[string]*Value + closed bool +} + +// Close closes the value. This can never fail. For a definition of +// "close" see the ErrClosed docs. +func (w *KeyedValue) Close() error { + w.lock.Lock() + defer w.lock.Unlock() + + // Set closed to true always + w.closed = true + + // For all waiters, complete with ErrClosed + for k, val := range w.waiters { + val.SetValue(ErrClosed) + delete(w.waiters, k) + } + + return nil +} + +// Value returns the value that was set for the given key, or blocks +// until one is available. +func (w *KeyedValue) Value(k string) interface{} { + w.lock.Lock() + v, val := w.valueWaiter(k) + w.lock.Unlock() + + // If we have no waiter, then return the value + if val == nil { + return v + } + + // We have a waiter, so wait + return val.Value() +} + +// WaitForChange waits for the value with the given key to be set again. +// If the key isn't set, it'll wait for an initial value. Note that while +// it is called "WaitForChange", the value isn't guaranteed to _change_; +// this will return when a SetValue is called for the given k. +func (w *KeyedValue) WaitForChange(k string) interface{} { + w.lock.Lock() + w.once.Do(w.init) + + // If we're closed, we're closed + if w.closed { + w.lock.Unlock() + return ErrClosed + } + + // Check for an active waiter. If there isn't one, make it + val := w.waiters[k] + if val == nil { + val = new(Value) + w.waiters[k] = val + } + w.lock.Unlock() + + // And wait + return val.Value() +} + +// ValueOk gets the value for the given key, returning immediately if the +// value doesn't exist. The second return argument is true if the value exists. 
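+//
+// For example (illustrative):
+//
+//	if v, ok := w.ValueOk("provider"); ok {
+//		// use v without blocking
+//	}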
+func (w *KeyedValue) ValueOk(k string) (interface{}, bool) { + w.lock.Lock() + defer w.lock.Unlock() + + v, val := w.valueWaiter(k) + return v, val == nil +} + +func (w *KeyedValue) SetValue(k string, v interface{}) { + w.lock.Lock() + defer w.lock.Unlock() + w.setValue(k, v) +} + +// Init will initialize the key to a given value only if the key has +// not been set before. This is safe to call multiple times and in parallel. +func (w *KeyedValue) Init(k string, v interface{}) { + w.lock.Lock() + defer w.lock.Unlock() + + // If we have a waiter, set the value. + _, val := w.valueWaiter(k) + if val != nil { + w.setValue(k, v) + } +} + +// Must be called with w.lock held. +func (w *KeyedValue) init() { + w.values = make(map[string]interface{}) + w.waiters = make(map[string]*Value) +} + +// setValue is like SetValue but assumes the lock is held. +func (w *KeyedValue) setValue(k string, v interface{}) { + w.once.Do(w.init) + + // Set the value, always + w.values[k] = v + + // If we have a waiter, set it + if val, ok := w.waiters[k]; ok { + val.SetValue(v) + delete(w.waiters, k) + } +} + +// valueWaiter gets the value or the Value waiter for a given key. +// +// This must be called with lock held. +func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) { + w.once.Do(w.init) + + // If we have this value already, return it + if v, ok := w.values[k]; ok { + return v, nil + } + + // If we're closed, return that + if w.closed { + return ErrClosed, nil + } + + // No pending value, check for a waiter + val := w.waiters[k] + if val == nil { + val = new(Value) + w.waiters[k] = val + } + + // Return the waiter + return nil, val +} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go new file mode 100644 index 0000000000..0a43d4d4d4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go @@ -0,0 +1,66 @@ +package shadow + +import ( + "container/list" + "sync" +) + +// OrderedValue is a struct that keeps track of a value in the order +// it is set. Each time Value() is called, it will return the most recent +// calls value then discard it. +// +// This is unlike Value that returns the same value once it is set. +type OrderedValue struct { + lock sync.Mutex + values *list.List + waiters *list.List +} + +// Value returns the last value that was set, or blocks until one +// is received. +func (w *OrderedValue) Value() interface{} { + w.lock.Lock() + + // If we have a pending value already, use it + if w.values != nil && w.values.Len() > 0 { + front := w.values.Front() + w.values.Remove(front) + w.lock.Unlock() + return front.Value + } + + // No pending value, create a waiter + if w.waiters == nil { + w.waiters = list.New() + } + + var val Value + w.waiters.PushBack(&val) + w.lock.Unlock() + + // Return the value once we have it + return val.Value() +} + +// SetValue sets the latest value. 
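+//
+// Illustrative, inferred from the list operations below: with no waiter
+// blocked in Value, each SetValue queues a value that later Value calls
+// consume in the order the values were set:
+//
+//	w.SetValue("a")
+//	w.SetValue("b")
+//	w.Value() // "a"
+//	w.Value() // "b"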
+func (w *OrderedValue) SetValue(v interface{}) { + w.lock.Lock() + defer w.lock.Unlock() + + // If we have a waiter, notify it + if w.waiters != nil && w.waiters.Len() > 0 { + front := w.waiters.Front() + w.waiters.Remove(front) + + val := front.Value.(*Value) + val.SetValue(v) + return + } + + // Add it to the list of values + if w.values == nil { + w.values = list.New() + } + + w.values.PushBack(v) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go new file mode 100644 index 0000000000..2413335b80 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go @@ -0,0 +1,79 @@ +package shadow + +import ( + "errors" + "sync" +) + +// ErrClosed is returned by any closed values. +// +// A "closed value" is when the shadow has been notified that the real +// side is complete and any blocking values will _never_ be satisfied +// in the future. In this case, this error is returned. If a value is already +// available, that is still returned. +var ErrClosed = errors.New("shadow closed") + +// Value is a struct that coordinates a value between two +// parallel routines. It is similar to atomic.Value except that when +// Value is called if it isn't set it will wait for it. +// +// The Value can be closed with Close, which will cause any future +// blocking operations to return immediately with ErrClosed. +type Value struct { + lock sync.Mutex + cond *sync.Cond + value interface{} + valueSet bool +} + +// Close closes the value. This can never fail. For a definition of +// "close" see the struct docs. +func (w *Value) Close() error { + w.lock.Lock() + set := w.valueSet + w.lock.Unlock() + + // If we haven't set the value, set it + if !set { + w.SetValue(ErrClosed) + } + + // Done + return nil +} + +// Value returns the value that was set. +func (w *Value) Value() interface{} { + w.lock.Lock() + defer w.lock.Unlock() + + // If we already have a value just return + for !w.valueSet { + // No value, setup the condition variable if we have to + if w.cond == nil { + w.cond = sync.NewCond(&w.lock) + } + + // Wait on it + w.cond.Wait() + } + + // Return the value + return w.value +} + +// SetValue sets the value. +func (w *Value) SetValue(v interface{}) { + w.lock.Lock() + defer w.lock.Unlock() + + // Set the value + w.valueSet = true + w.value = v + + // If we have a condition, clear it + if w.cond != nil { + w.cond.Broadcast() + w.cond = nil + } +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go b/vendor/github.com/hashicorp/terraform/plugin/plugin.go new file mode 100644 index 0000000000..00fa7b2967 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/plugin.go @@ -0,0 +1,13 @@ +package plugin + +import ( + "github.com/hashicorp/go-plugin" +) + +// See serve.go for serving plugins + +// PluginMap should be used by clients for the map of plugins. +var PluginMap = map[string]plugin.Plugin{ + "provider": &ResourceProviderPlugin{}, + "provisioner": &ResourceProvisionerPlugin{}, +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go new file mode 100644 index 0000000000..473f786013 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go @@ -0,0 +1,578 @@ +package plugin + +import ( + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/terraform" +) + +// ResourceProviderPlugin is the plugin.Plugin implementation. 
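+//
+// A plugin binary typically registers it with go-plugin like this
+// (illustrative sketch; myProviderFunc is a hypothetical constructor):
+//
+//	plugin.Serve(&plugin.ServeConfig{
+//		HandshakeConfig: Handshake,
+//		Plugins: map[string]plugin.Plugin{
+//			"provider": &ResourceProviderPlugin{F: myProviderFunc},
+//		},
+//	})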
+type ResourceProviderPlugin struct { + F func() terraform.ResourceProvider +} + +func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { + return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil +} + +func (p *ResourceProviderPlugin) Client( + b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &ResourceProvider{Broker: b, Client: c}, nil +} + +// ResourceProvider is an implementation of terraform.ResourceProvider +// that communicates over RPC. +type ResourceProvider struct { + Broker *plugin.MuxBroker + Client *rpc.Client +} + +func (p *ResourceProvider) Stop() error { + var resp ResourceProviderStopResponse + err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) + if err != nil { + return err + } + if resp.Error != nil { + err = resp.Error + } + + return err +} + +func (p *ResourceProvider) Input( + input terraform.UIInput, + c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { + id := p.Broker.NextId() + go p.Broker.AcceptAndServe(id, &UIInputServer{ + UIInput: input, + }) + + var resp ResourceProviderInputResponse + args := ResourceProviderInputArgs{ + InputId: id, + Config: c, + } + + err := p.Client.Call("Plugin.Input", &args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + return nil, err + } + + return resp.Config, nil +} + +func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) { + var resp ResourceProviderValidateResponse + args := ResourceProviderValidateArgs{ + Config: c, + } + + err := p.Client.Call("Plugin.Validate", &args, &resp) + if err != nil { + return nil, []error{err} + } + + var errs []error + if len(resp.Errors) > 0 { + errs = make([]error, len(resp.Errors)) + for i, err := range resp.Errors { + errs[i] = err + } + } + + return resp.Warnings, errs +} + +func (p *ResourceProvider) ValidateResource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + var resp ResourceProviderValidateResourceResponse + args := ResourceProviderValidateResourceArgs{ + Config: c, + Type: t, + } + + err := p.Client.Call("Plugin.ValidateResource", &args, &resp) + if err != nil { + return nil, []error{err} + } + + var errs []error + if len(resp.Errors) > 0 { + errs = make([]error, len(resp.Errors)) + for i, err := range resp.Errors { + errs[i] = err + } + } + + return resp.Warnings, errs +} + +func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error { + var resp ResourceProviderConfigureResponse + err := p.Client.Call("Plugin.Configure", c, &resp) + if err != nil { + return err + } + if resp.Error != nil { + err = resp.Error + } + + return err +} + +func (p *ResourceProvider) Apply( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + var resp ResourceProviderApplyResponse + args := &ResourceProviderApplyArgs{ + Info: info, + State: s, + Diff: d, + } + + err := p.Client.Call("Plugin.Apply", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) Diff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + var resp ResourceProviderDiffResponse + args := &ResourceProviderDiffArgs{ + Info: info, + State: s, + Config: c, + } + err := p.Client.Call("Plugin.Diff", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return 
resp.Diff, err +} + +func (p *ResourceProvider) ValidateDataSource( + t string, c *terraform.ResourceConfig) ([]string, []error) { + var resp ResourceProviderValidateResourceResponse + args := ResourceProviderValidateResourceArgs{ + Config: c, + Type: t, + } + + err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp) + if err != nil { + return nil, []error{err} + } + + var errs []error + if len(resp.Errors) > 0 { + errs = make([]error, len(resp.Errors)) + for i, err := range resp.Errors { + errs[i] = err + } + } + + return resp.Warnings, errs +} + +func (p *ResourceProvider) Refresh( + info *terraform.InstanceInfo, + s *terraform.InstanceState) (*terraform.InstanceState, error) { + var resp ResourceProviderRefreshResponse + args := &ResourceProviderRefreshArgs{ + Info: info, + State: s, + } + + err := p.Client.Call("Plugin.Refresh", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) ImportState( + info *terraform.InstanceInfo, + id string) ([]*terraform.InstanceState, error) { + var resp ResourceProviderImportStateResponse + args := &ResourceProviderImportStateArgs{ + Info: info, + Id: id, + } + + err := p.Client.Call("Plugin.ImportState", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) Resources() []terraform.ResourceType { + var result []terraform.ResourceType + + err := p.Client.Call("Plugin.Resources", new(interface{}), &result) + if err != nil { + // TODO: panic, log, what? + return nil + } + + return result +} + +func (p *ResourceProvider) ReadDataDiff( + info *terraform.InstanceInfo, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + var resp ResourceProviderReadDataDiffResponse + args := &ResourceProviderReadDataDiffArgs{ + Info: info, + Config: c, + } + + err := p.Client.Call("Plugin.ReadDataDiff", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.Diff, err +} + +func (p *ResourceProvider) ReadDataApply( + info *terraform.InstanceInfo, + d *terraform.InstanceDiff) (*terraform.InstanceState, error) { + var resp ResourceProviderReadDataApplyResponse + args := &ResourceProviderReadDataApplyArgs{ + Info: info, + Diff: d, + } + + err := p.Client.Call("Plugin.ReadDataApply", args, &resp) + if err != nil { + return nil, err + } + if resp.Error != nil { + err = resp.Error + } + + return resp.State, err +} + +func (p *ResourceProvider) DataSources() []terraform.DataSource { + var result []terraform.DataSource + + err := p.Client.Call("Plugin.DataSources", new(interface{}), &result) + if err != nil { + // TODO: panic, log, what? + return nil + } + + return result +} + +func (p *ResourceProvider) Close() error { + return p.Client.Close() +} + +// ResourceProviderServer is a net/rpc compatible structure for serving +// a ResourceProvider. This should not be used directly. 
+type ResourceProviderServer struct { + Broker *plugin.MuxBroker + Provider terraform.ResourceProvider +} + +type ResourceProviderStopResponse struct { + Error *plugin.BasicError +} + +type ResourceProviderConfigureResponse struct { + Error *plugin.BasicError +} + +type ResourceProviderInputArgs struct { + InputId uint32 + Config *terraform.ResourceConfig +} + +type ResourceProviderInputResponse struct { + Config *terraform.ResourceConfig + Error *plugin.BasicError +} + +type ResourceProviderApplyArgs struct { + Info *terraform.InstanceInfo + State *terraform.InstanceState + Diff *terraform.InstanceDiff +} + +type ResourceProviderApplyResponse struct { + State *terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderDiffArgs struct { + Info *terraform.InstanceInfo + State *terraform.InstanceState + Config *terraform.ResourceConfig +} + +type ResourceProviderDiffResponse struct { + Diff *terraform.InstanceDiff + Error *plugin.BasicError +} + +type ResourceProviderRefreshArgs struct { + Info *terraform.InstanceInfo + State *terraform.InstanceState +} + +type ResourceProviderRefreshResponse struct { + State *terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderImportStateArgs struct { + Info *terraform.InstanceInfo + Id string +} + +type ResourceProviderImportStateResponse struct { + State []*terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderReadDataApplyArgs struct { + Info *terraform.InstanceInfo + Diff *terraform.InstanceDiff +} + +type ResourceProviderReadDataApplyResponse struct { + State *terraform.InstanceState + Error *plugin.BasicError +} + +type ResourceProviderReadDataDiffArgs struct { + Info *terraform.InstanceInfo + Config *terraform.ResourceConfig +} + +type ResourceProviderReadDataDiffResponse struct { + Diff *terraform.InstanceDiff + Error *plugin.BasicError +} + +type ResourceProviderValidateArgs struct { + Config *terraform.ResourceConfig +} + +type ResourceProviderValidateResponse struct { + Warnings []string + Errors []*plugin.BasicError +} + +type ResourceProviderValidateResourceArgs struct { + Config *terraform.ResourceConfig + Type string +} + +type ResourceProviderValidateResourceResponse struct { + Warnings []string + Errors []*plugin.BasicError +} + +func (s *ResourceProviderServer) Stop( + _ interface{}, + reply *ResourceProviderStopResponse) error { + err := s.Provider.Stop() + *reply = ResourceProviderStopResponse{ + Error: plugin.NewBasicError(err), + } + + return nil +} + +func (s *ResourceProviderServer) Input( + args *ResourceProviderInputArgs, + reply *ResourceProviderInputResponse) error { + conn, err := s.Broker.Dial(args.InputId) + if err != nil { + *reply = ResourceProviderInputResponse{ + Error: plugin.NewBasicError(err), + } + return nil + } + client := rpc.NewClient(conn) + defer client.Close() + + input := &UIInput{Client: client} + + config, err := s.Provider.Input(input, args.Config) + *reply = ResourceProviderInputResponse{ + Config: config, + Error: plugin.NewBasicError(err), + } + + return nil +} + +func (s *ResourceProviderServer) Validate( + args *ResourceProviderValidateArgs, + reply *ResourceProviderValidateResponse) error { + warns, errs := s.Provider.Validate(args.Config) + berrs := make([]*plugin.BasicError, len(errs)) + for i, err := range errs { + berrs[i] = plugin.NewBasicError(err) + } + *reply = ResourceProviderValidateResponse{ + Warnings: warns, + Errors: berrs, + } + return nil +} + +func (s *ResourceProviderServer) ValidateResource( + args 
*ResourceProviderValidateResourceArgs, + reply *ResourceProviderValidateResourceResponse) error { + warns, errs := s.Provider.ValidateResource(args.Type, args.Config) + berrs := make([]*plugin.BasicError, len(errs)) + for i, err := range errs { + berrs[i] = plugin.NewBasicError(err) + } + *reply = ResourceProviderValidateResourceResponse{ + Warnings: warns, + Errors: berrs, + } + return nil +} + +func (s *ResourceProviderServer) Configure( + config *terraform.ResourceConfig, + reply *ResourceProviderConfigureResponse) error { + err := s.Provider.Configure(config) + *reply = ResourceProviderConfigureResponse{ + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Apply( + args *ResourceProviderApplyArgs, + result *ResourceProviderApplyResponse) error { + state, err := s.Provider.Apply(args.Info, args.State, args.Diff) + *result = ResourceProviderApplyResponse{ + State: state, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Diff( + args *ResourceProviderDiffArgs, + result *ResourceProviderDiffResponse) error { + diff, err := s.Provider.Diff(args.Info, args.State, args.Config) + *result = ResourceProviderDiffResponse{ + Diff: diff, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Refresh( + args *ResourceProviderRefreshArgs, + result *ResourceProviderRefreshResponse) error { + newState, err := s.Provider.Refresh(args.Info, args.State) + *result = ResourceProviderRefreshResponse{ + State: newState, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) ImportState( + args *ResourceProviderImportStateArgs, + result *ResourceProviderImportStateResponse) error { + states, err := s.Provider.ImportState(args.Info, args.Id) + *result = ResourceProviderImportStateResponse{ + State: states, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) Resources( + nothing interface{}, + result *[]terraform.ResourceType) error { + *result = s.Provider.Resources() + return nil +} + +func (s *ResourceProviderServer) ValidateDataSource( + args *ResourceProviderValidateResourceArgs, + reply *ResourceProviderValidateResourceResponse) error { + warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config) + berrs := make([]*plugin.BasicError, len(errs)) + for i, err := range errs { + berrs[i] = plugin.NewBasicError(err) + } + *reply = ResourceProviderValidateResourceResponse{ + Warnings: warns, + Errors: berrs, + } + return nil +} + +func (s *ResourceProviderServer) ReadDataDiff( + args *ResourceProviderReadDataDiffArgs, + result *ResourceProviderReadDataDiffResponse) error { + diff, err := s.Provider.ReadDataDiff(args.Info, args.Config) + *result = ResourceProviderReadDataDiffResponse{ + Diff: diff, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) ReadDataApply( + args *ResourceProviderReadDataApplyArgs, + result *ResourceProviderReadDataApplyResponse) error { + newState, err := s.Provider.ReadDataApply(args.Info, args.Diff) + *result = ResourceProviderReadDataApplyResponse{ + State: newState, + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProviderServer) DataSources( + nothing interface{}, + result *[]terraform.DataSource) error { + *result = s.Provider.DataSources() + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go new file 
mode 100644 index 0000000000..8fce9d8ae7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go @@ -0,0 +1,173 @@ +package plugin + +import ( + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/terraform" +) + +// ResourceProvisionerPlugin is the plugin.Plugin implementation. +type ResourceProvisionerPlugin struct { + F func() terraform.ResourceProvisioner +} + +func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { + return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil +} + +func (p *ResourceProvisionerPlugin) Client( + b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &ResourceProvisioner{Broker: b, Client: c}, nil +} + +// ResourceProvisioner is an implementation of terraform.ResourceProvisioner +// that communicates over RPC. +type ResourceProvisioner struct { + Broker *plugin.MuxBroker + Client *rpc.Client +} + +func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) { + var resp ResourceProvisionerValidateResponse + args := ResourceProvisionerValidateArgs{ + Config: c, + } + + err := p.Client.Call("Plugin.Validate", &args, &resp) + if err != nil { + return nil, []error{err} + } + + var errs []error + if len(resp.Errors) > 0 { + errs = make([]error, len(resp.Errors)) + for i, err := range resp.Errors { + errs[i] = err + } + } + + return resp.Warnings, errs +} + +func (p *ResourceProvisioner) Apply( + output terraform.UIOutput, + s *terraform.InstanceState, + c *terraform.ResourceConfig) error { + id := p.Broker.NextId() + go p.Broker.AcceptAndServe(id, &UIOutputServer{ + UIOutput: output, + }) + + var resp ResourceProvisionerApplyResponse + args := &ResourceProvisionerApplyArgs{ + OutputId: id, + State: s, + Config: c, + } + + err := p.Client.Call("Plugin.Apply", args, &resp) + if err != nil { + return err + } + if resp.Error != nil { + err = resp.Error + } + + return err +} + +func (p *ResourceProvisioner) Stop() error { + var resp ResourceProvisionerStopResponse + err := p.Client.Call("Plugin.Stop", new(interface{}), &resp) + if err != nil { + return err + } + if resp.Error != nil { + err = resp.Error + } + + return err +} + +func (p *ResourceProvisioner) Close() error { + return p.Client.Close() +} + +type ResourceProvisionerValidateArgs struct { + Config *terraform.ResourceConfig +} + +type ResourceProvisionerValidateResponse struct { + Warnings []string + Errors []*plugin.BasicError +} + +type ResourceProvisionerApplyArgs struct { + OutputId uint32 + State *terraform.InstanceState + Config *terraform.ResourceConfig +} + +type ResourceProvisionerApplyResponse struct { + Error *plugin.BasicError +} + +type ResourceProvisionerStopResponse struct { + Error *plugin.BasicError +} + +// ResourceProvisionerServer is a net/rpc compatible structure for serving +// a ResourceProvisioner. This should not be used directly. 
+type ResourceProvisionerServer struct { + Broker *plugin.MuxBroker + Provisioner terraform.ResourceProvisioner +} + +func (s *ResourceProvisionerServer) Apply( + args *ResourceProvisionerApplyArgs, + result *ResourceProvisionerApplyResponse) error { + conn, err := s.Broker.Dial(args.OutputId) + if err != nil { + *result = ResourceProvisionerApplyResponse{ + Error: plugin.NewBasicError(err), + } + return nil + } + client := rpc.NewClient(conn) + defer client.Close() + + output := &UIOutput{Client: client} + + err = s.Provisioner.Apply(output, args.State, args.Config) + *result = ResourceProvisionerApplyResponse{ + Error: plugin.NewBasicError(err), + } + return nil +} + +func (s *ResourceProvisionerServer) Validate( + args *ResourceProvisionerValidateArgs, + reply *ResourceProvisionerValidateResponse) error { + warns, errs := s.Provisioner.Validate(args.Config) + berrs := make([]*plugin.BasicError, len(errs)) + for i, err := range errs { + berrs[i] = plugin.NewBasicError(err) + } + *reply = ResourceProvisionerValidateResponse{ + Warnings: warns, + Errors: berrs, + } + return nil +} + +func (s *ResourceProvisionerServer) Stop( + _ interface{}, + reply *ResourceProvisionerStopResponse) error { + err := s.Provisioner.Stop() + *reply = ResourceProvisionerStopResponse{ + Error: plugin.NewBasicError(err), + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go new file mode 100644 index 0000000000..2028a613ff --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/serve.go @@ -0,0 +1,54 @@ +package plugin + +import ( + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/terraform" +) + +// The constants below are the names of the plugins that can be dispensed +// from the plugin server. +const ( + ProviderPluginName = "provider" + ProvisionerPluginName = "provisioner" +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +var Handshake = plugin.HandshakeConfig{ + // The ProtocolVersion is the version that must match between TF core + // and TF plugins. This should be bumped whenever a change happens in + // one or the other that makes it so that they can't safely communicate. + // This could be adding a new interface value, it could be how + // helper/schema computes diffs, etc. + ProtocolVersion: 4, + + // The magic cookie values should NEVER be changed. + MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type ProviderFunc func() terraform.ResourceProvider +type ProvisionerFunc func() terraform.ResourceProvisioner + +// ServeOpts are the configurations to serve a plugin. +type ServeOpts struct { + ProviderFunc ProviderFunc + ProvisionerFunc ProvisionerFunc +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. +func Serve(opts *ServeOpts) { + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: Handshake, + Plugins: pluginMap(opts), + }) +} + +// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin +// server or client. 
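+//
+// A provider's main function typically calls (illustrative; Provider is a
+// hypothetical constructor returning a terraform.ResourceProvider):
+//
+//	Serve(&ServeOpts{
+//		ProviderFunc: Provider,
+//	})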
+func pluginMap(opts *ServeOpts) map[string]plugin.Plugin { + return map[string]plugin.Plugin{ + "provider": &ResourceProviderPlugin{F: opts.ProviderFunc}, + "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc}, + } +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go new file mode 100644 index 0000000000..493efc0a91 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go @@ -0,0 +1,51 @@ +package plugin + +import ( + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/terraform" +) + +// UIInput is an implementation of terraform.UIInput that communicates +// over RPC. +type UIInput struct { + Client *rpc.Client +} + +func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) { + var resp UIInputInputResponse + err := i.Client.Call("Plugin.Input", opts, &resp) + if err != nil { + return "", err + } + if resp.Error != nil { + err = resp.Error + return "", err + } + + return resp.Value, nil +} + +type UIInputInputResponse struct { + Value string + Error *plugin.BasicError +} + +// UIInputServer is a net/rpc compatible structure for serving +// a UIInput. This should not be used directly. +type UIInputServer struct { + UIInput terraform.UIInput +} + +func (s *UIInputServer) Input( + opts *terraform.InputOpts, + reply *UIInputInputResponse) error { + value, err := s.UIInput.Input(opts) + *reply = UIInputInputResponse{ + Value: value, + Error: plugin.NewBasicError(err), + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go new file mode 100644 index 0000000000..c222b00cde --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go @@ -0,0 +1,29 @@ +package plugin + +import ( + "net/rpc" + + "github.com/hashicorp/terraform/terraform" +) + +// UIOutput is an implementation of terraform.UIOutput that communicates +// over RPC. +type UIOutput struct { + Client *rpc.Client +} + +func (o *UIOutput) Output(v string) { + o.Client.Call("Plugin.Output", v, new(interface{})) +} + +// UIOutputServer is the RPC server for serving UIOutput. +type UIOutputServer struct { + UIOutput terraform.UIOutput +} + +func (s *UIOutputServer) Output( + v string, + reply *interface{}) error { + s.UIOutput.Output(v) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go new file mode 100644 index 0000000000..306128edfb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/context.go @@ -0,0 +1,1022 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "sort" + "strings" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/helper/experiment" +) + +// InputMode defines what sort of input will be asked for when Input +// is called on Context. +type InputMode byte + +const ( + // InputModeVar asks for all variables + InputModeVar InputMode = 1 << iota + + // InputModeVarUnset asks for variables which are not set yet. + // InputModeVar must be set for this to have an effect. + InputModeVarUnset + + // InputModeProvider asks for provider variables + InputModeProvider + + // InputModeStd is the standard operating mode and asks for both variables + // and providers.
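+ //
+ // For example (illustrative), to prompt only for variables that are
+ // not set yet, callers combine the flags:
+ // c.Input(InputModeVar | InputModeVarUnset)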
+ InputModeStd = InputModeVar | InputModeProvider +) + +var ( + // contextFailOnShadowError will cause Context operations to return + // errors when shadow operations fail. This is only used for testing. + contextFailOnShadowError = false + + // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every + // Plan operation, effectively testing the Diff DeepCopy whenever + // a Plan occurs. This is enabled for tests. + contextTestDeepCopyOnPlan = false +) + +// ContextOpts are the user-configurable options to create a context with +// NewContext. +type ContextOpts struct { + Meta *ContextMeta + Destroy bool + Diff *Diff + Hooks []Hook + Module *module.Tree + Parallelism int + State *State + StateFutureAllowed bool + Providers map[string]ResourceProviderFactory + Provisioners map[string]ResourceProvisionerFactory + Shadow bool + Targets []string + Variables map[string]interface{} + + UIInput UIInput +} + +// ContextMeta is metadata about the running context. This is information +// that this package or structure cannot determine on its own but exposes +// into Terraform in various ways. This must be provided by the Context +// initializer. +type ContextMeta struct { + Env string // Env is the state environment +} + +// Context represents all the context that Terraform needs in order to +// perform operations on infrastructure. This structure is built using +// NewContext. See the documentation for that. +// +// Extra functions on Context can be found in context_*.go files. +type Context struct { + // Maintainer note: Anytime this struct is changed, please verify + // that newShadowContext still does the right thing. Tests should + // fail regardless but putting this note here as well. + + components contextComponentFactory + destroy bool + diff *Diff + diffLock sync.RWMutex + hooks []Hook + meta *ContextMeta + module *module.Tree + sh *stopHook + shadow bool + state *State + stateLock sync.RWMutex + targets []string + uiInput UIInput + variables map[string]interface{} + + l sync.Mutex // Lock acquired during any task + parallelSem Semaphore + providerInputConfig map[string]map[string]interface{} + runLock sync.Mutex + runCond *sync.Cond + runContext context.Context + runContextCancel context.CancelFunc + shadowErr error +} + +// NewContext creates a new Context structure. +// +// Once a Context is created, the pointer values within ContextOpts +// should not be mutated in any way, since the pointers are copied, not +// the values themselves. +func NewContext(opts *ContextOpts) (*Context, error) { + // Validate the version requirement if it is given + if opts.Module != nil { + if err := checkRequiredVersion(opts.Module); err != nil { + return nil, err + } + } + + // Copy all the hooks and add our stop hook. We don't append directly + // to the Config so that we're not modifying that in-place. + sh := new(stopHook) + hooks := make([]Hook, len(opts.Hooks)+1) + copy(hooks, opts.Hooks) + hooks[len(opts.Hooks)] = sh + + state := opts.State + if state == nil { + state = new(State) + state.init() + } + + // If our state is from the future, then error. Callers can avoid + // this error by explicitly setting `StateFutureAllowed`. + if !opts.StateFutureAllowed && state.FromFutureTerraform() { + return nil, fmt.Errorf( + "Terraform doesn't allow running any operations against a state\n"+ + "that was written by a future Terraform version.
The state is\n"+ + "reporting it is written by Terraform '%s'.\n\n"+ + "Please run at least that version of Terraform to continue.", + state.TFVersion) + } + + // Explicitly reset our state version to our current version so that + // any operations we do will write out that our latest version + // has run. + state.TFVersion = Version + + // Determine parallelism, default to 10. We do this both to limit + // CPU pressure but also to have an extra guard against rate throttling + // from providers. + par := opts.Parallelism + if par == 0 { + par = 10 + } + + // Set up the variables in the following sequence: + // 0 - Take default values from the configuration + // 1 - Take values from TF_VAR_x environment variables + // 2 - Take values specified in -var flags, overriding values + // set by environment variables if necessary. This includes + // values taken from -var-file in addition. + variables := make(map[string]interface{}) + + if opts.Module != nil { + var err error + variables, err = Variables(opts.Module, opts.Variables) + if err != nil { + return nil, err + } + } + + diff := opts.Diff + if diff == nil { + diff = &Diff{} + } + + return &Context{ + components: &basicComponentFactory{ + providers: opts.Providers, + provisioners: opts.Provisioners, + }, + destroy: opts.Destroy, + diff: diff, + hooks: hooks, + meta: opts.Meta, + module: opts.Module, + shadow: opts.Shadow, + state: state, + targets: opts.Targets, + uiInput: opts.UIInput, + variables: variables, + + parallelSem: NewSemaphore(par), + providerInputConfig: make(map[string]map[string]interface{}), + sh: sh, + }, nil +} + +type ContextGraphOpts struct { + // If true, validates the graph structure (checks for cycles). + Validate bool + + // Legacy graphs only: won't prune the graph + Verbose bool +} + +// Graph returns the graph used for the given operation type. +// +// The most extensive or complex graph type is GraphTypePlan. 
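+//
+// For example (illustrative):
+//
+//	g, err := ctx.Graph(GraphTypePlan, &ContextGraphOpts{Validate: true})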
+func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) { + if opts == nil { + opts = &ContextGraphOpts{Validate: true} + } + + log.Printf("[INFO] terraform: building graph: %s", typ) + switch typ { + case GraphTypeApply: + return (&ApplyGraphBuilder{ + Module: c.module, + Diff: c.diff, + State: c.state, + Providers: c.components.ResourceProviders(), + Provisioners: c.components.ResourceProvisioners(), + Targets: c.targets, + Destroy: c.destroy, + Validate: opts.Validate, + }).Build(RootModulePath) + + case GraphTypeInput: + // The input graph is just a slightly modified plan graph + fallthrough + case GraphTypeValidate: + // The validate graph is just a slightly modified plan graph + fallthrough + case GraphTypePlan: + // Create the plan graph builder + p := &PlanGraphBuilder{ + Module: c.module, + State: c.state, + Providers: c.components.ResourceProviders(), + Targets: c.targets, + Validate: opts.Validate, + } + + // Some special cases for other graph types shared with plan currently + var b GraphBuilder = p + switch typ { + case GraphTypeInput: + b = InputGraphBuilder(p) + case GraphTypeValidate: + // We need to set the provisioners so those can be validated + p.Provisioners = c.components.ResourceProvisioners() + + b = ValidateGraphBuilder(p) + } + + return b.Build(RootModulePath) + + case GraphTypePlanDestroy: + return (&DestroyPlanGraphBuilder{ + Module: c.module, + State: c.state, + Targets: c.targets, + Validate: opts.Validate, + }).Build(RootModulePath) + + case GraphTypeRefresh: + return (&RefreshGraphBuilder{ + Module: c.module, + State: c.state, + Providers: c.components.ResourceProviders(), + Targets: c.targets, + Validate: opts.Validate, + }).Build(RootModulePath) + } + + return nil, fmt.Errorf("unknown graph type: %s", typ) +} + +// ShadowError returns any errors caught during a shadow operation. +// +// A shadow operation is an operation run in parallel to a real operation +// that performs the same tasks using new logic on copied state. The results +// are compared to ensure that the new logic works the same as the old logic. +// The shadow never affects the real operation or return values. +// +// The results of the shadow operation are only available through this function +// call after a real operation is complete. +// +// For API consumers of Context, you can safely ignore this function +// completely if you have no interest in helping report experimental feature +// errors to Terraform maintainers. Otherwise, please call this function +// after every operation and report this to the user. +// +// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect +// the real state or result of a real operation. They are purely informational +// to assist in future Terraform versions being more stable. Please message +// this effectively to the end user. +// +// This must be called only when no other operation is running (refresh, +// plan, etc.). The result can be used in parallel to any other operation +// running. +func (c *Context) ShadowError() error { + return c.shadowErr +} + +// State returns a copy of the current state associated with this context. +// +// This cannot safely be called in parallel with any other Context function. +func (c *Context) State() *State { + return c.state.DeepCopy() +} + +// Interpolater returns an Interpolater built on a copy of the state +// that can be used to test interpolation values.
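+//
+// For example (illustrative), a REPL or test harness can evaluate
+// interpolations against the copied state via ctx.Interpolater().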
+func (c *Context) Interpolater() *Interpolater { + var varLock sync.Mutex + var stateLock sync.RWMutex + return &Interpolater{ + Operation: walkApply, + Meta: c.meta, + Module: c.module, + State: c.state.DeepCopy(), + StateLock: &stateLock, + VariableValues: c.variables, + VariableValuesLock: &varLock, + } +} + +// Input asks for input to fill variables and provider configurations. +// This modifies the configuration in-place, so asking for Input twice +// may result in different UI output showing different current values. +func (c *Context) Input(mode InputMode) error { + defer c.acquireRun("input")() + + if mode&InputModeVar != 0 { + // Walk the variables first for the root module. We walk them in + // alphabetical order for UX reasons. + rootConf := c.module.Config() + names := make([]string, len(rootConf.Variables)) + m := make(map[string]*config.Variable) + for i, v := range rootConf.Variables { + names[i] = v.Name + m[v.Name] = v + } + sort.Strings(names) + for _, n := range names { + // If we only care about unset variables, then if the variable + // is set, continue on. + if mode&InputModeVarUnset != 0 { + if _, ok := c.variables[n]; ok { + continue + } + } + + var valueType config.VariableType + + v := m[n] + switch valueType = v.Type(); valueType { + case config.VariableTypeUnknown: + continue + case config.VariableTypeMap: + // OK + case config.VariableTypeList: + // OK + case config.VariableTypeString: + // OK + default: + panic(fmt.Sprintf("Unknown variable type: %#v", v.Type())) + } + + // If the variable is not already set, and the variable defines a + // default, use that for the value. + if _, ok := c.variables[n]; !ok { + if v.Default != nil { + c.variables[n] = v.Default.(string) + continue + } + } + + // this should only happen during tests + if c.uiInput == nil { + log.Println("[WARN] Context.uiInput is nil") + continue + } + + // Ask the user for a value for this variable + var value string + retry := 0 + for { + var err error + value, err = c.uiInput.Input(&InputOpts{ + Id: fmt.Sprintf("var.%s", n), + Query: fmt.Sprintf("var.%s", n), + Description: v.Description, + }) + if err != nil { + return fmt.Errorf( + "Error asking for %s: %s", n, err) + } + + if value == "" && v.Required() { + // Redo if it is required, but abort if we keep getting + // blank entries + if retry > 2 { + return fmt.Errorf("missing required value for %q", n) + } + retry++ + continue + } + + break + } + + // no value provided, so don't set the variable at all + if value == "" { + continue + } + + decoded, err := parseVariableAsHCL(n, value, valueType) + if err != nil { + return err + } + + if decoded != nil { + c.variables[n] = decoded + } + } + } + + if mode&InputModeProvider != 0 { + // Build the graph + graph, err := c.Graph(GraphTypeInput, nil) + if err != nil { + return err + } + + // Do the walk + if _, err := c.walk(graph, nil, walkInput); err != nil { + return err + } + } + + return nil +} + +// Apply applies the changes represented by this context and returns +// the resulting state. +// +// Even in the case an error is returned, the state may be returned and will +// potentially be partially updated. In addition to returning the resulting +// state, this context is updated with the latest state. +// +// If the state is required after an error, the caller should call +// Context.State, rather than rely on the return value. +// +// TODO: Apply and Refresh should either always return a state, or rely on the +// State() method.
Currently the helper/resource testing framework relies +// on the absence of a returned state to determine if Destroy can be +// called, so that will need to be refactored before this can be changed. +func (c *Context) Apply() (*State, error) { + defer c.acquireRun("apply")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // Build the graph. + graph, err := c.Graph(GraphTypeApply, nil) + if err != nil { + return nil, err + } + + // Determine the operation + operation := walkApply + if c.destroy { + operation = walkDestroy + } + + // Walk the graph + walker, err := c.walk(graph, graph, operation) + if len(walker.ValidationErrors) > 0 { + err = multierror.Append(err, walker.ValidationErrors...) + } + + // Clean out any unused things + c.state.prune() + + return c.state, err +} + +// Plan generates an execution plan for the given context. +// +// The execution plan encapsulates the context and can be stored +// in order to reinstantiate a context later for Apply. +// +// Plan also updates the diff of this context to be the diff generated +// by the plan, so Apply can be called after. +func (c *Context) Plan() (*Plan, error) { + defer c.acquireRun("plan")() + + p := &Plan{ + Module: c.module, + Vars: c.variables, + State: c.state, + Targets: c.targets, + } + + var operation walkOperation + if c.destroy { + operation = walkPlanDestroy + } else { + // Set our state to be something temporary. We do this so that + // the plan can update a fake state so that variables work, then + // we replace it back with our old state. + old := c.state + if old == nil { + c.state = &State{} + c.state.init() + } else { + c.state = old.DeepCopy() + } + defer func() { + c.state = old + }() + + operation = walkPlan + } + + // Setup our diff + c.diffLock.Lock() + c.diff = new(Diff) + c.diff.init() + c.diffLock.Unlock() + + // Build the graph. + graphType := GraphTypePlan + if c.destroy { + graphType = GraphTypePlanDestroy + } + graph, err := c.Graph(graphType, nil) + if err != nil { + return nil, err + } + + // Do the walk + walker, err := c.walk(graph, graph, operation) + if err != nil { + return nil, err + } + p.Diff = c.diff + + // If this is true, it means we're running unit tests. In this case, + // we perform a deep copy just to ensure that all context tests also + // test that a diff is copy-able. This will panic if it fails. This + // is enabled during unit tests. + // + // This should never be true during production usage, but even if it is, + // it can't do any real harm. + if contextTestDeepCopyOnPlan { + p.Diff.DeepCopy() + } + + /* + // We don't do the reverification during the new destroy plan because + // it will use a different apply process. + if X_legacyGraph { + // Now that we have a diff, we can build the exact graph that Apply will use + // and catch any possible cycles during the Plan phase. + if _, err := c.Graph(GraphTypeLegacy, nil); err != nil { + return nil, err + } + } + */ + + var errs error + if len(walker.ValidationErrors) > 0 { + errs = multierror.Append(errs, walker.ValidationErrors...) + } + return p, errs +} + +// Refresh goes through all the resources in the state and refreshes them +// to their latest state. This will update the state that this context +// works with, along with returning it. +// +// Even in the case an error is returned, the state may be returned and +// will potentially be partially updated. 
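+//
+// A typical lifecycle pairs Refresh with Plan and Apply (an illustrative
+// sketch; error handling elided):
+//
+//	ctx, _ := NewContext(opts)
+//	ctx.Refresh()
+//	plan, _ := ctx.Plan()
+//	state, _ := ctx.Apply()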
+func (c *Context) Refresh() (*State, error) { + defer c.acquireRun("refresh")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // Build the graph. + graph, err := c.Graph(GraphTypeRefresh, nil) + if err != nil { + return nil, err + } + + // Do the walk + if _, err := c.walk(graph, graph, walkRefresh); err != nil { + return nil, err + } + + // Clean out any unused things + c.state.prune() + + return c.state, nil +} + +// Stop stops the running task. +// +// Stop will block until the task completes. +func (c *Context) Stop() { + log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence") + + c.l.Lock() + defer c.l.Unlock() + + // If we're running, then stop + if c.runContextCancel != nil { + log.Printf("[WARN] terraform: run context exists, stopping") + + // Tell the hook we want to stop + c.sh.Stop() + + // Stop the context + c.runContextCancel() + c.runContextCancel = nil + } + + // Grab the condition var before we exit + if cond := c.runCond; cond != nil { + cond.Wait() + } + + log.Printf("[WARN] terraform: stop complete") +} + +// Validate validates the configuration and returns any warnings or errors. +func (c *Context) Validate() ([]string, []error) { + defer c.acquireRun("validate")() + + var errs error + + // Validate the configuration itself + if err := c.module.Validate(); err != nil { + errs = multierror.Append(errs, err) + } + + // This only needs to be done for the root module, since inter-module + // variables are validated in the module tree. + if config := c.module.Config(); config != nil { + // Validate the user variables + if err := smcUserVariables(config, c.variables); len(err) > 0 { + errs = multierror.Append(errs, err...) + } + } + + // If we have errors at this point, the graphing has no chance, + // so just bail early. + if errs != nil { + return nil, []error{errs} + } + + // Build the graph so we can walk it and run Validate on nodes. + // We also validate the graph generated here, but this graph doesn't + // necessarily match the graph that Plan will generate, so we'll validate the + // graph again later after Planning. + graph, err := c.Graph(GraphTypeValidate, nil) + if err != nil { + return nil, []error{err} + } + + // Walk + walker, err := c.walk(graph, graph, walkValidate) + if err != nil { + return nil, multierror.Append(errs, err).Errors + } + + // Return the result + rerrs := multierror.Append(errs, walker.ValidationErrors...) + + sort.Strings(walker.ValidationWarnings) + sort.Slice(rerrs.Errors, func(i, j int) bool { + return rerrs.Errors[i].Error() < rerrs.Errors[j].Error() + }) + + return walker.ValidationWarnings, rerrs.Errors +} + +// Module returns the module tree associated with this context. +func (c *Context) Module() *module.Tree { + return c.module +} + +// Variables will return the mapping of variables that were defined +// for this Context. If Input was called, this mapping may be different +// than what was given. +func (c *Context) Variables() map[string]interface{} { + return c.variables +} + +// SetVariable sets a variable after a context has already been built. +func (c *Context) SetVariable(k string, v interface{}) { + c.variables[k] = v +} + +func (c *Context) acquireRun(phase string) func() { + // With the run lock held, grab the context lock to make changes + // to the run context. 
+ c.l.Lock() + defer c.l.Unlock() + + // Wait until we're no longer running + for c.runCond != nil { + c.runCond.Wait() + } + + // Build our lock + c.runCond = sync.NewCond(&c.l) + + // Setup debugging + dbug.SetPhase(phase) + + // Create a new run context + c.runContext, c.runContextCancel = context.WithCancel(context.Background()) + + // Reset the stop hook so we're not stopped + c.sh.Reset() + + // Reset the shadow errors + c.shadowErr = nil + + return c.releaseRun +} + +func (c *Context) releaseRun() { + // Grab the context lock so that we can make modifications to fields + c.l.Lock() + defer c.l.Unlock() + + // setting the phase to "INVALID" lets us easily detect if we have + // operations happening outside of a run, or we missed setting the proper + // phase + dbug.SetPhase("INVALID") + + // End our run. We check if runContext is non-nil because it can be + // set to nil if it was cancelled via Stop() + if c.runContextCancel != nil { + c.runContextCancel() + } + + // Unlock all waiting on our condition + cond := c.runCond + c.runCond = nil + cond.Broadcast() + + // Unset the context + c.runContext = nil +} + +func (c *Context) walk( + graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) { + // Keep track of the "real" context which is the context that does + // the real work: talking to real providers, modifying real state, etc. + realCtx := c + + // If we don't want shadowing, remove it + if !experiment.Enabled(experiment.X_shadow) { + shadow = nil + } + + // Just log this so we can see it in a debug log + if !c.shadow { + log.Printf("[WARN] terraform: shadow graph disabled") + shadow = nil + } + + // If we have a shadow graph, walk that as well + var shadowCtx *Context + var shadowCloser Shadow + if shadow != nil { + // Build the shadow context. In the process, override the real context + // with the one that is wrapped so that the shadow context can verify + // the results of the real. + realCtx, shadowCtx, shadowCloser = newShadowContext(c) + } + + log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) + + walker := &ContextGraphWalker{ + Context: realCtx, + Operation: operation, + StopContext: c.runContext, + } + + // Watch for a stop so we can call the provider Stop() API. + watchStop, watchWait := c.watchStop(walker) + + // Walk the real graph, this will block until it completes + realErr := graph.Walk(walker) + + // Close the channel so the watcher stops, and wait for it to return. + close(watchStop) + <-watchWait + + // If we have a shadow graph and we interrupted the real graph, then + // we just close the shadow and never verify it. It is non-trivial to + // recreate the exact execution state up until an interruption so this + // isn't supported with shadows at the moment. + if shadowCloser != nil && c.sh.Stopped() { + // Ignore the error result, there is nothing we could care about + shadowCloser.CloseShadow() + + // Set it to nil so we don't do anything + shadowCloser = nil + } + + // If we have a shadow graph, wait for that to complete. + if shadowCloser != nil { + // Build the graph walker for the shadow. We also wrap this in + // a panicwrap so that panics are captured. For the shadow graph, + // we just want panics to be normal errors rather than to crash + // Terraform. + shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{ + Context: shadowCtx, + Operation: operation, + }) + + // Kick off the shadow walk. This will block on any operations + // on the real walk so it is fine to start first.
+ log.Printf("[INFO] Starting shadow graph walk: %s", operation.String()) + shadowCh := make(chan error) + go func() { + shadowCh <- shadow.Walk(shadowWalker) + }() + + // Notify the shadow that we're done + if err := shadowCloser.CloseShadow(); err != nil { + c.shadowErr = multierror.Append(c.shadowErr, err) + } + + // Wait for the walk to end + log.Printf("[DEBUG] Waiting for shadow graph to complete...") + shadowWalkErr := <-shadowCh + + // Get any shadow errors + if err := shadowCloser.ShadowError(); err != nil { + c.shadowErr = multierror.Append(c.shadowErr, err) + } + + // Verify the contexts (compare) + if err := shadowContextVerify(realCtx, shadowCtx); err != nil { + c.shadowErr = multierror.Append(c.shadowErr, err) + } + + // At this point, if we're supposed to fail on error, then + // we PANIC. Some tests just verify that there is an error, + // so simply appending it to realErr and returning could hide + // shadow problems. + // + // This must be done BEFORE appending shadowWalkErr since the + // shadowWalkErr may include expected errors. + // + // We only do this if we don't have a real error. In the case of + // a real error, we can't guarantee what nodes were and weren't + // traversed in parallel scenarios so we can't guarantee no + // shadow errors. + if c.shadowErr != nil && contextFailOnShadowError && realErr == nil { + panic(multierror.Prefix(c.shadowErr, "shadow graph:")) + } + + // Now, if we have a walk error, we append that through + if shadowWalkErr != nil { + c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr) + } + + if c.shadowErr == nil { + log.Printf("[INFO] Shadow graph success!") + } else { + log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr) + + // If we're supposed to fail on shadow errors, then report it + if contextFailOnShadowError { + realErr = multierror.Append(realErr, multierror.Prefix( + c.shadowErr, "shadow graph:")) + } + } + } + + return walker, realErr +} + +// watchStop immediately returns a `stop` and a `wait` chan after dispatching +// the watchStop goroutine. This will watch the runContext for cancellation and +// stop the providers accordingly. When the watch is no longer needed, the +// `stop` chan should be closed before waiting on the `wait` chan. +// The `wait` chan is important, because without synchronizing with the end of +// the watchStop goroutine, the runContext may also be closed during the select +// incorrectly causing providers to be stopped. Even if the graph walk is done +// at that point, stopping a provider permanently cancels its StopContext which +// can cause later actions to fail. +func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) { + stop := make(chan struct{}) + wait := make(chan struct{}) + + // get the runContext cancellation channel now, because releaseRun will + // write to the runContext field. + done := c.runContext.Done() + + go func() { + defer close(wait) + // Wait for a stop or completion + select { + case <-done: + // done means the context was canceled, so we need to try and stop + // providers. + case <-stop: + // our own stop channel was closed. + return + } + + // If we're here, we're stopped, trigger the call. + + { + // Copy the providers so that a misbehaved blocking Stop doesn't + // completely hang Terraform. 
+ walker.providerLock.Lock() + ps := make([]ResourceProvider, 0, len(walker.providerCache)) + for _, p := range walker.providerCache { + ps = append(ps, p) + } + defer walker.providerLock.Unlock() + + for _, p := range ps { + // We ignore the error for now since there isn't any reasonable + // action to take if there is an error here, since the stop is still + // advisory: Terraform will exit once the graph node completes. + p.Stop() + } + } + + { + // Call stop on all the provisioners + walker.provisionerLock.Lock() + ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache)) + for _, p := range walker.provisionerCache { + ps = append(ps, p) + } + defer walker.provisionerLock.Unlock() + + for _, p := range ps { + // We ignore the error for now since there isn't any reasonable + // action to take if there is an error here, since the stop is still + // advisory: Terraform will exit once the graph node completes. + p.Stop() + } + } + }() + + return stop, wait +} + +// parseVariableAsHCL parses the value of a single variable as would have been specified +// on the command line via -var or in an environment variable named TF_VAR_x, where x is +// the name of the variable. In order to get around the restriction of HCL requiring a +// top level object, we prepend a sentinel key, decode the user-specified value as its +// value and pull the value back out of the resulting map. +func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) { + // expecting a string so don't decode anything, just strip quotes + if targetType == config.VariableTypeString { + return strings.Trim(input, `"`), nil + } + + // return empty types + if strings.TrimSpace(input) == "" { + switch targetType { + case config.VariableTypeList: + return []interface{}{}, nil + case config.VariableTypeMap: + return make(map[string]interface{}), nil + } + } + + const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY" + inputWithSentinal := fmt.Sprintf("%s = %s", sentinelValue, input) + + var decoded map[string]interface{} + err := hcl.Decode(&decoded, inputWithSentinal) + if err != nil { + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err) + } + + if len(decoded) != 1 { + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input) + } + + parsedValue, ok := decoded[sentinelValue] + if !ok { + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input) + } + + switch targetType { + case config.VariableTypeList: + return parsedValue, nil + case config.VariableTypeMap: + if list, ok := parsedValue.([]map[string]interface{}); ok { + return list[0], nil + } + + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input) + default: + panic(fmt.Errorf("unknown type %s", targetType.Printable())) + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go new file mode 100644 index 0000000000..6f507445c3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go @@ -0,0 +1,65 @@ +package terraform + +import ( + "fmt" +) + +// contextComponentFactory is the interface that Context uses +// to initialize various components such as providers and provisioners. 
+// This factory gets more information than the raw maps used to initialize +// a Context. This information is used for debugging. +type contextComponentFactory interface { + // ResourceProvider creates a new ResourceProvider with the given + // type. The "uid" is a unique identifier for this provider being + // initialized that can be used for internal tracking. + ResourceProvider(typ, uid string) (ResourceProvider, error) + ResourceProviders() []string + + // ResourceProvisioner creates a new ResourceProvisioner with the + // given type. The "uid" is a unique identifier for this provisioner + // being initialized that can be used for internal tracking. + ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) + ResourceProvisioners() []string +} + +// basicComponentFactory just calls a factory from a map directly. +type basicComponentFactory struct { + providers map[string]ResourceProviderFactory + provisioners map[string]ResourceProvisionerFactory +} + +func (c *basicComponentFactory) ResourceProviders() []string { + result := make([]string, 0, len(c.providers)) + for k := range c.providers { + result = append(result, k) + } + + return result +} + +func (c *basicComponentFactory) ResourceProvisioners() []string { + result := make([]string, 0, len(c.provisioners)) + for k := range c.provisioners { + result = append(result, k) + } + + return result +} + +func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) { + f, ok := c.providers[typ] + if !ok { + return nil, fmt.Errorf("unknown provider %q", typ) + } + + return f() +} + +func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) { + f, ok := c.provisioners[typ] + if !ok { + return nil, fmt.Errorf("unknown provisioner %q", typ) + } + + return f() +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go new file mode 100644 index 0000000000..084f0105dd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go @@ -0,0 +1,32 @@ +package terraform + +//go:generate stringer -type=GraphType context_graph_type.go + +// GraphType is an enum of the type of graph to create with a Context. +// The values of the constants may change so they shouldn't be depended on; +// always use the constant name. +type GraphType byte + +const ( + GraphTypeInvalid GraphType = 0 + GraphTypeLegacy GraphType = iota + GraphTypeRefresh + GraphTypePlan + GraphTypePlanDestroy + GraphTypeApply + GraphTypeInput + GraphTypeValidate +) + +// GraphTypeMap is a mapping of human-readable string to GraphType. This +// is useful to use as the mechanism for human input for configurable +// graph types. +var GraphTypeMap = map[string]GraphType{ + "apply": GraphTypeApply, + "input": GraphTypeInput, + "plan": GraphTypePlan, + "plan-destroy": GraphTypePlanDestroy, + "refresh": GraphTypeRefresh, + "legacy": GraphTypeLegacy, + "validate": GraphTypeValidate, +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go new file mode 100644 index 0000000000..f1d57760df --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go @@ -0,0 +1,77 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" +) + +// ImportOpts are used as the configuration for Import.
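+//
+// For example (an illustrative sketch; the address and ID are hypothetical):
+//
+//	state, err := ctx.Import(&ImportOpts{
+//		Targets: []*ImportTarget{
+//			{Addr: "aws_instance.bar", ID: "i-abc123"},
+//		},
+//	})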
+type ImportOpts struct { + // Targets are the targets to import + Targets []*ImportTarget + + // Module is optional, and specifies a config module that is loaded + // into the graph and evaluated. The use case for this is to provide + // provider configuration. + Module *module.Tree +} + +// ImportTarget is a single resource to import. +type ImportTarget struct { + // Addr is the full resource address of the resource to import. + // Example: "module.foo.aws_instance.bar" + Addr string + + // ID is the ID of the resource to import. This is resource-specific. + ID string + + // Provider string + Provider string +} + +// Import takes already-created external resources and brings them +// under Terraform management. Import requires the exact type, name, and ID +// of the resources to import. +// +// This operation is idempotent. If the requested resource is already +// imported, no changes are made to the state. +// +// Further, this operation also gracefully handles partial state. If during +// an import there is a failure, all previously imported resources remain +// imported. +func (c *Context) Import(opts *ImportOpts) (*State, error) { + // Hold a lock since we can modify our own state here + defer c.acquireRun("import")() + + // Copy our own state + c.state = c.state.DeepCopy() + + // If no module is given, default to the module configured with + // the Context. + module := opts.Module + if module == nil { + module = c.module + } + + // Initialize our graph builder + builder := &ImportGraphBuilder{ + ImportTargets: opts.Targets, + Module: module, + Providers: c.components.ResourceProviders(), + } + + // Build the graph! + graph, err := builder.Build(RootModulePath) + if err != nil { + return c.state, err + } + + // Walk it + if _, err := c.walk(graph, nil, walkImport); err != nil { + return c.state, err + } + + // Clean the state + c.state.prune() + + return c.state, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go new file mode 100644 index 0000000000..265339f636 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/debug.go @@ -0,0 +1,523 @@ +package terraform + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" +) + +// dbug is the global handler for writing the debug archive. All methods +// are safe to call concurrently. Setting dbug to nil will disable writing +// the debug archive. All methods are safe to call on the nil value. +var dbug *debugInfo + +// SetDebugInfo initializes the debug handler with a backing file in the +// provided directory. This must be called before any other terraform package +// operations or not at all. Once this is called, CloseDebugInfo should be +// called before program exit. +func SetDebugInfo(path string) error { + if os.Getenv("TF_DEBUG") == "" { + return nil + } + + di, err := newDebugInfoFile(path) + if err != nil { + return err + } + + dbug = di + return nil +} + +// CloseDebugInfo is the exported interface to Close the debug info handler. +// The debug handler needs to be closed before program exit, so we export this +// function to be deferred in the appropriate entrypoint for our executable. +func CloseDebugInfo() error { + return dbug.Close() +} + +// newDebugInfoFile initializes the global debug handler with a backing file in +// the provided directory.
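+//
+// The backing file is created as <dir>/debug-<timestamp>.tar.gz, matching
+// the name format built below.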
+func newDebugInfoFile(dir string) (*debugInfo, error) { + err := os.MkdirAll(dir, 0755) + if err != nil { + return nil, err + } + + // FIXME: not guaranteed unique, but good enough for now + name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999")) + archivePath := filepath.Join(dir, name+".tar.gz") + + f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err != nil { + return nil, err + } + return newDebugInfo(name, f) +} + +// newDebugInfo initializes the global debug handler. +func newDebugInfo(name string, w io.Writer) (*debugInfo, error) { + gz := gzip.NewWriter(w) + + d := &debugInfo{ + name: name, + w: w, + gz: gz, + tar: tar.NewWriter(gz), + } + + // create the subdirs we need + topHdr := &tar.Header{ + Name: name, + Typeflag: tar.TypeDir, + Mode: 0755, + } + graphsHdr := &tar.Header{ + Name: name + "/graphs", + Typeflag: tar.TypeDir, + Mode: 0755, + } + err := d.tar.WriteHeader(topHdr) + // if the first errors, the second will too + err = d.tar.WriteHeader(graphsHdr) + if err != nil { + return nil, err + } + + return d, nil +} + +// debugInfo provides various methods for writing debug information to a +// central archive. The debugInfo struct should be initialized once before any +// output is written, and Close should be called before program exit. All +// exported methods on debugInfo will be safe for concurrent use. The exported +// methods are also all safe to call on a nil pointer, so that there is no need +// for conditional blocks before writing debug information. +// +// Each write operation done by the debugInfo will flush the gzip.Writer and +// tar.Writer, and call Sync() or Flush() on the output writer as needed. This +// ensures that as much data as possible is written to storage in the event of +// a crash. The append format of the tar file, and the stream format of the +// gzip writer allow easy recovery of the data in the event that the debugInfo +// is not closed before program exit. +type debugInfo struct { + sync.Mutex + + // archive root directory name + name string + + // current operation phase + phase string + + // step is a monotonic counter for recording the order of operations + step int + + // flag to protect Close() + closed bool + + // the debug log output is in a tar.gz format, written to the io.Writer w + w io.Writer + gz *gzip.Writer + tar *tar.Writer +} + +// Set the name of the current operational phase in the debug handler. Each file +// in the archive will contain the name of the phase in which it was created, +// i.e. "input", "apply", "plan", "refresh", "validate" +func (d *debugInfo) SetPhase(phase string) { + if d == nil { + return + } + d.Lock() + defer d.Unlock() + + d.phase = phase +} + +// Close the debugInfo, finalizing the data in storage. This closes the +// tar.Writer, the gzip.Writer, and if the output writer is an io.Closer, it is +// also closed. +func (d *debugInfo) Close() error { + if d == nil { + return nil + } + + d.Lock() + defer d.Unlock() + + if d.closed { + return nil + } + d.closed = true + + d.tar.Close() + d.gz.Close() + + if c, ok := d.w.(io.Closer); ok { + return c.Close() + } + return nil +} + +// debug buffer is an io.WriteCloser that will write itself to the debug +// archive when closed.
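+//
+// For example (illustrative), callers obtain one from NewFileWriter and the
+// buffered bytes land in the archive on Close:
+//
+//	w := dbug.NewFileWriter("graph.dot") // hypothetical entry name
+//	w.Write(data)
+//	w.Close()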
+type debugBuffer struct { + debugInfo *debugInfo + name string + buf bytes.Buffer +} + +func (b *debugBuffer) Write(d []byte) (int, error) { + return b.buf.Write(d) +} + +func (b *debugBuffer) Close() error { + return b.debugInfo.WriteFile(b.name, b.buf.Bytes()) +} + +// ioutil only has a noop ReadCloser +type nopWriteCloser struct{} + +func (nopWriteCloser) Write([]byte) (int, error) { return 0, nil } +func (nopWriteCloser) Close() error { return nil } + +// NewFileWriter returns an io.WriteCloser that will be buffered and written to +// the debug archive when closed. +func (d *debugInfo) NewFileWriter(name string) io.WriteCloser { + if d == nil { + return nopWriteCloser{} + } + + return &debugBuffer{ + debugInfo: d, + name: name, + } +} + +type syncer interface { + Sync() error +} + +type flusher interface { + Flush() error +} + +// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called +// on the output writer if they are available. +func (d *debugInfo) flush() { + d.tar.Flush() + d.gz.Flush() + + if f, ok := d.w.(flusher); ok { + f.Flush() + } + + if s, ok := d.w.(syncer); ok { + s.Sync() + } +} + +// WriteFile writes data as a single file to the debug archive. +func (d *debugInfo) WriteFile(name string, data []byte) error { + if d == nil { + return nil + } + + d.Lock() + defer d.Unlock() + return d.writeFile(name, data) +} + +func (d *debugInfo) writeFile(name string, data []byte) error { + defer d.flush() + path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name) + d.step++ + + hdr := &tar.Header{ + Name: path, + Mode: 0644, + Size: int64(len(data)), + } + err := d.tar.WriteHeader(hdr) + if err != nil { + return err + } + + _, err = d.tar.Write(data) + return err +} + +// DebugHook implements all methods of the terraform.Hook interface, and writes +// the arguments to a file in the archive. When a suitable format for the +// argument isn't available, the argument is encoded using json.Marshal. If the +// debug handler is nil, all DebugHook methods are noop, so no time is spent in +// marshaling the data structures.
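+//
+// For example (illustrative), the hook is registered like any other Hook:
+//
+//	ctx, err := NewContext(&ContextOpts{
+//		Module: mod, // hypothetical module tree
+//		Hooks:  []Hook{&DebugHook{}},
+//	})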
+type DebugHook struct{} + +func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + if is != nil { + buf.WriteString(is.String() + "\n") + } + + idCopy, err := id.Copy() + if err != nil { + return HookActionContinue, err + } + js, err := json.MarshalIndent(idCopy, "", " ") + if err != nil { + return HookActionContinue, err + } + buf.Write(js) + + dbug.WriteFile("hook-PreApply", buf.Bytes()) + + return HookActionContinue, nil +} + +func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + if is != nil { + buf.WriteString(is.String() + "\n") + } + + if err != nil { + buf.WriteString(err.Error()) + } + + dbug.WriteFile("hook-PostApply", buf.Bytes()) + + return HookActionContinue, nil +} + +func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + if is != nil { + buf.WriteString(is.String()) + buf.WriteString("\n") + } + dbug.WriteFile("hook-PreDiff", buf.Bytes()) + + return HookActionContinue, nil +} + +func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + idCopy, err := id.Copy() + if err != nil { + return HookActionContinue, err + } + js, err := json.MarshalIndent(idCopy, "", " ") + if err != nil { + return HookActionContinue, err + } + buf.Write(js) + + dbug.WriteFile("hook-PostDiff", buf.Bytes()) + + return HookActionContinue, nil +} + +func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + if is != nil { + buf.WriteString(is.String()) + buf.WriteString("\n") + } + dbug.WriteFile("hook-PreProvisionResource", buf.Bytes()) + + return HookActionContinue, nil +} + +func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId()) + buf.WriteString("\n") + } + + if is != nil { + buf.WriteString(is.String()) + buf.WriteString("\n") + } + dbug.WriteFile("hook-PostProvisionResource", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId()) + buf.WriteString("\n") + } + buf.WriteString(s + "\n") + + dbug.WriteFile("hook-PreProvision", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + buf.WriteString(s + "\n") + + dbug.WriteFile("hook-PostProvision", buf.Bytes()) + return HookActionContinue, nil +} + 
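+// ProvisionOutput writes the provisioner output arguments to the debug
+// archive, mirroring the other DebugHook methods.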
+func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) { + if dbug == nil { + return + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId()) + buf.WriteString("\n") + } + buf.WriteString(s1 + "\n") + buf.WriteString(s2 + "\n") + + dbug.WriteFile("hook-ProvisionOutput", buf.Bytes()) +} + +func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + if is != nil { + buf.WriteString(is.String()) + buf.WriteString("\n") + } + dbug.WriteFile("hook-PreRefresh", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId()) + buf.WriteString("\n") + } + + if is != nil { + buf.WriteString(is.String()) + buf.WriteString("\n") + } + dbug.WriteFile("hook-PostRefresh", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId()) + buf.WriteString("\n") + } + buf.WriteString(s + "\n") + + dbug.WriteFile("hook-PreImportState", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + for _, is := range iss { + if is != nil { + buf.WriteString(is.String() + "\n") + } + } + dbug.WriteFile("hook-PostImportState", buf.Bytes()) + return HookActionContinue, nil +} + +// skip logging this for now, since it could be huge +func (*DebugHook) PostStateUpdate(*State) (HookAction, error) { + return HookActionContinue, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go new file mode 100644 index 0000000000..a9fae6c2c8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go @@ -0,0 +1,866 @@ +package terraform + +import ( + "bufio" + "bytes" + "fmt" + "reflect" + "regexp" + "sort" + "strings" + "sync" + + "github.com/mitchellh/copystructure" +) + +// DiffChangeType is an enum with the kind of changes a diff has planned. +type DiffChangeType byte + +const ( + DiffInvalid DiffChangeType = iota + DiffNone + DiffCreate + DiffUpdate + DiffDestroy + DiffDestroyCreate +) + +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// Diff tracks the changes that are necessary to apply a configuration +// to an existing infrastructure. +type Diff struct { + // Modules contains all the modules that have a diff + Modules []*ModuleDiff +} + +// Prune cleans out unused structures in the diff without affecting +// the behavior of the diff at all. +// +// This is not safe to call concurrently. This is safe to call on a +// nil Diff.
+func (d *Diff) Prune() { + if d == nil { + return + } + + // Prune all empty modules + newModules := make([]*ModuleDiff, 0, len(d.Modules)) + for _, m := range d.Modules { + // If the module isn't empty, we keep it + if !m.Empty() { + newModules = append(newModules, m) + } + } + if len(newModules) == 0 { + newModules = nil + } + d.Modules = newModules +} + +// AddModule adds the module with the given path to the diff. +// +// This should be the preferred method to add module diffs since it +// allows us to optimize lookups later as well as control sorting. +func (d *Diff) AddModule(path []string) *ModuleDiff { + m := &ModuleDiff{Path: path} + m.init() + d.Modules = append(d.Modules, m) + return m +} + +// ModuleByPath is used to look up the module diff for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (d *Diff) ModuleByPath(path []string) *ModuleDiff { + if d == nil { + return nil + } + for _, mod := range d.Modules { + if mod.Path == nil { + panic("missing module path") + } + if reflect.DeepEqual(mod.Path, path) { + return mod + } + } + return nil +} + +// RootModule returns the ModuleDiff for the root module +func (d *Diff) RootModule() *ModuleDiff { + root := d.ModuleByPath(rootModulePath) + if root == nil { + panic("missing root module") + } + return root +} + +// Empty returns true if the diff has no changes. +func (d *Diff) Empty() bool { + if d == nil { + return true + } + + for _, m := range d.Modules { + if !m.Empty() { + return false + } + } + + return true +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +func (d *Diff) Equal(d2 *Diff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Sort the modules + sort.Sort(moduleDiffSort(d.Modules)) + sort.Sort(moduleDiffSort(d2.Modules)) + + // Copy since we have to modify the module destroy flag to false so + // we don't compare that. TODO: delete this when we get rid of the + // destroy flag on modules. + dCopy := d.DeepCopy() + d2Copy := d2.DeepCopy() + for _, m := range dCopy.Modules { + m.Destroy = false + } + for _, m := range d2Copy.Modules { + m.Destroy = false + } + + // Use DeepEqual + return reflect.DeepEqual(dCopy, d2Copy) +} + +// DeepCopy performs a deep copy of all parts of the Diff, making the +// resulting Diff safe to use without modifying this one. +func (d *Diff) DeepCopy() *Diff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*Diff) +} + +func (d *Diff) String() string { + var buf bytes.Buffer + + keys := make([]string, 0, len(d.Modules)) + lookup := make(map[string]*ModuleDiff) + for _, m := range d.Modules { + key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], ".")) + keys = append(keys, key) + lookup[key] = m + } + sort.Strings(keys) + + for _, key := range keys { + m := lookup[key] + mStr := m.String() + + // If we're the root module, we just write the output directly.
+ if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("%s:\n", key)) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return strings.TrimSpace(buf.String()) +} + +func (d *Diff) init() { + if d.Modules == nil { + rootDiff := &ModuleDiff{Path: rootModulePath} + d.Modules = []*ModuleDiff{rootDiff} + } + for _, m := range d.Modules { + m.init() + } +} + +// ModuleDiff tracks the differences between resources to apply within +// a single module. +type ModuleDiff struct { + Path []string + Resources map[string]*InstanceDiff + Destroy bool // Set only by the destroy plan +} + +func (d *ModuleDiff) init() { + if d.Resources == nil { + d.Resources = make(map[string]*InstanceDiff) + } + for _, r := range d.Resources { + r.init() + } +} + +// ChangeType returns the type of changes that the diff for this +// module includes. +// +// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or +// DiffCreate. If an instance within the module has a DiffDestroyCreate +// then this will register as a DiffUpdate for the module. +func (d *ModuleDiff) ChangeType() DiffChangeType { + result := DiffNone + for _, r := range d.Resources { + change := r.ChangeType() + switch change { + case DiffCreate, DiffDestroy: + if result == DiffNone { + result = change + } + case DiffDestroyCreate, DiffUpdate: + result = DiffUpdate + } + } + + return result +} + +// Empty returns true if the diff has no changes within this module. +func (d *ModuleDiff) Empty() bool { + if d.Destroy { + return false + } + + if len(d.Resources) == 0 { + return true + } + + for _, rd := range d.Resources { + if !rd.Empty() { + return false + } + } + + return true +} + +// Instances returns the instance diffs for the id given. This can return +// multiple instance diffs if there are counts within the resource. +func (d *ModuleDiff) Instances(id string) []*InstanceDiff { + var result []*InstanceDiff + for k, diff := range d.Resources { + if k == id || strings.HasPrefix(k, id+".") { + if !diff.Empty() { + result = append(result, diff) + } + } + } + + return result +} + +// IsRoot says whether or not this module diff is for the root module. +func (d *ModuleDiff) IsRoot() bool { + return reflect.DeepEqual(d.Path, rootModulePath) +} + +// String outputs the diff in a long but command-line friendly output +// format that users can read to quickly inspect a diff.
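+//
+// For example (illustrative), a forced replacement renders as:
+//
+//	DESTROY/CREATE: aws_instance.example
+//	  ami: "ami-1" => "ami-2" (forces new resource)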
+func (d *ModuleDiff) String() string { + var buf bytes.Buffer + + names := make([]string, 0, len(d.Resources)) + for name, _ := range d.Resources { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + rdiff := d.Resources[name] + + crud := "UPDATE" + switch { + case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): + crud = "DESTROY/CREATE" + case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): + crud = "DESTROY" + case rdiff.RequiresNew(): + crud = "CREATE" + } + + extra := "" + if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { + extra = " (deposed only)" + } + + buf.WriteString(fmt.Sprintf( + "%s: %s%s\n", + crud, + name, + extra)) + + keyLen := 0 + rdiffAttrs := rdiff.CopyAttributes() + keys := make([]string, 0, len(rdiffAttrs)) + for key, _ := range rdiffAttrs { + if key == "id" { + continue + } + + keys = append(keys, key) + if len(key) > keyLen { + keyLen = len(key) + } + } + sort.Strings(keys) + + for _, attrK := range keys { + attrDiff, _ := rdiff.GetAttribute(attrK) + + v := attrDiff.New + u := attrDiff.Old + if attrDiff.NewComputed { + v = "" + } + + if attrDiff.Sensitive { + u = "" + v = "" + } + + updateMsg := "" + if attrDiff.RequiresNew { + updateMsg = " (forces new resource)" + } else if attrDiff.Sensitive { + updateMsg = " (attribute changed)" + } + + buf.WriteString(fmt.Sprintf( + " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, + v, + updateMsg)) + } + } + + return buf.String() +} + +// InstanceDiff is the diff of a resource from some state to another. +type InstanceDiff struct { + mu sync.Mutex + Attributes map[string]*ResourceAttrDiff + Destroy bool + DestroyDeposed bool + DestroyTainted bool + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by Terraform core. It is + // mean to be used for additional data a resource may want to pass through. + // The value here must only contain Go primitives and collections. + Meta map[string]interface{} +} + +func (d *InstanceDiff) Lock() { d.mu.Lock() } +func (d *InstanceDiff) Unlock() { d.mu.Unlock() } + +// ResourceAttrDiff is the diff of a single attribute of a resource. +type ResourceAttrDiff struct { + Old string // Old Value + New string // New Value + NewComputed bool // True if new value is computed (unknown currently) + NewRemoved bool // True if this attribute is being removed + NewExtra interface{} // Extra information for the provider + RequiresNew bool // True if change requires new resource + Sensitive bool // True if the data should not be displayed in UI output + Type DiffAttrType +} + +// Empty returns true if the diff for this attr is neutral +func (d *ResourceAttrDiff) Empty() bool { + return d.Old == d.New && !d.NewComputed && !d.NewRemoved +} + +func (d *ResourceAttrDiff) GoString() string { + return fmt.Sprintf("*%#v", *d) +} + +// DiffAttrType is an enum type that says whether a resource attribute +// diff is an input attribute (comes from the configuration) or an +// output attribute (comes as a result of applying the configuration). An +// example input would be "ami" for AWS and an example output would be +// "private_ip". 
+type DiffAttrType byte + +const ( + DiffAttrUnknown DiffAttrType = iota + DiffAttrInput + DiffAttrOutput +) + +func (d *InstanceDiff) init() { + if d.Attributes == nil { + d.Attributes = make(map[string]*ResourceAttrDiff) + } +} + +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + +func (d *InstanceDiff) Copy() (*InstanceDiff, error) { + if d == nil { + return nil, nil + } + + dCopy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + return nil, err + } + + return dCopy.(*InstanceDiff), nil +} + +// ChangeType returns the DiffChangeType represented by the diff +// for this single instance. +func (d *InstanceDiff) ChangeType() DiffChangeType { + if d.Empty() { + return DiffNone + } + + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { + return DiffDestroyCreate + } + + if d.GetDestroy() || d.GetDestroyDeposed() { + return DiffDestroy + } + + if d.RequiresNew() { + return DiffCreate + } + + return DiffUpdate +} + +// Empty returns true if this diff encapsulates no changes. +func (d *InstanceDiff) Empty() bool { + if d == nil { + return true + } + + d.mu.Lock() + defer d.mu.Unlock() + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +// DeepCopy performs a deep copy of all parts of the InstanceDiff +func (d *InstanceDiff) DeepCopy() *InstanceDiff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*InstanceDiff) +} + +func (d *InstanceDiff) GoString() string { + return fmt.Sprintf("*%#v", InstanceDiff{ + Attributes: d.Attributes, + Destroy: d.Destroy, + DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, + }) +} + +// RequiresNew returns true if the diff requires the creation of a new +// resource (implying the destruction of the old). +func (d *InstanceDiff) RequiresNew() bool { + if d == nil { + return false + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + + if d.DestroyTainted { + return true + } + + for _, rd := range d.Attributes { + if rd != nil && rd.RequiresNew { + return true + } + } + + return false +} + +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +func (d *InstanceDiff) SetDestroyDeposed(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyDeposed = b +} + +// These methods are properly locked, for use outside other InstanceDiff +// methods but everywhere else within in the terraform package. 
+// TODO refactor the locking scheme +func (d *InstanceDiff) SetTainted(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyTainted = b +} + +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +func (d *InstanceDiff) SetDestroy(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Destroy = b +} + +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Attributes[key] = attr +} + +func (d *InstanceDiff) DelAttribute(key string) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.Attributes, key) +} + +func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { + d.mu.Lock() + defer d.mu.Unlock() + + attr, ok := d.Attributes[key] + return attr, ok +} +func (d *InstanceDiff) GetAttributesLen() int { + d.mu.Lock() + defer d.mu.Unlock() + + return len(d.Attributes) +} + +// Safely copies the Attributes map +func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { + d.mu.Lock() + defer d.mu.Unlock() + + attrs := make(map[string]*ResourceAttrDiff) + for k, v := range d.Attributes { + attrs[k] = v + } + + return attrs +} + +// Same checks whether or not two InstanceDiff's are the "same". When +// we say "same", it is not necessarily exactly equal. Instead, it is +// just checking that the same attributes are changing, a destroy +// isn't suddenly happening, etc. +func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { + // we can safely compare the pointers without a lock + switch { + case d == nil && d2 == nil: + return true, "" + case d == nil || d2 == nil: + return false, "one nil" + case d == d2: + return true, "" + } + + d.mu.Lock() + defer d.mu.Unlock() + + // If we're going from requiring new to NOT requiring new, then we have + // to see if all required news were computed. If so, it is allowed since + // computed may also mean "same value and therefore not new". + oldNew := d.requiresNew() + newNew := d2.RequiresNew() + if oldNew && !newNew { + oldNew = false + + // This section builds a list of ignorable attributes for requiresNew + // by removing off any elements of collections going to zero elements. + // For collections going to zero, they may not exist at all in the + // new diff (and hence RequiresNew == false). + ignoreAttrs := make(map[string]struct{}) + for k, diffOld := range d.Attributes { + if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { + continue + } + + // This case is in here as a protection measure. The bug that this + // code originally fixed (GH-11349) didn't have to deal with computed + // so I'm not 100% sure what the correct behavior is. Best to leave + // the old behavior. + if diffOld.NewComputed { + continue + } + + // We're looking for the case a map goes to exactly 0. + if diffOld.New != "0" { + continue + } + + // Found it! Ignore all of these. The prefix here is stripping + // off the "%" so it is just "k." + prefix := k[:len(k)-1] + for k2, _ := range d.Attributes { + if strings.HasPrefix(k2, prefix) { + ignoreAttrs[k2] = struct{}{} + } + } + } + + for k, rd := range d.Attributes { + if _, ok := ignoreAttrs[k]; ok { + continue + } + + // If the field is requires new and NOT computed, then what + // we have is a diff mismatch for sure. We set that the old + // diff does REQUIRE a ForceNew. 
+ if rd != nil && rd.RequiresNew && !rd.NewComputed { + oldNew = true + break + } + } + } + + if oldNew != newNew { + return false, fmt.Sprintf( + "diff RequiresNew; old: %t, new: %t", oldNew, newNew) + } + + // Verify that destroy matches. The second boolean here allows us to + // have mismatching Destroy if we're moving from RequiresNew true + // to false above. Therefore, the second boolean will only pass if + // we're moving from Destroy: true to false as well. + if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { + return false, fmt.Sprintf( + "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) + } + + // Go through the old diff and make sure the new diff has all the + // same attributes. To start, build up the check map to be all the keys. + checkOld := make(map[string]struct{}) + checkNew := make(map[string]struct{}) + for k, _ := range d.Attributes { + checkOld[k] = struct{}{} + } + for k, _ := range d2.CopyAttributes() { + checkNew[k] = struct{}{} + } + + // Make an ordered list so we are sure the approximated hashes are left + // to process at the end of the loop + keys := make([]string, 0, len(d.Attributes)) + for k, _ := range d.Attributes { + keys = append(keys, k) + } + sort.StringSlice(keys).Sort() + + for _, k := range keys { + diffOld := d.Attributes[k] + + if _, ok := checkOld[k]; !ok { + // We're not checking this key for whatever reason (see where + // check is modified). + continue + } + + // Remove this key since we'll never hit it again + delete(checkOld, k) + delete(checkNew, k) + + _, ok := d2.GetAttribute(k) + if !ok { + // If there's no new attribute, and the old diff expected the attribute + // to be removed, that's just fine. + if diffOld.NewRemoved { + continue + } + + // If the last diff was a computed value then the absense of + // that value is allowed since it may mean the value ended up + // being the same. + if diffOld.NewComputed { + ok = true + } + + // No exact match, but maybe this is a set containing computed + // values. So check if there is an approximate hash in the key + // and if so, try to match the key. + if strings.Contains(k, "~") { + parts := strings.Split(k, ".") + parts2 := append([]string(nil), parts...) + + re := regexp.MustCompile(`^~\d+$`) + for i, part := range parts { + if re.MatchString(part) { + // we're going to consider this the base of a + // computed hash, and remove all longer matching fields + ok = true + + parts2[i] = `\d+` + parts2 = parts2[:i+1] + break + } + } + + re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) + if err != nil { + return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) + } + + for k2, _ := range checkNew { + if re.MatchString(k2) { + delete(checkNew, k2) + } + } + } + + // This is a little tricky, but when a diff contains a computed + // list, set, or map that can only be interpolated after the apply + // command has created the dependent resources, it could turn out + // that the result is actually the same as the existing state which + // would remove the key from the diff. + if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + // Similarly, in a RequiresNew scenario, a list that shows up in the plan + // diff can disappear from the apply diff, which is calculated from an + // empty state. 
+			if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+				ok = true
+			}
+
+			if !ok {
+				return false, fmt.Sprintf("attribute mismatch: %s", k)
+			}
+		}
+
+		// search for the suffix of the base of a [computed] map, list or set.
+		match := multiVal.FindStringSubmatch(k)
+
+		if diffOld.NewComputed && len(match) == 2 {
+			matchLen := len(match[1])
+
+			// This is a computed list, set, or map, so remove any keys with
+			// this prefix from the check list.
+			kprefix := k[:len(k)-matchLen]
+			for k2, _ := range checkOld {
+				if strings.HasPrefix(k2, kprefix) {
+					delete(checkOld, k2)
+				}
+			}
+			for k2, _ := range checkNew {
+				if strings.HasPrefix(k2, kprefix) {
+					delete(checkNew, k2)
+				}
+			}
+		}
+
+		// TODO: check for the same value if not computed
+	}
+
+	// Check for leftover attributes
+	if len(checkNew) > 0 {
+		extras := make([]string, 0, len(checkNew))
+		for attr, _ := range checkNew {
+			extras = append(extras, attr)
+		}
+		return false,
+			fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
+	}
+
+	return true, ""
+}
+
+// moduleDiffSort implements sort.Interface to sort module diffs by path.
+type moduleDiffSort []*ModuleDiff
+
+func (s moduleDiffSort) Len() int      { return len(s) }
+func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s moduleDiffSort) Less(i, j int) bool {
+	a := s[i]
+	b := s[j]
+
+	// If the lengths are different, then the shorter one always wins
+	if len(a.Path) != len(b.Path) {
+		return len(a.Path) < len(b.Path)
+	}
+
+	// Otherwise, compare lexically
+	return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
new file mode 100644
index 0000000000..bc9d638aad
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
@@ -0,0 +1,17 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/dag"
+)
+
+// DestroyEdge is an edge that represents a standard "destroy" relationship:
+// Target depends on Source because Source is destroying.
+type DestroyEdge struct {
+	S, T dag.Vertex
+}
+
+func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) }
+func (e *DestroyEdge) Source() dag.Vertex    { return e.S }
+func (e *DestroyEdge) Target() dag.Vertex    { return e.T }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
new file mode 100644
index 0000000000..3cb088a252
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -0,0 +1,63 @@
+package terraform
+
+import (
+	"log"
+	"strings"
+)
+
+// EvalNode is the interface that must be implemented by graph nodes to
+// evaluate/execute.
+type EvalNode interface {
+	// Eval evaluates this node with the given context, returning an
+	// optional result value along with any error.
+	Eval(EvalContext) (interface{}, error)
+}
+
+// GraphNodeEvalable is the interface that graph nodes must implement
+// to enable evaluation.
+type GraphNodeEvalable interface {
+	EvalTree() EvalNode
+}
+
+// EvalEarlyExitError is a special error return value that can be returned
+// by eval nodes to signal an early exit.
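+//
+// For example, a hook that returns HookActionHalt is surfaced as this
+// error; Eval (below) then swallows it and treats the walk as a normal
+// stop rather than a failure.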
+type EvalEarlyExitError struct{}
+
+func (EvalEarlyExitError) Error() string { return "early exit" }
+
+// Eval evaluates the given EvalNode with the given context. An early
+// exit is translated into a nil result with no error.
+func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
+	// Call the lower level eval which doesn't understand early exit,
+	// and if we early exit, it isn't an error.
+	result, err := EvalRaw(n, ctx)
+	if err != nil {
+		if _, ok := err.(EvalEarlyExitError); ok {
+			return nil, nil
+		}
+	}
+
+	return result, err
+}
+
+// EvalRaw is like Eval except that it returns all errors, even if they
+// signal something normal such as EvalEarlyExitError.
+func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
+	path := "unknown"
+	if ctx != nil {
+		path = strings.Join(ctx.Path(), ".")
+	}
+
+	log.Printf("[DEBUG] %s: eval: %T", path, n)
+	output, err := n.Eval(ctx)
+	if err != nil {
+		if _, ok := err.(EvalEarlyExitError); ok {
+			log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
+		} else {
+			log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
+		}
+	}
+
+	return output, err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
new file mode 100644
index 0000000000..2f6a4973e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -0,0 +1,359 @@
+package terraform
+
+import (
+	"fmt"
+	"log"
+	"strconv"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/terraform/config"
+)
+
+// EvalApply is an EvalNode implementation that applies the given diff
+// to the resource through the provider and captures the resulting state.
+type EvalApply struct {
+	Info      *InstanceInfo
+	State     **InstanceState
+	Diff      **InstanceDiff
+	Provider  *ResourceProvider
+	Output    **InstanceState
+	CreateNew *bool
+	Error     *error
+}
+
+// TODO: test
+func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
+	diff := *n.Diff
+	provider := *n.Provider
+	state := *n.State
+
+	// If we have no diff, we have nothing to do!
+	if diff.Empty() {
+		log.Printf(
+			"[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id)
+		return nil, nil
+	}
+
+	// Remove any output values from the diff
+	for k, ad := range diff.CopyAttributes() {
+		if ad.Type == DiffAttrOutput {
+			diff.DelAttribute(k)
+		}
+	}
+
+	// If the state is nil, make it non-nil
+	if state == nil {
+		state = new(InstanceState)
+	}
+	state.init()
+
+	// Flag if we're creating a new instance
+	if n.CreateNew != nil {
+		*n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew()
+	}
+
+	// With the completed diff, apply!
+	log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
+	state, err := provider.Apply(n.Info, state, diff)
+	if state == nil {
+		state = new(InstanceState)
+	}
+	state.init()
+
+	// Force the "id" attribute to be our ID
+	if state.ID != "" {
+		state.Attributes["id"] = state.ID
+	}
+
+	// If the value is the unknown variable value, then it is an error.
+	// In this case we record the error and remove it from the state
+	for ak, av := range state.Attributes {
+		if av == config.UnknownVariableValue {
+			err = multierror.Append(err, fmt.Errorf(
+				"Attribute with unknown value: %s", ak))
+			delete(state.Attributes, ak)
+		}
+	}
+
+	// Write the final state
+	if n.Output != nil {
+		*n.Output = state
+	}
+
+	// If there was an error, append it to our aggregated output error
+	// if we have one; otherwise just return it.
+	if err != nil {
+		if n.Error != nil {
+			helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
+			*n.Error = multierror.Append(*n.Error, helpfulErr)
+		} else {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
+type EvalApplyPre struct {
+	Info  *InstanceInfo
+	State **InstanceState
+	Diff  **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
+	state := *n.State
+	diff := *n.Diff
+
+	// If the state is nil, make it non-nil
+	if state == nil {
+		state = new(InstanceState)
+	}
+	state.init()
+
+	{
+		// Call pre-apply hook
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PreApply(n.Info, state, diff)
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+// EvalApplyPost is an EvalNode implementation that does the post-Apply work
+type EvalApplyPost struct {
+	Info  *InstanceInfo
+	State **InstanceState
+	Error *error
+}
+
+// TODO: test
+func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
+	state := *n.State
+
+	{
+		// Call post-apply hook
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PostApply(n.Info, state, *n.Error)
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, *n.Error
+}
+
+// EvalApplyProvisioners is an EvalNode implementation that executes
+// the provisioners for a resource.
+//
+// TODO(mitchellh): This should probably be split up into a more fine-grained
+// ApplyProvisioner (single) that is looped over.
+type EvalApplyProvisioners struct {
+	Info           *InstanceInfo
+	State          **InstanceState
+	Resource       *config.Resource
+	InterpResource *Resource
+	CreateNew      *bool
+	Error          *error
+
+	// When is the type of provisioner to run at this point
+	When config.ProvisionerWhen
+}
+
+// TODO: test
+func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
+	state := *n.State
+
+	if n.CreateNew != nil && !*n.CreateNew {
+		// If we're not creating a new resource, then don't run provisioners
+		return nil, nil
+	}
+
+	provs := n.filterProvisioners()
+	if len(provs) == 0 {
+		// We have no provisioners, so don't do anything
+		return nil, nil
+	}
+
+	// taint tells us whether to enable tainting.
+	taint := n.When == config.ProvisionerWhenCreate
+
+	if n.Error != nil && *n.Error != nil {
+		if taint {
+			state.Tainted = true
+		}
+
+		// An error already occurred, so return early without provisioning
+		return nil, nil
+	}
+
+	{
+		// Call pre hook
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PreProvisionResource(n.Info, state)
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Run the provisioners; if there was an error, append it to our
+	// aggregated output error if we have one, otherwise return it directly.
+	err := n.apply(ctx, provs)
+	if err != nil {
+		if taint {
+			state.Tainted = true
+		}
+
+		if n.Error != nil {
+			*n.Error = multierror.Append(*n.Error, err)
+		} else {
+			return nil, err
+		}
+	}
+
+	{
+		// Call post hook
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PostProvisionResource(n.Info, state)
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+// filterProvisioners filters the provisioners on the resource to only
+// the provisioners specified by the "when" option.
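+//
+// For example (a sketch; config.ProvisionerWhenDestroy is assumed to be
+// the counterpart constant in the config package), only the second entry
+// below would survive filtering when When == config.ProvisionerWhenCreate:
+//
+//	provisioners := []*config.Provisioner{
+//		{Type: "local-exec", When: config.ProvisionerWhenDestroy},
+//		{Type: "remote-exec", When: config.ProvisionerWhenCreate},
+//	}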
+func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner { + // Fast path the zero case + if n.Resource == nil { + return nil + } + + if len(n.Resource.Provisioners) == 0 { + return nil + } + + result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners)) + for _, p := range n.Resource.Provisioners { + if p.When == n.When { + result = append(result, p) + } + } + + return result +} + +func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error { + state := *n.State + + // Store the original connection info, restore later + origConnInfo := state.Ephemeral.ConnInfo + defer func() { + state.Ephemeral.ConnInfo = origConnInfo + }() + + for _, prov := range provs { + // Get the provisioner + provisioner := ctx.Provisioner(prov.Type) + + // Interpolate the provisioner config + provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource) + if err != nil { + return err + } + + // Interpolate the conn info, since it may contain variables + connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource) + if err != nil { + return err + } + + // Merge the connection information + overlay := make(map[string]string) + if origConnInfo != nil { + for k, v := range origConnInfo { + overlay[k] = v + } + } + for k, v := range connInfo.Config { + switch vt := v.(type) { + case string: + overlay[k] = vt + case int64: + overlay[k] = strconv.FormatInt(vt, 10) + case int32: + overlay[k] = strconv.FormatInt(int64(vt), 10) + case int: + overlay[k] = strconv.FormatInt(int64(vt), 10) + case float32: + overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32) + case float64: + overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64) + case bool: + overlay[k] = strconv.FormatBool(vt) + default: + overlay[k] = fmt.Sprintf("%v", vt) + } + } + state.Ephemeral.ConnInfo = overlay + + { + // Call pre hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreProvision(n.Info, prov.Type) + }) + if err != nil { + return err + } + } + + // The output function + outputFn := func(msg string) { + ctx.Hook(func(h Hook) (HookAction, error) { + h.ProvisionOutput(n.Info, prov.Type, msg) + return HookActionContinue, nil + }) + } + + // Invoke the Provisioner + output := CallbackUIOutput{OutputFn: outputFn} + applyErr := provisioner.Apply(&output, state, provConfig) + + // Call post hook + hookErr := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostProvision(n.Info, prov.Type, applyErr) + }) + + // Handle the error before we deal with the hook + if applyErr != nil { + // Determine failure behavior + switch prov.OnFailure { + case config.ProvisionerOnFailureContinue: + log.Printf( + "[INFO] apply: %s [%s]: error during provision, continue requested", + n.Info.Id, prov.Type) + + case config.ProvisionerOnFailureFail: + return applyErr + } + } + + // Deal with the hook + if hookErr != nil { + return hookErr + } + } + + return nil + +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go new file mode 100644 index 0000000000..715e79e174 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go @@ -0,0 +1,38 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// EvalPreventDestroy is an EvalNode implementation that returns an +// error if a resource has PreventDestroy configured and the diff +// would destroy the resource. 
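+//
+// A minimal configuration sketch (hypothetical resource name) that would
+// trip this check whenever a plan tries to destroy the resource:
+//
+//	resource "aws_instance" "web" {
+//	  lifecycle {
+//	    prevent_destroy = true
+//	  }
+//	}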
+type EvalCheckPreventDestroy struct { + Resource *config.Resource + ResourceId string + Diff **InstanceDiff +} + +func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { + if n.Diff == nil || *n.Diff == nil || n.Resource == nil { + return nil, nil + } + + diff := *n.Diff + preventDestroy := n.Resource.Lifecycle.PreventDestroy + + if diff.GetDestroy() && preventDestroy { + resourceId := n.ResourceId + if resourceId == "" { + resourceId = n.Resource.Id() + } + + return nil, fmt.Errorf(preventDestroyErrStr, resourceId) + } + + return nil, nil +} + +const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.` diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go new file mode 100644 index 0000000000..a1f815b7d4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go @@ -0,0 +1,84 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform/config" +) + +// EvalContext is the interface that is given to eval nodes to execute. +type EvalContext interface { + // Stopped returns a channel that is closed when evaluation is stopped + // via Terraform.Context.Stop() + Stopped() <-chan struct{} + + // Path is the current module path. + Path() []string + + // Hook is used to call hook methods. The callback is called for each + // hook and should return the hook action to take and the error. + Hook(func(Hook) (HookAction, error)) error + + // Input is the UIInput object for interacting with the UI. + Input() UIInput + + // InitProvider initializes the provider with the given name and + // returns the implementation of the resource provider or an error. + // + // It is an error to initialize the same provider more than once. + InitProvider(string) (ResourceProvider, error) + + // Provider gets the provider instance with the given name (already + // initialized) or returns nil if the provider isn't initialized. + Provider(string) ResourceProvider + + // CloseProvider closes provider connections that aren't needed anymore. + CloseProvider(string) error + + // ConfigureProvider configures the provider with the given + // configuration. This is a separate context call because this call + // is used to store the provider configuration for inheritance lookups + // with ParentProviderConfig(). + ConfigureProvider(string, *ResourceConfig) error + SetProviderConfig(string, *ResourceConfig) error + ParentProviderConfig(string) *ResourceConfig + + // ProviderInput and SetProviderInput are used to configure providers + // from user input. + ProviderInput(string) map[string]interface{} + SetProviderInput(string, map[string]interface{}) + + // InitProvisioner initializes the provisioner with the given name and + // returns the implementation of the resource provisioner or an error. + // + // It is an error to initialize the same provisioner more than once. + InitProvisioner(string) (ResourceProvisioner, error) + + // Provisioner gets the provisioner instance with the given name (already + // initialized) or returns nil if the provisioner isn't initialized. + Provisioner(string) ResourceProvisioner + + // CloseProvisioner closes provisioner connections that aren't needed + // anymore. 
+ CloseProvisioner(string) error + + // Interpolate takes the given raw configuration and completes + // the interpolations, returning the processed ResourceConfig. + // + // The resource argument is optional. If given, it is the resource + // that is currently being acted upon. + Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error) + + // SetVariables sets the variables for the module within + // this context with the name n. This function call is additive: + // the second parameter is merged with any previous call. + SetVariables(string, map[string]interface{}) + + // Diff returns the global diff as well as the lock that should + // be used to modify that diff. + Diff() (*Diff, *sync.RWMutex) + + // State returns the global state as well as the lock that should + // be used to modify that state. + State() (*State, *sync.RWMutex) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go new file mode 100644 index 0000000000..3dcfb2275b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go @@ -0,0 +1,347 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "strings" + "sync" + + "github.com/hashicorp/terraform/config" +) + +// BuiltinEvalContext is an EvalContext implementation that is used by +// Terraform by default. +type BuiltinEvalContext struct { + // StopContext is the context used to track whether we're complete + StopContext context.Context + + // PathValue is the Path that this context is operating within. + PathValue []string + + // Interpolater setting below affect the interpolation of variables. + // + // The InterpolaterVars are the exact value for ${var.foo} values. + // The map is shared between all contexts and is a mapping of + // PATH to KEY to VALUE. Because it is shared by all contexts as well + // as the Interpolater itself, it is protected by InterpolaterVarLock + // which must be locked during any access to the map. + Interpolater *Interpolater + InterpolaterVars map[string]map[string]interface{} + InterpolaterVarLock *sync.Mutex + + Components contextComponentFactory + Hooks []Hook + InputValue UIInput + ProviderCache map[string]ResourceProvider + ProviderConfigCache map[string]*ResourceConfig + ProviderInputConfig map[string]map[string]interface{} + ProviderLock *sync.Mutex + ProvisionerCache map[string]ResourceProvisioner + ProvisionerLock *sync.Mutex + DiffValue *Diff + DiffLock *sync.RWMutex + StateValue *State + StateLock *sync.RWMutex + + once sync.Once +} + +func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { + // This can happen during tests. During tests, we just block forever. 
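+	// (A receive from a nil channel blocks forever, which gives exactly
+	// that behavior with no extra machinery.)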
+ if ctx.StopContext == nil { + return nil + } + + return ctx.StopContext.Done() +} + +func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error { + for _, h := range ctx.Hooks { + action, err := fn(h) + if err != nil { + return err + } + + switch action { + case HookActionContinue: + continue + case HookActionHalt: + // Return an early exit error to trigger an early exit + log.Printf("[WARN] Early exit triggered by hook: %T", h) + return EvalEarlyExitError{} + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) Input() UIInput { + return ctx.InputValue +} + +func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) { + ctx.once.Do(ctx.init) + + // If we already initialized, it is an error + if p := ctx.Provider(n); p != nil { + return nil, fmt.Errorf("Provider '%s' already initialized", n) + } + + // Warning: make sure to acquire these locks AFTER the call to Provider + // above, since it also acquires locks. + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + providerPath := make([]string, len(ctx.Path())+1) + copy(providerPath, ctx.Path()) + providerPath[len(providerPath)-1] = n + key := PathCacheKey(providerPath) + + typeName := strings.SplitN(n, ".", 2)[0] + p, err := ctx.Components.ResourceProvider(typeName, key) + if err != nil { + return nil, err + } + + ctx.ProviderCache[key] = p + return p, nil +} + +func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider { + ctx.once.Do(ctx.init) + + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + providerPath := make([]string, len(ctx.Path())+1) + copy(providerPath, ctx.Path()) + providerPath[len(providerPath)-1] = n + + return ctx.ProviderCache[PathCacheKey(providerPath)] +} + +func (ctx *BuiltinEvalContext) CloseProvider(n string) error { + ctx.once.Do(ctx.init) + + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + providerPath := make([]string, len(ctx.Path())+1) + copy(providerPath, ctx.Path()) + providerPath[len(providerPath)-1] = n + + var provider interface{} + provider = ctx.ProviderCache[PathCacheKey(providerPath)] + if provider != nil { + if p, ok := provider.(ResourceProviderCloser); ok { + delete(ctx.ProviderCache, PathCacheKey(providerPath)) + return p.Close() + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) ConfigureProvider( + n string, cfg *ResourceConfig) error { + p := ctx.Provider(n) + if p == nil { + return fmt.Errorf("Provider '%s' not initialized", n) + } + + if err := ctx.SetProviderConfig(n, cfg); err != nil { + return nil + } + + return p.Configure(cfg) +} + +func (ctx *BuiltinEvalContext) SetProviderConfig( + n string, cfg *ResourceConfig) error { + providerPath := make([]string, len(ctx.Path())+1) + copy(providerPath, ctx.Path()) + providerPath[len(providerPath)-1] = n + + // Save the configuration + ctx.ProviderLock.Lock() + ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg + ctx.ProviderLock.Unlock() + + return nil +} + +func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} { + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + // Make a copy of the path so we can safely edit it + path := ctx.Path() + pathCopy := make([]string, len(path)+1) + copy(pathCopy, path) + + // Go up the tree. 
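+	// (Most specific module path first, then each parent in turn, so
+	// provider input recorded higher in the tree can be inherited.)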
+ for i := len(path) - 1; i >= 0; i-- { + pathCopy[i+1] = n + k := PathCacheKey(pathCopy[:i+2]) + if v, ok := ctx.ProviderInputConfig[k]; ok { + return v + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) { + providerPath := make([]string, len(ctx.Path())+1) + copy(providerPath, ctx.Path()) + providerPath[len(providerPath)-1] = n + + // Save the configuration + ctx.ProviderLock.Lock() + ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c + ctx.ProviderLock.Unlock() +} + +func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig { + ctx.ProviderLock.Lock() + defer ctx.ProviderLock.Unlock() + + // Make a copy of the path so we can safely edit it + path := ctx.Path() + pathCopy := make([]string, len(path)+1) + copy(pathCopy, path) + + // Go up the tree. + for i := len(path) - 1; i >= 0; i-- { + pathCopy[i+1] = n + k := PathCacheKey(pathCopy[:i+2]) + if v, ok := ctx.ProviderConfigCache[k]; ok { + return v + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) InitProvisioner( + n string) (ResourceProvisioner, error) { + ctx.once.Do(ctx.init) + + // If we already initialized, it is an error + if p := ctx.Provisioner(n); p != nil { + return nil, fmt.Errorf("Provisioner '%s' already initialized", n) + } + + // Warning: make sure to acquire these locks AFTER the call to Provisioner + // above, since it also acquires locks. + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + provPath := make([]string, len(ctx.Path())+1) + copy(provPath, ctx.Path()) + provPath[len(provPath)-1] = n + key := PathCacheKey(provPath) + + p, err := ctx.Components.ResourceProvisioner(n, key) + if err != nil { + return nil, err + } + + ctx.ProvisionerCache[key] = p + return p, nil +} + +func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner { + ctx.once.Do(ctx.init) + + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + provPath := make([]string, len(ctx.Path())+1) + copy(provPath, ctx.Path()) + provPath[len(provPath)-1] = n + + return ctx.ProvisionerCache[PathCacheKey(provPath)] +} + +func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { + ctx.once.Do(ctx.init) + + ctx.ProvisionerLock.Lock() + defer ctx.ProvisionerLock.Unlock() + + provPath := make([]string, len(ctx.Path())+1) + copy(provPath, ctx.Path()) + provPath[len(provPath)-1] = n + + var prov interface{} + prov = ctx.ProvisionerCache[PathCacheKey(provPath)] + if prov != nil { + if p, ok := prov.(ResourceProvisionerCloser); ok { + delete(ctx.ProvisionerCache, PathCacheKey(provPath)) + return p.Close() + } + } + + return nil +} + +func (ctx *BuiltinEvalContext) Interpolate( + cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) { + if cfg != nil { + scope := &InterpolationScope{ + Path: ctx.Path(), + Resource: r, + } + + vs, err := ctx.Interpolater.Values(scope, cfg.Variables) + if err != nil { + return nil, err + } + + // Do the interpolation + if err := cfg.Interpolate(vs); err != nil { + return nil, err + } + } + + result := NewResourceConfig(cfg) + result.interpolateForce() + return result, nil +} + +func (ctx *BuiltinEvalContext) Path() []string { + return ctx.PathValue +} + +func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) { + ctx.InterpolaterVarLock.Lock() + defer ctx.InterpolaterVarLock.Unlock() + + path := make([]string, len(ctx.Path())+1) + copy(path, ctx.Path()) + path[len(path)-1] = n + key := PathCacheKey(path) + + vars := ctx.InterpolaterVars[key] + if vars == 
nil { + vars = make(map[string]interface{}) + ctx.InterpolaterVars[key] = vars + } + + for k, v := range vs { + vars[k] = v + } +} + +func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) { + return ctx.DiffValue, ctx.DiffLock +} + +func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) { + return ctx.StateValue, ctx.StateLock +} + +func (ctx *BuiltinEvalContext) init() { +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go new file mode 100644 index 0000000000..4f90d5b129 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go @@ -0,0 +1,208 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform/config" +) + +// MockEvalContext is a mock version of EvalContext that can be used +// for tests. +type MockEvalContext struct { + StoppedCalled bool + StoppedValue <-chan struct{} + + HookCalled bool + HookHook Hook + HookError error + + InputCalled bool + InputInput UIInput + + InitProviderCalled bool + InitProviderName string + InitProviderProvider ResourceProvider + InitProviderError error + + ProviderCalled bool + ProviderName string + ProviderProvider ResourceProvider + + CloseProviderCalled bool + CloseProviderName string + CloseProviderProvider ResourceProvider + + ProviderInputCalled bool + ProviderInputName string + ProviderInputConfig map[string]interface{} + + SetProviderInputCalled bool + SetProviderInputName string + SetProviderInputConfig map[string]interface{} + + ConfigureProviderCalled bool + ConfigureProviderName string + ConfigureProviderConfig *ResourceConfig + ConfigureProviderError error + + SetProviderConfigCalled bool + SetProviderConfigName string + SetProviderConfigConfig *ResourceConfig + + ParentProviderConfigCalled bool + ParentProviderConfigName string + ParentProviderConfigConfig *ResourceConfig + + InitProvisionerCalled bool + InitProvisionerName string + InitProvisionerProvisioner ResourceProvisioner + InitProvisionerError error + + ProvisionerCalled bool + ProvisionerName string + ProvisionerProvisioner ResourceProvisioner + + CloseProvisionerCalled bool + CloseProvisionerName string + CloseProvisionerProvisioner ResourceProvisioner + + InterpolateCalled bool + InterpolateConfig *config.RawConfig + InterpolateResource *Resource + InterpolateConfigResult *ResourceConfig + InterpolateError error + + PathCalled bool + PathPath []string + + SetVariablesCalled bool + SetVariablesModule string + SetVariablesVariables map[string]interface{} + + DiffCalled bool + DiffDiff *Diff + DiffLock *sync.RWMutex + + StateCalled bool + StateState *State + StateLock *sync.RWMutex +} + +func (c *MockEvalContext) Stopped() <-chan struct{} { + c.StoppedCalled = true + return c.StoppedValue +} + +func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error { + c.HookCalled = true + if c.HookHook != nil { + if _, err := fn(c.HookHook); err != nil { + return err + } + } + + return c.HookError +} + +func (c *MockEvalContext) Input() UIInput { + c.InputCalled = true + return c.InputInput +} + +func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) { + c.InitProviderCalled = true + c.InitProviderName = n + return c.InitProviderProvider, c.InitProviderError +} + +func (c *MockEvalContext) Provider(n string) ResourceProvider { + c.ProviderCalled = true + c.ProviderName = n + return c.ProviderProvider +} + +func (c *MockEvalContext) CloseProvider(n string) error { + c.CloseProviderCalled = 
true + c.CloseProviderName = n + return nil +} + +func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error { + c.ConfigureProviderCalled = true + c.ConfigureProviderName = n + c.ConfigureProviderConfig = cfg + return c.ConfigureProviderError +} + +func (c *MockEvalContext) SetProviderConfig( + n string, cfg *ResourceConfig) error { + c.SetProviderConfigCalled = true + c.SetProviderConfigName = n + c.SetProviderConfigConfig = cfg + return nil +} + +func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig { + c.ParentProviderConfigCalled = true + c.ParentProviderConfigName = n + return c.ParentProviderConfigConfig +} + +func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} { + c.ProviderInputCalled = true + c.ProviderInputName = n + return c.ProviderInputConfig +} + +func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) { + c.SetProviderInputCalled = true + c.SetProviderInputName = n + c.SetProviderInputConfig = cfg +} + +func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) { + c.InitProvisionerCalled = true + c.InitProvisionerName = n + return c.InitProvisionerProvisioner, c.InitProvisionerError +} + +func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner { + c.ProvisionerCalled = true + c.ProvisionerName = n + return c.ProvisionerProvisioner +} + +func (c *MockEvalContext) CloseProvisioner(n string) error { + c.CloseProvisionerCalled = true + c.CloseProvisionerName = n + return nil +} + +func (c *MockEvalContext) Interpolate( + config *config.RawConfig, resource *Resource) (*ResourceConfig, error) { + c.InterpolateCalled = true + c.InterpolateConfig = config + c.InterpolateResource = resource + return c.InterpolateConfigResult, c.InterpolateError +} + +func (c *MockEvalContext) Path() []string { + c.PathCalled = true + return c.PathPath +} + +func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) { + c.SetVariablesCalled = true + c.SetVariablesModule = n + c.SetVariablesVariables = vs +} + +func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) { + c.DiffCalled = true + return c.DiffDiff, c.DiffLock +} + +func (c *MockEvalContext) State() (*State, *sync.RWMutex) { + c.StateCalled = true + return c.StateState, c.StateLock +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go new file mode 100644 index 0000000000..2ae56a751c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go @@ -0,0 +1,58 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config" +) + +// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state +// when there is a resource count with zero/one boundary, i.e. fixing +// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. +type EvalCountFixZeroOneBoundary struct { + Resource *config.Resource +} + +// TODO: test +func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) { + // Get the count, important for knowing whether we're supposed to + // be adding the zero, or trimming it. + count, err := n.Resource.Count() + if err != nil { + return nil, err + } + + // Figure what to look for and what to replace it with + hunt := n.Resource.Id() + replace := hunt + ".0" + if count < 2 { + hunt, replace = replace, hunt + } + + state, lock := ctx.State() + + // Get a lock so we can access this instance and potentially make + // changes to it. 
+ lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, then it doesn't matter. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + return nil, nil + } + + // Look for the resource state. If we don't have one, then it is okay. + rs, ok := mod.Resources[hunt] + if !ok { + return nil, nil + } + + // If the replacement key exists, we just keep both + if _, ok := mod.Resources[replace]; ok { + return nil, nil + } + + mod.Resources[replace] = rs + delete(mod.Resources, hunt) + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go new file mode 100644 index 0000000000..91e2b904e9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go @@ -0,0 +1,78 @@ +package terraform + +import ( + "log" +) + +// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state +// when there is a resource count with zero/one boundary, i.e. fixing +// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. +// +// This works on the global state. +type EvalCountFixZeroOneBoundaryGlobal struct{} + +// TODO: test +func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { + // Get the state and lock it since we'll potentially modify it + state, lock := ctx.State() + lock.Lock() + defer lock.Unlock() + + // Prune the state since we require a clean state to work + state.prune() + + // Go through each modules since the boundaries are restricted to a + // module scope. + for _, m := range state.Modules { + if err := n.fixModule(m); err != nil { + return nil, err + } + } + + return nil, nil +} + +func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error { + // Counts keeps track of keys and their counts + counts := make(map[string]int) + for k, _ := range m.Resources { + // Parse the key + key, err := ParseResourceStateKey(k) + if err != nil { + return err + } + + // Set the index to -1 so that we can keep count + key.Index = -1 + + // Increment + counts[key.String()]++ + } + + // Go through the counts and do the fixup for each resource + for raw, count := range counts { + // Search and replace this resource + search := raw + replace := raw + ".0" + if count < 2 { + search, replace = replace, search + } + log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace) + + // Look for the resource state. If we don't have one, then it is okay. + rs, ok := m.Resources[search] + if !ok { + continue + } + + // If the replacement key exists, we just keep both + if _, ok := m.Resources[replace]; ok { + continue + } + + m.Resources[replace] = rs + delete(m.Resources, search) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go new file mode 100644 index 0000000000..54a8333e0f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go @@ -0,0 +1,25 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// EvalCountCheckComputed is an EvalNode that checks if a resource count +// is computed and errors if so. This can possibly happen across a +// module boundary and we don't yet support this. 
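+//
+// For example (hypothetical configuration), a count that depends on an
+// attribute of a resource that has not been created yet, such as
+// count = "${length(aws_subnet.private.*.id)}", cannot be resolved at
+// plan time and is rejected here.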
+type EvalCountCheckComputed struct { + Resource *config.Resource +} + +// TODO: test +func (n *EvalCountCheckComputed) Eval(ctx EvalContext) (interface{}, error) { + if n.Resource.RawCount.Value() == unknownValue() { + return nil, fmt.Errorf( + "%s: value of 'count' cannot be computed", + n.Resource.Id()) + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go new file mode 100644 index 0000000000..6f09526a4c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go @@ -0,0 +1,478 @@ +package terraform + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/config" +) + +// EvalCompareDiff is an EvalNode implementation that compares two diffs +// and errors if the diffs are not equal. +type EvalCompareDiff struct { + Info *InstanceInfo + One, Two **InstanceDiff +} + +// TODO: test +func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) { + one, two := *n.One, *n.Two + + // If either are nil, let them be empty + if one == nil { + one = new(InstanceDiff) + one.init() + } + if two == nil { + two = new(InstanceDiff) + two.init() + } + oneId, _ := one.GetAttribute("id") + twoId, _ := two.GetAttribute("id") + one.DelAttribute("id") + two.DelAttribute("id") + defer func() { + if oneId != nil { + one.SetAttribute("id", oneId) + } + if twoId != nil { + two.SetAttribute("id", twoId) + } + }() + + if same, reason := one.Same(two); !same { + log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id) + log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason) + log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one) + log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two) + return nil, fmt.Errorf( + "%s: diffs didn't match during apply. This is a bug with "+ + "Terraform and should be reported as a GitHub Issue.\n"+ + "\n"+ + "Please include the following information in your report:\n"+ + "\n"+ + " Terraform Version: %s\n"+ + " Resource ID: %s\n"+ + " Mismatch reason: %s\n"+ + " Diff One (usually from plan): %#v\n"+ + " Diff Two (usually from apply): %#v\n"+ + "\n"+ + "Also include as much context as you can about your config, state, "+ + "and the steps you performed to trigger this error.\n", + n.Info.Id, Version, n.Info.Id, reason, one, two) + } + + return nil, nil +} + +// EvalDiff is an EvalNode implementation that does a refresh for +// a resource. +type EvalDiff struct { + Name string + Info *InstanceInfo + Config **ResourceConfig + Provider *ResourceProvider + Diff **InstanceDiff + State **InstanceState + OutputDiff **InstanceDiff + OutputState **InstanceState + + // Resource is needed to fetch the ignore_changes list so we can + // filter user-requested ignored attributes from the diff. + Resource *config.Resource +} + +// TODO: test +func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + config := *n.Config + provider := *n.Provider + + // Call pre-diff hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.Info, state) + }) + if err != nil { + return nil, err + } + + // The state for the diff must never be nil + diffState := state + if diffState == nil { + diffState = new(InstanceState) + } + diffState.init() + + // Diff! 
+	diff, err := provider.Diff(n.Info, diffState, config)
+	if err != nil {
+		return nil, err
+	}
+	if diff == nil {
+		diff = new(InstanceDiff)
+	}
+
+	// Set DestroyDeposed if we have deposed instances
+	_, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
+		if len(rs.Deposed) > 0 {
+			diff.DestroyDeposed = true
+		}
+
+		return nil, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Preserve the DestroyTainted flag
+	if n.Diff != nil {
+		diff.SetTainted((*n.Diff).GetDestroyTainted())
+	}
+
+	// Require a destroy if there is an ID and it requires new.
+	if diff.RequiresNew() && state != nil && state.ID != "" {
+		diff.SetDestroy(true)
+	}
+
+	// If we're creating a new resource, compute its ID
+	if diff.RequiresNew() || state == nil || state.ID == "" {
+		var oldID string
+		if state != nil {
+			oldID = state.Attributes["id"]
+		}
+
+		// Add diff to compute new ID
+		diff.init()
+		diff.SetAttribute("id", &ResourceAttrDiff{
+			Old:         oldID,
+			NewComputed: true,
+			RequiresNew: true,
+			Type:        DiffAttrOutput,
+		})
+	}
+
+	// Filter out attributes covered by ignore_changes
+	if err := n.processIgnoreChanges(diff); err != nil {
+		return nil, err
+	}
+
+	// Call post-diff hook
+	err = ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PostDiff(n.Info, diff)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Update our output
+	*n.OutputDiff = diff
+
+	// Update the state if we care
+	if n.OutputState != nil {
+		*n.OutputState = state
+
+		// Merge our state so that the state is updated with our plan
+		if !diff.Empty() && n.OutputState != nil {
+			*n.OutputState = state.MergeDiff(diff)
+		}
+	}
+
+	return nil, nil
+}
+
+func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
+	if diff == nil || n.Resource == nil || n.Resource.Id() == "" {
+		return nil
+	}
+	ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
+
+	if len(ignoreChanges) == 0 {
+		return nil
+	}
+
+	// If we're just creating the resource, we shouldn't alter the
+	// Diff at all
+	if diff.ChangeType() == DiffCreate {
+		return nil
+	}
+
+	// If the resource has been tainted then we don't process ignore changes
+	// since we MUST recreate the entire resource.
+	if diff.GetDestroyTainted() {
+		return nil
+	}
+
+	attrs := diff.CopyAttributes()
+
+	// get the complete set of keys we want to ignore
+	ignorableAttrKeys := make(map[string]bool)
+	for _, ignoredKey := range ignoreChanges {
+		for k := range attrs {
+			if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
+				ignorableAttrKeys[k] = true
+			}
+		}
+	}
+
+	// If the resource was being destroyed, check to see if we can ignore the
+	// reason for it being destroyed.
+	if diff.GetDestroy() {
+		for k, v := range attrs {
+			if k == "id" {
+				// id will always be changed if we intended to replace this instance
+				continue
+			}
+			if v.Empty() || v.NewComputed {
+				continue
+			}
+
+			// If any RequiresNew attribute isn't ignored, we need to keep the diff
+			// as-is to be able to replace the resource.
+			if v.RequiresNew && !ignorableAttrKeys[k] {
+				return nil
+			}
+		}
+
+		// Now that we know that we aren't replacing the instance, we can filter
+		// out all the empty and computed attributes. There may be a bunch of
+		// extraneous attribute diffs for the other non-requires-new attributes
+		// going from "" -> "configval" or "" -> "".
+		// We must make sure any flatmapped containers are filtered (or not) as a
+		// whole.
+ containers := groupContainers(diff) + keep := map[string]bool{} + for _, v := range containers { + if v.keepDiff() { + // At least one key has changes, so list all the sibling keys + // to keep in the diff. + for k := range v { + keep[k] = true + } + } + } + + for k, v := range attrs { + if (v.Empty() || v.NewComputed) && !keep[k] { + ignorableAttrKeys[k] = true + } + } + } + + // Here we undo the two reactions to RequireNew in EvalDiff - the "id" + // attribute diff and the Destroy boolean field + log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " + + "because after ignore_changes, this diff no longer requires replacement") + diff.DelAttribute("id") + diff.SetDestroy(false) + + // If we didn't hit any of our early exit conditions, we can filter the diff. + for k := range ignorableAttrKeys { + log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s", + n.Resource.Id(), k) + diff.DelAttribute(k) + } + + return nil +} + +// a group of key-*ResourceAttrDiff pairs from the same flatmapped container +type flatAttrDiff map[string]*ResourceAttrDiff + +// we need to keep all keys if any of them have a diff +func (f flatAttrDiff) keepDiff() bool { + for _, v := range f { + if !v.Empty() && !v.NewComputed { + return true + } + } + return false +} + +// sets, lists and maps need to be compared for diff inclusion as a whole, so +// group the flatmapped keys together for easier comparison. +func groupContainers(d *InstanceDiff) map[string]flatAttrDiff { + isIndex := multiVal.MatchString + containers := map[string]flatAttrDiff{} + attrs := d.CopyAttributes() + // we need to loop once to find the index key + for k := range attrs { + if isIndex(k) { + // add the key, always including the final dot to fully qualify it + containers[k[:len(k)-1]] = flatAttrDiff{} + } + } + + // loop again to find all the sub keys + for prefix, values := range containers { + for k, attrDiff := range attrs { + // we include the index value as well, since it could be part of the diff + if strings.HasPrefix(k, prefix) { + values[k] = attrDiff + } + } + } + + return containers +} + +// EvalDiffDestroy is an EvalNode implementation that returns a plain +// destroy diff. +type EvalDiffDestroy struct { + Info *InstanceInfo + State **InstanceState + Output **InstanceDiff +} + +// TODO: test +func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + + // If there is no state or we don't have an ID, we're already destroyed + if state == nil || state.ID == "" { + return nil, nil + } + + // Call pre-diff hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.Info, state) + }) + if err != nil { + return nil, err + } + + // The diff + diff := &InstanceDiff{Destroy: true} + + // Call post-diff hook + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(n.Info, diff) + }) + if err != nil { + return nil, err + } + + // Update our output + *n.Output = diff + + return nil, nil +} + +// EvalDiffDestroyModule is an EvalNode implementation that writes the diff to +// the full diff. 
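+//
+// Concretely, it marks the module diff at the given path as a destroy,
+// roughly equivalent to:
+//
+//	modDiff := diff.ModuleByPath(path)
+//	modDiff.Destroy = true
+//
+// with the module diff created first if it does not exist yet, as in the
+// implementation below.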
+type EvalDiffDestroyModule struct {
+	Path []string
+}
+
+// TODO: test
+func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
+	diff, lock := ctx.Diff()
+
+	// Acquire the lock so that we can do this safely concurrently
+	lock.Lock()
+	defer lock.Unlock()
+
+	// Write the diff
+	modDiff := diff.ModuleByPath(n.Path)
+	if modDiff == nil {
+		modDiff = diff.AddModule(n.Path)
+	}
+	modDiff.Destroy = true
+
+	return nil, nil
+}
+
+// EvalFilterDiff is an EvalNode implementation that filters the diff
+// according to some filter.
+type EvalFilterDiff struct {
+	// Input and output
+	Diff   **InstanceDiff
+	Output **InstanceDiff
+
+	// Destroy, if true, will only include a destroy diff if it is set.
+	Destroy bool
+}
+
+func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {
+	if *n.Diff == nil {
+		return nil, nil
+	}
+
+	input := *n.Diff
+	result := new(InstanceDiff)
+
+	if n.Destroy {
+		if input.GetDestroy() || input.RequiresNew() {
+			result.SetDestroy(true)
+		}
+	}
+
+	if n.Output != nil {
+		*n.Output = result
+	}
+
+	return nil, nil
+}
+
+// EvalReadDiff is an EvalNode implementation that reads a resource's
+// diff out of the full diff.
+type EvalReadDiff struct {
+	Name string
+	Diff **InstanceDiff
+}
+
+func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
+	diff, lock := ctx.Diff()
+
+	// Acquire the lock so that we can do this safely concurrently
+	lock.Lock()
+	defer lock.Unlock()
+
+	// Read the diff
+	modDiff := diff.ModuleByPath(ctx.Path())
+	if modDiff == nil {
+		return nil, nil
+	}
+
+	*n.Diff = modDiff.Resources[n.Name]
+
+	return nil, nil
+}
+
+// EvalWriteDiff is an EvalNode implementation that writes the diff to
+// the full diff.
+type EvalWriteDiff struct {
+	Name string
+	Diff **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
+	diff, lock := ctx.Diff()
+
+	// The diff to write, if it's empty it should write nil
+	var diffVal *InstanceDiff
+	if n.Diff != nil {
+		diffVal = *n.Diff
+	}
+	if diffVal.Empty() {
+		diffVal = nil
+	}
+
+	// Acquire the lock so that we can do this safely concurrently
+	lock.Lock()
+	defer lock.Unlock()
+
+	// Write the diff
+	modDiff := diff.ModuleByPath(ctx.Path())
+	if modDiff == nil {
+		modDiff = diff.AddModule(ctx.Path())
+	}
+	if diffVal != nil {
+		modDiff.Resources[n.Name] = diffVal
+	} else {
+		delete(modDiff.Resources, n.Name)
+	}
+
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
new file mode 100644
index 0000000000..470f798b7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
@@ -0,0 +1,20 @@
+package terraform
+
+// EvalReturnError is an EvalNode implementation that returns an
+// error if it is present.
+//
+// This is useful for scenarios where an error has been captured by
+// another EvalNode (like EvalApply) for special EvalTree-based error
+// handling, and that handling has completed, so the error should be
+// returned normally.
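+//
+// A sketch of how it might be composed (node contents are illustrative;
+// the exact composition is decided by the graph builders elsewhere):
+//
+//	var applyErr error
+//	seq := &EvalSequence{Nodes: []EvalNode{
+//		&EvalApply{Error: &applyErr}, // plus the usual apply arguments
+//		// ... error-handling nodes here ...
+//		&EvalReturnError{Error: &applyErr},
+//	}}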
+type EvalReturnError struct {
+	Error *error
+}
+
+func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
+	if n.Error == nil {
+		return nil, nil
+	}
+
+	return nil, *n.Error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
new file mode 100644
index 0000000000..711c625c83
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
@@ -0,0 +1,25 @@
+package terraform
+
+// EvalNodeFilterFunc is the callback used to replace a node with
+// another node. To not do the replacement, just return the input node.
+type EvalNodeFilterFunc func(EvalNode) EvalNode
+
+// EvalNodeFilterable is an interface that can be implemented by
+// EvalNodes to allow filtering of sub-elements. Note that this isn't
+// a common thing to implement and you probably don't need it.
+type EvalNodeFilterable interface {
+	EvalNode
+	Filter(EvalNodeFilterFunc)
+}
+
+// EvalFilter runs the filter on the given node and returns the
+// final filtered value. This should be called rather than checking
+// the EvalNode directly since this will properly handle EvalNodeFilterables.
+func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
+	if f, ok := node.(EvalNodeFilterable); ok {
+		f.Filter(fn)
+		return node
+	}
+
+	return fn(node)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
new file mode 100644
index 0000000000..1a55f024a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
@@ -0,0 +1,49 @@
+package terraform
+
+// EvalNodeOpFilterable is an interface that EvalNodes can implement
+// to be filterable by the operation that is being run on Terraform.
+type EvalNodeOpFilterable interface {
+	IncludeInOp(walkOperation) bool
+}
+
+// EvalNodeFilterOp returns a filter function that filters nodes that
+// include themselves in specific operations.
+func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
+	return func(n EvalNode) EvalNode {
+		include := true
+		if of, ok := n.(EvalNodeOpFilterable); ok {
+			include = of.IncludeInOp(op)
+		}
+		if include {
+			return n
+		}
+
+		return EvalNoop{}
+	}
+}
+
+// EvalOpFilter is an EvalNode implementation that is a proxy to
+// another node but filters based on the operation.
+type EvalOpFilter struct {
+	// Ops is the list of operations to include this node in.
+	Ops []walkOperation
+
+	// Node is the node to execute
+	Node EvalNode
+}
+
+// TODO: test
+func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
+	return EvalRaw(n.Node, ctx)
+}
+
+// EvalNodeOpFilterable impl.
+func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool {
+	for _, v := range n.Ops {
+		if v == op {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
new file mode 100644
index 0000000000..d6b46a1f22
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
@@ -0,0 +1,26 @@
+package terraform
+
+// EvalIf is an EvalNode that is a conditional.
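+//
+// A hedged usage sketch (the condition and branch nodes are invented for
+// illustration):
+//
+//	&EvalIf{
+//		If: func(ctx EvalContext) (bool, error) {
+//			return someCondition, nil
+//		},
+//		Then: &EvalNoop{},
+//		Else: &EvalNoop{},
+//	}
+//
+// Then is evaluated when If returns true; otherwise Else is evaluated if
+// it is non-nil.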
+type EvalIf struct { + If func(EvalContext) (bool, error) + Then EvalNode + Else EvalNode +} + +// TODO: test +func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) { + yes, err := n.If(ctx) + if err != nil { + return nil, err + } + + if yes { + return EvalRaw(n.Then, ctx) + } else { + if n.Else != nil { + return EvalRaw(n.Else, ctx) + } + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go new file mode 100644 index 0000000000..62cc581fad --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go @@ -0,0 +1,76 @@ +package terraform + +import ( + "fmt" +) + +// EvalImportState is an EvalNode implementation that performs an +// ImportState operation on a provider. This will return the imported +// states but won't modify any actual state. +type EvalImportState struct { + Provider *ResourceProvider + Info *InstanceInfo + Id string + Output *[]*InstanceState +} + +// TODO: test +func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { + provider := *n.Provider + + { + // Call pre-import hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreImportState(n.Info, n.Id) + }) + if err != nil { + return nil, err + } + } + + // Import! + state, err := provider.ImportState(n.Info, n.Id) + if err != nil { + return nil, fmt.Errorf( + "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err) + } + + if n.Output != nil { + *n.Output = state + } + + { + // Call post-import hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostImportState(n.Info, state) + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +// EvalImportStateVerify verifies the state after ImportState and +// after the refresh to make sure it is non-nil and valid. +type EvalImportStateVerify struct { + Info *InstanceInfo + Id string + State **InstanceState +} + +// TODO: test +func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + if state.Empty() { + return nil, fmt.Errorf( + "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+ + "exist. Please verify the ID is correct. You cannot import non-existent\n"+ + "resources using Terraform import.", + n.Info.HumanId(), + n.Id) + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go new file mode 100644 index 0000000000..6825ff5909 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go @@ -0,0 +1,24 @@ +package terraform + +import "github.com/hashicorp/terraform/config" + +// EvalInterpolate is an EvalNode implementation that takes a raw +// configuration and interpolates it. 
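+//
+// A minimal sketch of its place in an eval tree (the RawConfig value is
+// assumed to be supplied by the surrounding graph builder):
+//
+//	var resourceConfig *ResourceConfig
+//	node := &EvalInterpolate{
+//		Config: rawConfig, // *config.RawConfig, provided by the caller
+//		Output: &resourceConfig,
+//	}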
+type EvalInterpolate struct { + Config *config.RawConfig + Resource *Resource + Output **ResourceConfig +} + +func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) { + rc, err := ctx.Interpolate(n.Config, n.Resource) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = rc + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go new file mode 100644 index 0000000000..f4bc8225c5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go @@ -0,0 +1,8 @@ +package terraform + +// EvalNoop is an EvalNode that does nothing. +type EvalNoop struct{} + +func (EvalNoop) Eval(EvalContext) (interface{}, error) { + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go new file mode 100644 index 0000000000..cf61781e5b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go @@ -0,0 +1,119 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/config" +) + +// EvalDeleteOutput is an EvalNode implementation that deletes an output +// from the state. +type EvalDeleteOutput struct { + Name string +} + +// TODO: test +func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + if state == nil { + return nil, nil + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, create it. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + return nil, nil + } + + delete(mod.Outputs, n.Name) + + return nil, nil +} + +// EvalWriteOutput is an EvalNode implementation that writes the output +// for the given name to the current state. +type EvalWriteOutput struct { + Name string + Sensitive bool + Value *config.RawConfig +} + +// TODO: test +func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { + cfg, err := ctx.Interpolate(n.Value, nil) + if err != nil { + // Log error but continue anyway + log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err) + } + + state, lock := ctx.State() + if state == nil { + return nil, fmt.Errorf("cannot write state to nil state") + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, create it. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + mod = state.AddModule(ctx.Path()) + } + + // Get the value from the config + var valueRaw interface{} = config.UnknownVariableValue + if cfg != nil { + var ok bool + valueRaw, ok = cfg.Get("value") + if !ok { + valueRaw = "" + } + if cfg.IsComputed("value") { + valueRaw = config.UnknownVariableValue + } + } + + switch valueTyped := valueRaw.(type) { + case string: + mod.Outputs[n.Name] = &OutputState{ + Type: "string", + Sensitive: n.Sensitive, + Value: valueTyped, + } + case []interface{}: + mod.Outputs[n.Name] = &OutputState{ + Type: "list", + Sensitive: n.Sensitive, + Value: valueTyped, + } + case map[string]interface{}: + mod.Outputs[n.Name] = &OutputState{ + Type: "map", + Sensitive: n.Sensitive, + Value: valueTyped, + } + case []map[string]interface{}: + // an HCL map is multi-valued, so if this was read out of a config the + // map may still be in a slice. 
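+		// For example, an output written as (values illustrative):
+		//
+		//	output "tags" {
+		//		value = { env = "dev" }
+		//	}
+		//
+		// may arrive here as []map[string]interface{}{{"env": "dev"}},
+		// and the single element is unwrapped below.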
+ if len(valueTyped) == 1 { + mod.Outputs[n.Name] = &OutputState{ + Type: "map", + Sensitive: n.Sensitive, + Value: valueTyped[0], + } + break + } + return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map", + n.Name, valueTyped, len(valueTyped)) + default: + return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped) + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go new file mode 100644 index 0000000000..092fd18d83 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go @@ -0,0 +1,164 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// EvalSetProviderConfig sets the parent configuration for a provider +// without configuring that provider, validating it, etc. +type EvalSetProviderConfig struct { + Provider string + Config **ResourceConfig +} + +func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) { + return nil, ctx.SetProviderConfig(n.Provider, *n.Config) +} + +// EvalBuildProviderConfig outputs a *ResourceConfig that is properly +// merged with parents and inputs on top of what is configured in the file. +type EvalBuildProviderConfig struct { + Provider string + Config **ResourceConfig + Output **ResourceConfig +} + +func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) { + cfg := *n.Config + + // If we have a configuration set, then merge that in + if input := ctx.ProviderInput(n.Provider); input != nil { + // "input" is a map of the subset of config values that were known + // during the input walk, set by EvalInputProvider. Note that + // in particular it does *not* include attributes that had + // computed values at input time; those appear *only* in + // "cfg" here. + rc, err := config.NewRawConfig(input) + if err != nil { + return nil, err + } + + merged := cfg.raw.Merge(rc) + cfg = NewResourceConfig(merged) + } + + // Get the parent configuration if there is one + if parent := ctx.ParentProviderConfig(n.Provider); parent != nil { + merged := cfg.raw.Merge(parent.raw) + cfg = NewResourceConfig(merged) + } + + *n.Output = cfg + return nil, nil +} + +// EvalConfigProvider is an EvalNode implementation that configures +// a provider that is already initialized and retrieved. +type EvalConfigProvider struct { + Provider string + Config **ResourceConfig +} + +func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { + return nil, ctx.ConfigureProvider(n.Provider, *n.Config) +} + +// EvalInitProvider is an EvalNode implementation that initializes a provider +// and returns nothing. The provider can be retrieved again with the +// EvalGetProvider node. +type EvalInitProvider struct { + Name string +} + +func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { + return ctx.InitProvider(n.Name) +} + +// EvalCloseProvider is an EvalNode implementation that closes provider +// connections that aren't needed anymore. +type EvalCloseProvider struct { + Name string +} + +func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { + ctx.CloseProvider(n.Name) + return nil, nil +} + +// EvalGetProvider is an EvalNode implementation that retrieves an already +// initialized provider instance for the given name. 
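+//
+// A typical pairing (sketch; the ordering is enforced by the graph
+// builder, and "aws" stands in for whatever provider name is in use):
+//
+//	var provider ResourceProvider
+//	nodes := []EvalNode{
+//		&EvalInitProvider{Name: "aws"},
+//		&EvalGetProvider{Name: "aws", Output: &provider},
+//	}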
+type EvalGetProvider struct { + Name string + Output *ResourceProvider +} + +func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { + result := ctx.Provider(n.Name) + if result == nil { + return nil, fmt.Errorf("provider %s not initialized", n.Name) + } + + if n.Output != nil { + *n.Output = result + } + + return nil, nil +} + +// EvalInputProvider is an EvalNode implementation that asks for input +// for the given provider configurations. +type EvalInputProvider struct { + Name string + Provider *ResourceProvider + Config **ResourceConfig +} + +func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) { + // If we already configured this provider, then don't do this again + if v := ctx.ProviderInput(n.Name); v != nil { + return nil, nil + } + + rc := *n.Config + + // Wrap the input into a namespace + input := &PrefixUIInput{ + IdPrefix: fmt.Sprintf("provider.%s", n.Name), + QueryPrefix: fmt.Sprintf("provider.%s.", n.Name), + UIInput: ctx.Input(), + } + + // Go through each provider and capture the input necessary + // to satisfy it. + config, err := (*n.Provider).Input(input, rc) + if err != nil { + return nil, fmt.Errorf( + "Error configuring %s: %s", n.Name, err) + } + + // Set the input that we received so that child modules don't attempt + // to ask for input again. + if config != nil && len(config.Config) > 0 { + // This repository of provider input results on the context doesn't + // retain config.ComputedKeys, so we need to filter those out here + // in order that later users of this data won't try to use the unknown + // value placeholder as if it were a literal value. This map is just + // of known values we've been able to complete so far; dynamic stuff + // will be merged in by EvalBuildProviderConfig on subsequent + // (post-input) walks. + confMap := config.Config + if config.ComputedKeys != nil { + for _, key := range config.ComputedKeys { + delete(confMap, key) + } + } + + ctx.SetProviderInput(n.Name, confMap) + } else { + ctx.SetProviderInput(n.Name, map[string]interface{}{}) + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go new file mode 100644 index 0000000000..89579c0557 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go @@ -0,0 +1,47 @@ +package terraform + +import ( + "fmt" +) + +// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner +// and returns nothing. The provisioner can be retrieved again with the +// EvalGetProvisioner node. +type EvalInitProvisioner struct { + Name string +} + +func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) { + return ctx.InitProvisioner(n.Name) +} + +// EvalCloseProvisioner is an EvalNode implementation that closes provisioner +// connections that aren't needed anymore. +type EvalCloseProvisioner struct { + Name string +} + +func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { + ctx.CloseProvisioner(n.Name) + return nil, nil +} + +// EvalGetProvisioner is an EvalNode implementation that retrieves an already +// initialized provisioner instance for the given name. 
+type EvalGetProvisioner struct { + Name string + Output *ResourceProvisioner +} + +func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { + result := ctx.Provisioner(n.Name) + if result == nil { + return nil, fmt.Errorf("provisioner %s not initialized", n.Name) + } + + if n.Output != nil { + *n.Output = result + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go new file mode 100644 index 0000000000..fb85a284e8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go @@ -0,0 +1,139 @@ +package terraform + +import ( + "fmt" +) + +// EvalReadDataDiff is an EvalNode implementation that executes a data +// resource's ReadDataDiff method to discover what attributes it exports. +type EvalReadDataDiff struct { + Provider *ResourceProvider + Output **InstanceDiff + OutputState **InstanceState + Config **ResourceConfig + Info *InstanceInfo + + // Set Previous when re-evaluating diff during apply, to ensure that + // the "Destroy" flag is preserved. + Previous **InstanceDiff +} + +func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) { + // TODO: test + + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(n.Info, nil) + }) + if err != nil { + return nil, err + } + + var diff *InstanceDiff + + if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() { + // If we're re-diffing for a diff that was already planning to + // destroy, then we'll just continue with that plan. + diff = &InstanceDiff{Destroy: true} + } else { + provider := *n.Provider + config := *n.Config + + var err error + diff, err = provider.ReadDataDiff(n.Info, config) + if err != nil { + return nil, err + } + if diff == nil { + diff = new(InstanceDiff) + } + + // if id isn't explicitly set then it's always computed, because we're + // always "creating a new resource". + diff.init() + if _, ok := diff.Attributes["id"]; !ok { + diff.SetAttribute("id", &ResourceAttrDiff{ + Old: "", + NewComputed: true, + RequiresNew: true, + Type: DiffAttrOutput, + }) + } + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(n.Info, diff) + }) + if err != nil { + return nil, err + } + + *n.Output = diff + + if n.OutputState != nil { + state := &InstanceState{} + *n.OutputState = state + + // Apply the diff to the returned state, so the state includes + // any attribute values that are not computed. + if !diff.Empty() && n.OutputState != nil { + *n.OutputState = state.MergeDiff(diff) + } + } + + return nil, nil +} + +// EvalReadDataApply is an EvalNode implementation that executes a data +// resource's ReadDataApply method to read data from the data source. +type EvalReadDataApply struct { + Provider *ResourceProvider + Output **InstanceState + Diff **InstanceDiff + Info *InstanceInfo +} + +func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { + // TODO: test + provider := *n.Provider + diff := *n.Diff + + // If the diff is for *destroying* this resource then we'll + // just drop its state and move on, since data resources don't + // support an actual "destroy" action. + if diff != nil && diff.GetDestroy() { + if n.Output != nil { + *n.Output = nil + } + return nil, nil + } + + // For the purpose of external hooks we present a data apply as a + // "Refresh" rather than an "Apply" because creating a data source + // is presented to users/callers as a "read" operation. 
+ err := ctx.Hook(func(h Hook) (HookAction, error) { + // We don't have a state yet, so we'll just give the hook an + // empty one to work with. + return h.PreRefresh(n.Info, &InstanceState{}) + }) + if err != nil { + return nil, err + } + + state, err := provider.ReadDataApply(n.Info, diff) + if err != nil { + return nil, fmt.Errorf("%s: %s", n.Info.Id, err) + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(n.Info, state) + }) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = state + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go new file mode 100644 index 0000000000..fa2b8126cd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go @@ -0,0 +1,55 @@ +package terraform + +import ( + "fmt" + "log" +) + +// EvalRefresh is an EvalNode implementation that does a refresh for +// a resource. +type EvalRefresh struct { + Provider *ResourceProvider + State **InstanceState + Info *InstanceInfo + Output **InstanceState +} + +// TODO: test +func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { + provider := *n.Provider + state := *n.State + + // If we have no state, we don't do any refreshing + if state == nil { + log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id) + return nil, nil + } + + // Call pre-refresh hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreRefresh(n.Info, state) + }) + if err != nil { + return nil, err + } + + // Refresh! + state, err = provider.Refresh(n.Info, state) + if err != nil { + return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error()) + } + + // Call post-refresh hook + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(n.Info, state) + }) + if err != nil { + return nil, err + } + + if n.Output != nil { + *n.Output = state + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go new file mode 100644 index 0000000000..5eca6782a6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go @@ -0,0 +1,13 @@ +package terraform + +// EvalInstanceInfo is an EvalNode implementation that fills in the +// InstanceInfo as much as it can. +type EvalInstanceInfo struct { + Info *InstanceInfo +} + +// TODO: test +func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) { + n.Info.ModulePath = ctx.Path() + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go new file mode 100644 index 0000000000..82d81782af --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go @@ -0,0 +1,27 @@ +package terraform + +// EvalSequence is an EvalNode that evaluates in sequence. +type EvalSequence struct { + Nodes []EvalNode +} + +func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { + for _, n := range n.Nodes { + if n == nil { + continue + } + + if _, err := EvalRaw(n, ctx); err != nil { + return nil, err + } + } + + return nil, nil +} + +// EvalNodeFilterable impl. 
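+// For example, a walk-specific filter can rewrite every node in the
+// sequence in place (sketch):
+//
+//	seq.Filter(EvalNodeFilterOp(walkPlan))
+//
+// which replaces nodes excluded from walkPlan with EvalNoop, per
+// EvalNodeFilterOp in eval_filter_operation.go.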
+func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) { + for i, node := range n.Nodes { + n.Nodes[i] = fn(node) + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go new file mode 100644 index 0000000000..126a0e63a9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go @@ -0,0 +1,324 @@ +package terraform + +import "fmt" + +// EvalReadState is an EvalNode implementation that reads the +// primary InstanceState for a specific resource out of the state. +type EvalReadState struct { + Name string + Output **InstanceState +} + +func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { + return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { + return rs.Primary, nil + }) +} + +// EvalReadStateDeposed is an EvalNode implementation that reads the +// deposed InstanceState for a specific resource out of the state +type EvalReadStateDeposed struct { + Name string + Output **InstanceState + // Index indicates which instance in the Deposed list to target, or -1 for + // the last item. + Index int +} + +func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { + return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { + // Get the index. If it is negative, then we get the last one + idx := n.Index + if idx < 0 { + idx = len(rs.Deposed) - 1 + } + if idx >= 0 && idx < len(rs.Deposed) { + return rs.Deposed[idx], nil + } else { + return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs) + } + }) +} + +// Does the bulk of the work for the various flavors of ReadState eval nodes. +// Each node just provides a reader function to get from the ResourceState to the +// InstanceState, and this takes care of all the plumbing. +func readInstanceFromState( + ctx EvalContext, + resourceName string, + output **InstanceState, + readerFn func(*ResourceState) (*InstanceState, error), +) (*InstanceState, error) { + state, lock := ctx.State() + + // Get a read lock so we can access this instance + lock.RLock() + defer lock.RUnlock() + + // Look for the module state. If we don't have one, then it doesn't matter. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + return nil, nil + } + + // Look for the resource state. If we don't have one, then it is okay. + rs := mod.Resources[resourceName] + if rs == nil { + return nil, nil + } + + // Use the delegate function to get the instance state from the resource state + is, err := readerFn(rs) + if err != nil { + return nil, err + } + + // Write the result to the output pointer + if output != nil { + *output = is + } + + return is, nil +} + +// EvalRequireState is an EvalNode implementation that early exits +// if the state doesn't have an ID. +type EvalRequireState struct { + State **InstanceState +} + +func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { + if n.State == nil { + return nil, EvalEarlyExitError{} + } + + state := *n.State + if state == nil || state.ID == "" { + return nil, EvalEarlyExitError{} + } + + return nil, nil +} + +// EvalUpdateStateHook is an EvalNode implementation that calls the +// PostStateUpdate hook with the current state. +type EvalUpdateStateHook struct{} + +func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + + // Get a full lock. 
Even calling something like WriteState can modify + // (prune) the state, so we need the full lock. + lock.Lock() + defer lock.Unlock() + + // Call the hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostStateUpdate(state) + }) + if err != nil { + return nil, err + } + + return nil, nil +} + +// EvalWriteState is an EvalNode implementation that writes the +// primary InstanceState for a specific resource into the state. +type EvalWriteState struct { + Name string + ResourceType string + Provider string + Dependencies []string + State **InstanceState +} + +func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { + return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, + func(rs *ResourceState) error { + rs.Primary = *n.State + return nil + }, + ) +} + +// EvalWriteStateDeposed is an EvalNode implementation that writes +// an InstanceState out to the Deposed list of a resource in the state. +type EvalWriteStateDeposed struct { + Name string + ResourceType string + Provider string + Dependencies []string + State **InstanceState + // Index indicates which instance in the Deposed list to target, or -1 to append. + Index int +} + +func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { + return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, + func(rs *ResourceState) error { + if n.Index == -1 { + rs.Deposed = append(rs.Deposed, *n.State) + } else { + rs.Deposed[n.Index] = *n.State + } + return nil + }, + ) +} + +// Pulls together the common tasks of the EvalWriteState nodes. All the args +// are passed directly down from the EvalNode along with a `writer` function +// which is yielded the *ResourceState and is responsible for writing an +// InstanceState to the proper field in the ResourceState. +func writeInstanceToState( + ctx EvalContext, + resourceName string, + resourceType string, + provider string, + dependencies []string, + writerFn func(*ResourceState) error, +) (*InstanceState, error) { + state, lock := ctx.State() + if state == nil { + return nil, fmt.Errorf("cannot write state to nil state") + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Look for the module state. If we don't have one, create it. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + mod = state.AddModule(ctx.Path()) + } + + // Look for the resource state. + rs := mod.Resources[resourceName] + if rs == nil { + rs = &ResourceState{} + rs.init() + mod.Resources[resourceName] = rs + } + rs.Type = resourceType + rs.Dependencies = dependencies + rs.Provider = provider + + if err := writerFn(rs); err != nil { + return nil, err + } + + return nil, nil +} + +// EvalClearPrimaryState is an EvalNode implementation that clears the primary +// instance from a resource state. +type EvalClearPrimaryState struct { + Name string +} + +func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + + // Get a read lock so we can access this instance + lock.RLock() + defer lock.RUnlock() + + // Look for the module state. If we don't have one, then it doesn't matter. + mod := state.ModuleByPath(ctx.Path()) + if mod == nil { + return nil, nil + } + + // Look for the resource state. If we don't have one, then it is okay. 
+	rs := mod.Resources[n.Name]
+	if rs == nil {
+		return nil, nil
+	}
+
+	// Clear primary from the resource state
+	rs.Primary = nil
+
+	return nil, nil
+}
+
+// EvalDeposeState is an EvalNode implementation that takes the primary
+// out of a state and makes it Deposed. This is done at the beginning of
+// create-before-destroy calls so that the create can create while preserving
+// the old state of the to-be-destroyed resource.
+type EvalDeposeState struct {
+	Name string
+}
+
+// TODO: test
+func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
+	state, lock := ctx.State()
+
+	// Get a read lock so we can access this instance
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Look for the module state. If we don't have one, then it doesn't matter.
+	mod := state.ModuleByPath(ctx.Path())
+	if mod == nil {
+		return nil, nil
+	}
+
+	// Look for the resource state. If we don't have one, then it is okay.
+	rs := mod.Resources[n.Name]
+	if rs == nil {
+		return nil, nil
+	}
+
+	// If we don't have a primary, we have nothing to depose
+	if rs.Primary == nil {
+		return nil, nil
+	}
+
+	// Depose
+	rs.Deposed = append(rs.Deposed, rs.Primary)
+	rs.Primary = nil
+
+	return nil, nil
+}
+
+// EvalUndeposeState is an EvalNode implementation that restores the most
+// recently deposed instance of a resource as its primary InstanceState
+// in the state.
+type EvalUndeposeState struct {
+	Name  string
+	State **InstanceState
+}
+
+// TODO: test
+func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
+	state, lock := ctx.State()
+
+	// Get a read lock so we can access this instance
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Look for the module state. If we don't have one, then it doesn't matter.
+	mod := state.ModuleByPath(ctx.Path())
+	if mod == nil {
+		return nil, nil
+	}
+
+	// Look for the resource state. If we don't have one, then it is okay.
+	rs := mod.Resources[n.Name]
+	if rs == nil {
+		return nil, nil
+	}
+
+	// If we don't have any deposed resource, then we don't have anything to do
+	if len(rs.Deposed) == 0 {
+		return nil, nil
+	}
+
+	// Undepose
+	idx := len(rs.Deposed) - 1
+	rs.Primary = rs.Deposed[idx]
+	rs.Deposed[idx] = *n.State
+
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
new file mode 100644
index 0000000000..478aa64005
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -0,0 +1,227 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/mitchellh/mapstructure"
+)
+
+// EvalValidateError is the error structure returned if there were
+// validation errors.
+type EvalValidateError struct {
+	Warnings []string
+	Errors   []error
+}
+
+func (e *EvalValidateError) Error() string {
+	return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
+}
+
+// EvalValidateCount is an EvalNode implementation that validates
+// the count of a resource.
+type EvalValidateCount struct {
+	Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
+	var count int
+	var errs []error
+	var err error
+	if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil {
+		errs = append(errs, fmt.Errorf(
+			"Failed to interpolate count: %s", err))
+		goto RETURN
+	}
+
+	count, err = n.Resource.Count()
+	if err != nil {
+		// If we can't get the count during validation, then
+		// just replace it with the number 1.
+ c := n.Resource.RawCount.Config() + c[n.Resource.RawCount.Key] = "1" + count = 1 + } + err = nil + + if count < 0 { + errs = append(errs, fmt.Errorf( + "Count is less than zero: %d", count)) + } + +RETURN: + if len(errs) != 0 { + err = &EvalValidateError{ + Errors: errs, + } + } + return nil, err +} + +// EvalValidateProvider is an EvalNode implementation that validates +// the configuration of a resource. +type EvalValidateProvider struct { + Provider *ResourceProvider + Config **ResourceConfig +} + +func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { + provider := *n.Provider + config := *n.Config + + warns, errs := provider.Validate(config) + if len(warns) == 0 && len(errs) == 0 { + return nil, nil + } + + return nil, &EvalValidateError{ + Warnings: warns, + Errors: errs, + } +} + +// EvalValidateProvisioner is an EvalNode implementation that validates +// the configuration of a resource. +type EvalValidateProvisioner struct { + Provisioner *ResourceProvisioner + Config **ResourceConfig + ConnConfig **ResourceConfig +} + +func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { + provisioner := *n.Provisioner + config := *n.Config + var warns []string + var errs []error + + { + // Validate the provisioner's own config first + w, e := provisioner.Validate(config) + warns = append(warns, w...) + errs = append(errs, e...) + } + + { + // Now validate the connection config, which might either be from + // the provisioner block itself or inherited from the resource's + // shared connection info. + w, e := n.validateConnConfig(*n.ConnConfig) + warns = append(warns, w...) + errs = append(errs, e...) + } + + if len(warns) == 0 && len(errs) == 0 { + return nil, nil + } + + return nil, &EvalValidateError{ + Warnings: warns, + Errors: errs, + } +} + +func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) { + // We can't comprehensively validate the connection config since its + // final structure is decided by the communicator and we can't instantiate + // that until we have a complete instance state. However, we *can* catch + // configuration keys that are not valid for *any* communicator, catching + // typos early rather than waiting until we actually try to run one of + // the resource's provisioners. + + type connConfigSuperset struct { + // All attribute types are interface{} here because at this point we + // may still have unresolved interpolation expressions, which will + // appear as strings regardless of the final goal type. 
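+		// For example, a connection block written as (values illustrative):
+		//
+		//	connection {
+		//		type = "ssh"
+		//		port = "${var.ssh_port}"
+		//	}
+		//
+		// would reach this point with Port still holding the uninterpolated
+		// string "${var.ssh_port}" rather than a number.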
+ + Type interface{} `mapstructure:"type"` + User interface{} `mapstructure:"user"` + Password interface{} `mapstructure:"password"` + Host interface{} `mapstructure:"host"` + Port interface{} `mapstructure:"port"` + Timeout interface{} `mapstructure:"timeout"` + ScriptPath interface{} `mapstructure:"script_path"` + + // For type=ssh only (enforced in ssh communicator) + PrivateKey interface{} `mapstructure:"private_key"` + Agent interface{} `mapstructure:"agent"` + BastionHost interface{} `mapstructure:"bastion_host"` + BastionPort interface{} `mapstructure:"bastion_port"` + BastionUser interface{} `mapstructure:"bastion_user"` + BastionPassword interface{} `mapstructure:"bastion_password"` + BastionPrivateKey interface{} `mapstructure:"bastion_private_key"` + + // For type=winrm only (enforced in winrm communicator) + HTTPS interface{} `mapstructure:"https"` + Insecure interface{} `mapstructure:"insecure"` + CACert interface{} `mapstructure:"cacert"` + } + + var metadata mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Metadata: &metadata, + Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys + }) + if err != nil { + // should never happen + errs = append(errs, err) + return + } + + if err := decoder.Decode(connConfig.Config); err != nil { + errs = append(errs, err) + return + } + + for _, attrName := range metadata.Unused { + errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName)) + } + return +} + +// EvalValidateResource is an EvalNode implementation that validates +// the configuration of a resource. +type EvalValidateResource struct { + Provider *ResourceProvider + Config **ResourceConfig + ResourceName string + ResourceType string + ResourceMode config.ResourceMode + + // IgnoreWarnings means that warnings will not be passed through. This allows + // "just-in-time" passes of validation to continue execution through warnings. + IgnoreWarnings bool +} + +func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { + provider := *n.Provider + cfg := *n.Config + var warns []string + var errs []error + // Provider entry point varies depending on resource mode, because + // managed resources and data resources are two distinct concepts + // in the provider abstraction. + switch n.ResourceMode { + case config.ManagedResourceMode: + warns, errs = provider.ValidateResource(n.ResourceType, cfg) + case config.DataResourceMode: + warns, errs = provider.ValidateDataSource(n.ResourceType, cfg) + } + + // If the resource name doesn't match the name regular + // expression, show an error. 
+	if !config.NameRegexp.Match([]byte(n.ResourceName)) {
+		errs = append(errs, fmt.Errorf(
+			"%s: resource name can only contain letters, numbers, "+
+				"dashes, and underscores.", n.ResourceName))
+	}
+
+	if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 {
+		return nil, nil
+	}
+
+	return nil, &EvalValidateError{
+		Warnings: warns,
+		Errors:   errs,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
new file mode 100644
index 0000000000..ae4436a2ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -0,0 +1,74 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/config"
+)
+
+// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
+// a configuration doesn't contain a reference to the resource itself.
+//
+// This must be done prior to interpolating configuration in order to avoid
+// any infinite loop scenarios.
+type EvalValidateResourceSelfRef struct {
+	Addr   **ResourceAddress
+	Config **config.RawConfig
+}
+
+func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
+	addr := *n.Addr
+	conf := *n.Config
+
+	// Go through the variables and find self references
+	var errs []error
+	for k, raw := range conf.Variables {
+		rv, ok := raw.(*config.ResourceVariable)
+		if !ok {
+			continue
+		}
+
+		// Build an address from the variable
+		varAddr := &ResourceAddress{
+			Path:         addr.Path,
+			Mode:         rv.Mode,
+			Type:         rv.Type,
+			Name:         rv.Name,
+			Index:        rv.Index,
+			InstanceType: TypePrimary,
+		}
+
+		// If the variable access is a multi-access (*), then we just
+		// match the index so that we'll match our own addr if everything
+		// else matches.
+		if rv.Multi && rv.Index == -1 {
+			varAddr.Index = addr.Index
+		}
+
+		// ResourceAddress uses index "-1" when no index is set at all,
+		// which is equivalent to index "0" for resource access. If we
+		// hit that scenario, set our varAddr index to -1 so it matches.
+		if addr.Index == -1 && varAddr.Index == 0 {
+			varAddr.Index = -1
+		}
+
+		// If the addresses match, then this is a self reference
+		if varAddr.Equals(addr) && varAddr.Index == addr.Index {
+			errs = append(errs, fmt.Errorf(
+				"%s: self reference not allowed: %q",
+				addr, k))
+		}
+	}
+
+	// If no errors, no errors!
+	if len(errs) == 0 {
+		return nil, nil
+	}
+
+	// Wrap the errors in the proper wrapper so we can handle validation
+	// formatting properly upstream.
+	return nil, &EvalValidateError{
+		Errors: errs,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
new file mode 100644
index 0000000000..e39a33c2a9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
@@ -0,0 +1,279 @@
+package terraform
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/helper/hilmapstructure"
+)
+
+// EvalTypeCheckVariable is an EvalNode which ensures that the variable
+// values which are assigned as inputs to a module (including the root)
+// match the types which are either declared for the variables explicitly
+// or inferred from the default values.
+// +// In order to achieve this three things are required: +// - a map of the proposed variable values +// - the configuration tree of the module in which the variable is +// declared +// - the path to the module (so we know which part of the tree to +// compare the values against). +type EvalTypeCheckVariable struct { + Variables map[string]interface{} + ModulePath []string + ModuleTree *module.Tree +} + +func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) { + currentTree := n.ModuleTree + for _, pathComponent := range n.ModulePath[1:] { + currentTree = currentTree.Children()[pathComponent] + } + targetConfig := currentTree.Config() + + prototypes := make(map[string]config.VariableType) + for _, variable := range targetConfig.Variables { + prototypes[variable.Name] = variable.Type() + } + + // Only display a module in an error message if we are not in the root module + modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], ".")) + if len(n.ModulePath) == 1 { + modulePathDescription = "" + } + + for name, declaredType := range prototypes { + proposedValue, ok := n.Variables[name] + if !ok { + // This means the default value should be used as no overriding value + // has been set. Therefore we should continue as no check is necessary. + continue + } + + if proposedValue == config.UnknownVariableValue { + continue + } + + switch declaredType { + case config.VariableTypeString: + switch proposedValue.(type) { + case string: + continue + default: + return nil, fmt.Errorf("variable %s%s should be type %s, got %s", + name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue)) + } + case config.VariableTypeMap: + switch proposedValue.(type) { + case map[string]interface{}: + continue + default: + return nil, fmt.Errorf("variable %s%s should be type %s, got %s", + name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue)) + } + case config.VariableTypeList: + switch proposedValue.(type) { + case []interface{}: + continue + default: + return nil, fmt.Errorf("variable %s%s should be type %s, got %s", + name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue)) + } + default: + return nil, fmt.Errorf("variable %s%s should be type %s, got type string", + name, modulePathDescription, declaredType.Printable()) + } + } + + return nil, nil +} + +// EvalSetVariables is an EvalNode implementation that sets the variables +// explicitly for interpolation later. +type EvalSetVariables struct { + Module *string + Variables map[string]interface{} +} + +// TODO: test +func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) { + ctx.SetVariables(*n.Module, n.Variables) + return nil, nil +} + +// EvalVariableBlock is an EvalNode implementation that evaluates the +// given configuration, and uses the final values as a way to set the +// mapping. 
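+//
+// For instance (illustrative values), a module block like:
+//
+//	module "child" {
+//		source = "./child"
+//		names  = ["a", "b"]
+//		limit  = "3"
+//	}
+//
+// would populate VariableValues with a []interface{} for "names" and a
+// string for "limit", matching the Slice/Map/default cases below.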
+type EvalVariableBlock struct {
+	Config         **ResourceConfig
+	VariableValues map[string]interface{}
+}
+
+func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) {
+	// Clear out the existing mapping
+	for k := range n.VariableValues {
+		delete(n.VariableValues, k)
+	}
+
+	// Get our configuration
+	rc := *n.Config
+	for k, v := range rc.Config {
+		vKind := reflect.ValueOf(v).Type().Kind()
+
+		switch vKind {
+		case reflect.Slice:
+			var vSlice []interface{}
+			if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
+				n.VariableValues[k] = vSlice
+				continue
+			}
+		case reflect.Map:
+			var vMap map[string]interface{}
+			if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
+				n.VariableValues[k] = vMap
+				continue
+			}
+		default:
+			var vString string
+			if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
+				n.VariableValues[k] = vString
+				continue
+			}
+		}
+
+		return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
+	}
+
+	for _, path := range rc.ComputedKeys {
+		log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
+		err := n.setUnknownVariableValueForPath(path)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error {
+	pathComponents := strings.Split(path, ".")
+
+	if len(pathComponents) < 1 {
+		return fmt.Errorf("No path components in %s", path)
+	}
+
+	if len(pathComponents) == 1 {
+		// Special case the "top level" since we know the type
+		if _, ok := n.VariableValues[pathComponents[0]]; !ok {
+			n.VariableValues[pathComponents[0]] = config.UnknownVariableValue
+		}
+		return nil
+	}
+
+	// Otherwise find the correct point in the tree and then set to unknown
+	var current interface{} = n.VariableValues[pathComponents[0]]
+	for i := 1; i < len(pathComponents); i++ {
+		switch tCurrent := current.(type) {
+		case []interface{}:
+			index, err := strconv.Atoi(pathComponents[i])
+			if err != nil {
+				return fmt.Errorf("Cannot convert %s to slice index in path %s",
+					pathComponents[i], path)
+			}
+			current = tCurrent[index]
+		case []map[string]interface{}:
+			index, err := strconv.Atoi(pathComponents[i])
+			if err != nil {
+				return fmt.Errorf("Cannot convert %s to slice index in path %s",
+					pathComponents[i], path)
+			}
+			current = tCurrent[index]
+		case map[string]interface{}:
+			if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
+				current = val
+				continue
+			}
+
+			tCurrent[pathComponents[i]] = config.UnknownVariableValue
+			break
+		}
+	}
+
+	return nil
+}
+
+// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
+// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
+// bare map literal is indistinguishable from a list of maps w/ one element.
+//
+// We take all the same inputs as EvalTypeCheckVariable above, since we need
+// both the target type and the proposed value in order to properly coerce.
+type EvalCoerceMapVariable struct {
+	Variables  map[string]interface{}
+	ModulePath []string
+	ModuleTree *module.Tree
+}
+
+// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
+// details.
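+//
+// Sketch of the coercion (values illustrative): a proposed value of
+//
+//	[]interface{}{map[string]interface{}{"env": "dev"}}
+//
+// for a variable declared as type "map" is rewritten to
+//
+//	map[string]interface{}{"env": "dev"}
+//
+// Lists with more or fewer than one element are left untouched.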
+func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) { + currentTree := n.ModuleTree + for _, pathComponent := range n.ModulePath[1:] { + currentTree = currentTree.Children()[pathComponent] + } + targetConfig := currentTree.Config() + + prototypes := make(map[string]config.VariableType) + for _, variable := range targetConfig.Variables { + prototypes[variable.Name] = variable.Type() + } + + for name, declaredType := range prototypes { + if declaredType != config.VariableTypeMap { + continue + } + + proposedValue, ok := n.Variables[name] + if !ok { + continue + } + + if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 { + if m, ok := list[0].(map[string]interface{}); ok { + log.Printf("[DEBUG] EvalCoerceMapVariable: "+ + "Coercing single element list into map: %#v", m) + n.Variables[name] = m + } + } + } + + return nil, nil +} + +// hclTypeName returns the name of the type that would represent this value in +// a config file, or falls back to the Go type name if there's no corresponding +// HCL type. This is used for formatted output, not for comparing types. +func hclTypeName(i interface{}) string { + switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k { + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return "number" + case reflect.Array, reflect.Slice: + return "list" + case reflect.Map: + return "map" + case reflect.String: + return "string" + default: + // fall back to the Go type if there's no match + return k.String() + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go new file mode 100644 index 0000000000..00392efed8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go @@ -0,0 +1,119 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config" +) + +// ProviderEvalTree returns the evaluation tree for initializing and +// configuring providers. 
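+//
+// The returned tree is an EvalSequence that, roughly, looks like this
+// (a sketch eliding the per-operation filters):
+//
+//	EvalInitProvider
+//	EvalOpFilter{walkInput, walkImport}: interpolate, build config, input
+//	EvalOpFilter{walkValidate}:          interpolate, build config, validate
+//	EvalOpFilter{apply-style walks}:     interpolate, build config, set config
+//	EvalOpFilter{apply-style walks}:     configure provider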
+func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { + var provider ResourceProvider + var resourceConfig *ResourceConfig + + seq := make([]EvalNode, 0, 5) + seq = append(seq, &EvalInitProvider{Name: n}) + + // Input stuff + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkInput, walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n, + Output: &provider, + }, + &EvalInterpolate{ + Config: config, + Output: &resourceConfig, + }, + &EvalBuildProviderConfig{ + Provider: n, + Config: &resourceConfig, + Output: &resourceConfig, + }, + &EvalInputProvider{ + Name: n, + Provider: &provider, + Config: &resourceConfig, + }, + }, + }, + }) + + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkValidate}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n, + Output: &provider, + }, + &EvalInterpolate{ + Config: config, + Output: &resourceConfig, + }, + &EvalBuildProviderConfig{ + Provider: n, + Config: &resourceConfig, + Output: &resourceConfig, + }, + &EvalValidateProvider{ + Provider: &provider, + Config: &resourceConfig, + }, + &EvalSetProviderConfig{ + Provider: n, + Config: &resourceConfig, + }, + }, + }, + }) + + // Apply stuff + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n, + Output: &provider, + }, + &EvalInterpolate{ + Config: config, + Output: &resourceConfig, + }, + &EvalBuildProviderConfig{ + Provider: n, + Config: &resourceConfig, + Output: &resourceConfig, + }, + &EvalSetProviderConfig{ + Provider: n, + Config: &resourceConfig, + }, + }, + }, + }) + + // We configure on everything but validate, since validate may + // not have access to all the variables. + seq = append(seq, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalConfigProvider{ + Provider: n, + Config: &resourceConfig, + }, + }, + }, + }) + + return &EvalSequence{Nodes: seq} +} + +// CloseProviderEvalTree returns the evaluation tree for closing +// provider connections that aren't needed anymore. +func CloseProviderEvalTree(n string) EvalNode { + return &EvalCloseProvider{Name: n} +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go new file mode 100644 index 0000000000..48ce6a3366 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go @@ -0,0 +1,172 @@ +package terraform + +import ( + "fmt" + "log" + "runtime/debug" + "strings" + + "github.com/hashicorp/terraform/dag" +) + +// RootModuleName is the name given to the root module implicitly. +const RootModuleName = "root" + +// RootModulePath is the path for the root module. +var RootModulePath = []string{RootModuleName} + +// Graph represents the graph that Terraform uses to represent resources +// and their dependencies. +type Graph struct { + // Graph is the actual DAG. This is embedded so you can call the DAG + // methods directly. + dag.AcyclicGraph + + // Path is the path in the module tree that this Graph represents. + // The root is represented by a single element list containing + // RootModuleName + Path []string + + // debugName is a name for reference in the debug output. This is usually + // to indicate what topmost builder was, and if this graph is a shadow or + // not. 
+	debugName string
+}
+
+func (g *Graph) DirectedGraph() dag.Grapher {
+	return &g.AcyclicGraph
+}
+
+// Walk walks the graph with the given walker for callbacks. The graph
+// will be walked with full parallelism, so the walker should expect
+// to be called concurrently.
+func (g *Graph) Walk(walker GraphWalker) error {
+	return g.walk(walker)
+}
+
+func (g *Graph) walk(walker GraphWalker) error {
+	// The callbacks for entering/exiting a graph
+	ctx := walker.EnterPath(g.Path)
+	defer walker.ExitPath(g.Path)
+
+	// Get the path for logs
+	path := strings.Join(ctx.Path(), ".")
+
+	// Determine if our walker is a panic wrapper
+	panicwrap, ok := walker.(GraphWalkerPanicwrapper)
+	if !ok {
+		panicwrap = nil // just to be sure
+	}
+
+	debugName := "walk-graph.json"
+	if g.debugName != "" {
+		debugName = g.debugName + "-" + debugName
+	}
+
+	debugBuf := dbug.NewFileWriter(debugName)
+	g.SetDebugWriter(debugBuf)
+	defer debugBuf.Close()
+
+	// Walk the graph.
+	var walkFn dag.WalkFunc
+	walkFn = func(v dag.Vertex) (rerr error) {
+		log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v))
+		g.DebugVisitInfo(v, g.debugName)
+
+		// If we have a panic-wrapping GraphWalker and a panic occurs, recover
+		// and call that. We ensure the return value is an error, however,
+		// so that future nodes are not called.
+		defer func() {
+			// If no panicwrap, do nothing
+			if panicwrap == nil {
+				return
+			}
+
+			// If no panic, do nothing
+			err := recover()
+			if err == nil {
+				return
+			}
+
+			// Modify the return value to show the error
+			rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
+				dag.VertexName(v), err, debug.Stack())
+
+			// Call the panic wrapper
+			panicwrap.Panic(v, err)
+		}()
+
+		walker.EnterVertex(v)
+		defer walker.ExitVertex(v, rerr)
+
+		// vertexCtx is the context that we use when evaluating. This
+		// is normally the context of our graph but can be overridden
+		// with a GraphNodeSubPath impl.
+		vertexCtx := ctx
+		if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
+			vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
+			defer walker.ExitPath(pn.Path())
+		}
+
+		// If the node is eval-able, then evaluate it.
+		if ev, ok := v.(GraphNodeEvalable); ok {
+			tree := ev.EvalTree()
+			if tree == nil {
+				panic(fmt.Sprintf(
+					"%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
+			}
+
+			// Allow the walker to change our tree if needed. Eval,
+			// then callback with the output.
+ log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) + + tree = walker.EnterEvalTree(v, tree) + output, err := Eval(tree, vertexCtx) + if rerr = walker.ExitEvalTree(v, output, err); rerr != nil { + return + } + } + + // If the node is dynamically expanded, then expand it + if ev, ok := v.(GraphNodeDynamicExpandable); ok { + log.Printf( + "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph", + path, + dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) + + g, err := ev.DynamicExpand(vertexCtx) + if err != nil { + rerr = err + return + } + if g != nil { + // Walk the subgraph + if rerr = g.walk(walker); rerr != nil { + return + } + } + } + + // If the node has a subgraph, then walk the subgraph + if sn, ok := v.(GraphNodeSubgraph); ok { + log.Printf( + "[DEBUG] vertex '%s.%s': walking subgraph", + path, + dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) + + if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil { + return + } + } + + return nil + } + + return g.AcyclicGraph.Walk(walkFn) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go new file mode 100644 index 0000000000..6374bb9045 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go @@ -0,0 +1,77 @@ +package terraform + +import ( + "fmt" + "log" + "strings" +) + +// GraphBuilder is an interface that can be implemented and used with +// Terraform to build the graph that Terraform walks. +type GraphBuilder interface { + // Build builds the graph for the given module path. It is up to + // the interface implementation whether this build should expand + // the graph or not. + Build(path []string) (*Graph, error) +} + +// BasicGraphBuilder is a GraphBuilder that builds a graph out of a +// series of transforms and (optionally) validates the graph is a valid +// structure. +type BasicGraphBuilder struct { + Steps []GraphTransformer + Validate bool + // Optional name to add to the graph debug log + Name string +} + +func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { + g := &Graph{Path: path} + + debugName := "graph.json" + if b.Name != "" { + debugName = b.Name + "-" + debugName + } + debugBuf := dbug.NewFileWriter(debugName) + g.SetDebugWriter(debugBuf) + defer debugBuf.Close() + + for _, step := range b.Steps { + if step == nil { + continue + } + + stepName := fmt.Sprintf("%T", step) + dot := strings.LastIndex(stepName, ".") + if dot >= 0 { + stepName = stepName[dot+1:] + } + + debugOp := g.DebugOperation(stepName, "") + err := step.Transform(g) + + errMsg := "" + if err != nil { + errMsg = err.Error() + } + debugOp.End(errMsg) + + log.Printf( + "[TRACE] Graph after step %T:\n\n%s", + step, g.StringWithNodeTypes()) + + if err != nil { + return g, err + } + } + + // Validate the graph structure + if b.Validate { + if err := g.Validate(); err != nil { + log.Printf("[ERROR] Graph validation failed. 
Graph:\n\n%s", g.String()) + return nil, err + } + } + + return g, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go new file mode 100644 index 0000000000..38a90f2775 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go @@ -0,0 +1,141 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// ApplyGraphBuilder implements GraphBuilder and is responsible for building +// a graph for applying a Terraform diff. +// +// Because the graph is built from the diff (vs. the config or state), +// this helps ensure that the apply-time graph doesn't modify any resources +// that aren't explicitly in the diff. There are other scenarios where the +// diff can be deviated, so this is just one layer of protection. +type ApplyGraphBuilder struct { + // Module is the root module for the graph to build. + Module *module.Tree + + // Diff is the diff to apply. + Diff *Diff + + // State is the current state + State *State + + // Providers is the list of providers supported. + Providers []string + + // Provisioners is the list of provisioners supported. + Provisioners []string + + // Targets are resources to target. This is only required to make sure + // unnecessary outputs aren't included in the apply graph. The plan + // builder successfully handles targeting resources. In the future, + // outputs should go into the diff so that this is unnecessary. + Targets []string + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Destroy, if true, represents a pure destroy operation + Destroy bool + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "ApplyGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *ApplyGraphBuilder) Steps() []GraphTransformer { + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeApplyableResource{ + NodeAbstractResource: a, + } + } + + steps := []GraphTransformer{ + // Creates all the nodes represented in the diff. 
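		// Editor's note: each entry in this slice implements
		// GraphTransformer and mutates the graph in order (see
		// BasicGraphBuilder.Build above). A minimal, hypothetical
		// transformer for illustration only:
		//
		//	type traceTransformer struct{}
		//
		//	func (t *traceTransformer) Transform(g *Graph) error {
		//		log.Printf("[TRACE] graph has %d vertices", len(g.Vertices()))
		//		return nil
		//	}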
+ &DiffTransformer{ + Concrete: concreteResource, + + Diff: b.Diff, + Module: b.Module, + State: b.State, + }, + + // Create orphan output nodes + &OrphanOutputTransformer{Module: b.Module, State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Module: b.Module}, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Create all the providers + &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, + &ProviderTransformer{}, + &DisableProviderTransformer{}, + &ParentProviderTransformer{}, + &AttachProviderConfigTransformer{Module: b.Module}, + + // Destruction ordering + &DestroyEdgeTransformer{Module: b.Module, State: b.State}, + GraphTransformIf( + func() bool { return !b.Destroy }, + &CBDEdgeTransformer{Module: b.Module, State: b.State}, + ), + + // Provisioner-related transformations + &MissingProvisionerTransformer{Provisioners: b.Provisioners}, + &ProvisionerTransformer{}, + + // Add root variables + &RootVariableTransformer{Module: b.Module}, + + // Add the outputs + &OutputTransformer{Module: b.Module}, + + // Add module variables + &ModuleVariableTransformer{Module: b.Module}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Add the node to fix the state count boundaries + &CountBoundaryTransformer{}, + + // Target + &TargetsTransformer{Targets: b.Targets}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). + steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go new file mode 100644 index 0000000000..014b348e5f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go @@ -0,0 +1,67 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for +// planning a pure-destroy. +// +// Planning a pure destroy operation is simple because we can ignore most +// ordering configuration and simply reverse the state. +type DestroyPlanGraphBuilder struct { + // Module is the root module for the graph to build. + Module *module.Tree + + // State is the current state + State *State + + // Targets are resources to target + Targets []string + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "DestroyPlanGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodePlanDestroyableResource{ + NodeAbstractResource: a, + } + } + + steps := []GraphTransformer{ + // Creates all the nodes represented in the state. + &StateTransformer{ + Concrete: concreteResource, + State: b.State, + }, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Module: b.Module}, + + // Destruction ordering. 
We require this only so that + // targeting below will prune the correct things. + &DestroyEdgeTransformer{Module: b.Module, State: b.State}, + + // Target. Note we don't set "Destroy: true" here since we already + // created proper destroy ordering. + &TargetsTransformer{Targets: b.Targets}, + + // Single root + &RootTransformer{}, + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go new file mode 100644 index 0000000000..7070c59e40 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go @@ -0,0 +1,76 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// ImportGraphBuilder implements GraphBuilder and is responsible for building +// a graph for importing resources into Terraform. This is a much, much +// simpler graph than a normal configuration graph. +type ImportGraphBuilder struct { + // ImportTargets are the list of resources to import. + ImportTargets []*ImportTarget + + // Module is the module to add to the graph. See ImportOpts.Module. + Module *module.Tree + + // Providers is the list of providers supported. + Providers []string +} + +// Build builds the graph according to the steps returned by Steps. +func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: true, + Name: "ImportGraphBuilder", + }).Build(path) +} + +// Steps returns the ordered list of GraphTransformers that must be executed +// to build a complete graph. +func (b *ImportGraphBuilder) Steps() []GraphTransformer { + // Get the module. If we don't have one, we just use an empty tree + // so that the transform still works but does nothing. + mod := b.Module + if mod == nil { + mod = module.NewEmptyTree() + } + + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Create all our resources from the configuration and state + &ConfigTransformer{Module: mod}, + + // Add the import steps + &ImportStateTransformer{Targets: b.ImportTargets}, + + // Provider-related transformations + &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, + &ProviderTransformer{}, + &DisableProviderTransformer{}, + &ParentProviderTransformer{}, + &AttachProviderConfigTransformer{Module: mod}, + + // This validates that the providers only depend on variables + &ImportProviderValidateTransformer{}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + + // Optimize + &TransitiveReductionTransformer{}, + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go new file mode 100644 index 0000000000..0df48cdb87 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go @@ -0,0 +1,27 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/dag" +) + +// InputGraphBuilder creates the graph for the input operation. +// +// Unlike other graph builders, this is a function since it currently modifies +// and is based on the PlanGraphBuilder. The PlanGraphBuilder passed in will be +// modified and should not be used for any other operations. 
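+//
+// Editor's sketch of the intended call pattern (hypothetical values):
+//
+//	pb := &PlanGraphBuilder{Module: mod, State: state, Providers: providers}
+//	graph, err := InputGraphBuilder(pb).Build(RootModulePath)
+//	if err != nil {
+//		// handle the build error
+//	}
+//	// graph is then walked with a ContextGraphWalker in walkInput mode.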
+func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder { + // We're going to customize the concrete functions + p.CustomConcrete = true + + // Set the provider to the normal provider. This will ask for input. + p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + // We purposely don't set any more concrete fields since the remainder + // should be no-ops. + + return p +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go new file mode 100644 index 0000000000..a6a3a90d48 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go @@ -0,0 +1,164 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// PlanGraphBuilder implements GraphBuilder and is responsible for building +// a graph for planning (creating a Terraform Diff). +// +// The primary difference between this graph and others: +// +// * Based on the config since it represents the target state +// +// * Ignores lifecycle options since no lifecycle events occur here. This +// simplifies the graph significantly since complex transforms such as +// create-before-destroy can be completely ignored. +// +type PlanGraphBuilder struct { + // Module is the root module for the graph to build. + Module *module.Tree + + // State is the current state + State *State + + // Providers is the list of providers supported. + Providers []string + + // Provisioners is the list of provisioners supported. + Provisioners []string + + // Targets are resources to target + Targets []string + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Validate will do structural validation of the graph. + Validate bool + + // CustomConcrete can be set to customize the node types created + // for various parts of the plan. This is useful in order to customize + // the plan behavior. + CustomConcrete bool + ConcreteProvider ConcreteProviderNodeFunc + ConcreteResource ConcreteResourceNodeFunc + ConcreteResourceOrphan ConcreteResourceNodeFunc + + once sync.Once +} + +// See GraphBuilder +func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "PlanGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *PlanGraphBuilder) Steps() []GraphTransformer { + b.once.Do(b.init) + + steps := []GraphTransformer{ + // Creates all the resources represented in the config + &ConfigTransformer{ + Concrete: b.ConcreteResource, + Module: b.Module, + }, + + // Add the outputs + &OutputTransformer{Module: b.Module}, + + // Add orphan resources + &OrphanResourceTransformer{ + Concrete: b.ConcreteResourceOrphan, + State: b.State, + Module: b.Module, + }, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Module: b.Module}, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Add root variables + &RootVariableTransformer{Module: b.Module}, + + // Create all the providers + &MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider}, + &ProviderTransformer{}, + &DisableProviderTransformer{}, + &ParentProviderTransformer{}, + &AttachProviderConfigTransformer{Module: b.Module}, + + // Provisioner-related transformations. Only add these if requested. 
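+		// Editor's note: GraphTransformIf (defined elsewhere in this
+		// package) defers the predicate to transform time and applies the
+		// wrapped transformers only when it returns true. The nil check
+		// below (rather than len == 0) means an empty-but-non-nil slice
+		// still wires up the provisioner transformers:
+		//
+		//	GraphTransformIf(pred, t) // behaves as t when pred() is true,
+		//	                          // otherwise as a no-op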
+ GraphTransformIf( + func() bool { return b.Provisioners != nil }, + GraphTransformMulti( + &MissingProvisionerTransformer{Provisioners: b.Provisioners}, + &ProvisionerTransformer{}, + ), + ), + + // Add module variables + &ModuleVariableTransformer{Module: b.Module}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Add the node to fix the state count boundaries + &CountBoundaryTransformer{}, + + // Target + &TargetsTransformer{Targets: b.Targets}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). + steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} + +func (b *PlanGraphBuilder) init() { + // Do nothing if the user requests customizing the fields + if b.CustomConcrete { + return + } + + b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { + return &NodePlannableResource{ + NodeAbstractCountResource: &NodeAbstractCountResource{ + NodeAbstractResource: a, + }, + } + } + + b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex { + return &NodePlannableResourceOrphan{ + NodeAbstractResource: a, + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go new file mode 100644 index 0000000000..0634f9698d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go @@ -0,0 +1,163 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// RefreshGraphBuilder implements GraphBuilder and is responsible for building +// a graph for refreshing (updating the Terraform state). +// +// The primary difference between this graph and others: +// +// * Based on the state since it represents the only resources that +// need to be refreshed. +// +// * Ignores lifecycle options since no lifecycle events occur here. This +// simplifies the graph significantly since complex transforms such as +// create-before-destroy can be completely ignored. +// +type RefreshGraphBuilder struct { + // Module is the root module for the graph to build. + Module *module.Tree + + // State is the current state + State *State + + // Providers is the list of providers supported. + Providers []string + + // Targets are resources to target + Targets []string + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "RefreshGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *RefreshGraphBuilder) Steps() []GraphTransformer { + // Custom factory for creating providers. 
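+	// Editor's note: these "concrete" factories are the injection point
+	// that lets shared transformers create builder-specific node types:
+	// the transformer builds the abstract node and calls the factory to
+	// wrap it. A hypothetical factory follows the same shape:
+	//
+	//	var f ConcreteProviderNodeFunc = func(a *NodeAbstractProvider) dag.Vertex {
+	//		return &NodeApplyableProvider{NodeAbstractProvider: a}
+	//	}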
+	concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+		return &NodeApplyableProvider{
+			NodeAbstractProvider: a,
+		}
+	}
+
+	concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex {
+		return &NodeRefreshableManagedResource{
+			NodeAbstractCountResource: &NodeAbstractCountResource{
+				NodeAbstractResource: a,
+			},
+		}
+	}
+
+	concreteManagedResourceInstance := func(a *NodeAbstractResource) dag.Vertex {
+		return &NodeRefreshableManagedResourceInstance{
+			NodeAbstractResource: a,
+		}
+	}
+
+	concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
+		return &NodeRefreshableDataResource{
+			NodeAbstractCountResource: &NodeAbstractCountResource{
+				NodeAbstractResource: a,
+			},
+		}
+	}
+
+	steps := []GraphTransformer{
+		// Creates all the managed resources that aren't in the state, but only if
+		// we have a state already. No resources in state means there's
+		// nothing to refresh.
+		func() GraphTransformer {
+			if b.State.HasResources() {
+				return &ConfigTransformer{
+					Concrete:   concreteManagedResource,
+					Module:     b.Module,
+					Unique:     true,
+					ModeFilter: true,
+					Mode:       config.ManagedResourceMode,
+				}
+			}
+			log.Println("[TRACE] No managed resources in state during refresh, skipping managed resource transformer")
+			return nil
+		}(),
+
+		// Creates all the data resources that aren't in the state. This will also
+		// add any orphans from scaling in as destroy nodes.
+		&ConfigTransformer{
+			Concrete:   concreteDataResource,
+			Module:     b.Module,
+			Unique:     true,
+			ModeFilter: true,
+			Mode:       config.DataResourceMode,
+		},
+
+		// Add any fully-orphaned resources from config (ones that have been
+		// removed completely, not ones that are just orphaned due to a scaled-in
+		// count).
+		&OrphanResourceTransformer{
+			Concrete: concreteManagedResourceInstance,
+			State:    b.State,
+			Module:   b.Module,
+		},
+
+		// Attach the state
+		&AttachStateTransformer{State: b.State},
+
+		// Attach the configuration to any resources
+		&AttachResourceConfigTransformer{Module: b.Module},
+
+		// Add root variables
+		&RootVariableTransformer{Module: b.Module},
+
+		// Create all the providers
+		&MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+		&ProviderTransformer{},
+		&DisableProviderTransformer{},
+		&ParentProviderTransformer{},
+		&AttachProviderConfigTransformer{Module: b.Module},
+
+		// Add the outputs
+		&OutputTransformer{Module: b.Module},
+
+		// Add module variables
+		&ModuleVariableTransformer{Module: b.Module},
+
+		// Connect so that the references are ready for targeting. We'll
+		// have to connect again later for providers and so on.
+		&ReferenceTransformer{},
+
+		// Target
+		&TargetsTransformer{Targets: b.Targets},
+
+		// Close opened plugin connections
+		&CloseProviderTransformer{},
+
+		// Single root
+		&RootTransformer{},
+	}
+
+	if !b.DisableReduce {
+		// Perform the transitive reduction to make our graph a bit
+		// more sane if possible (it usually is possible).
+		steps = append(steps, &TransitiveReductionTransformer{})
+	}
+
+	return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
new file mode 100644
index 0000000000..645ec7be96
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
@@ -0,0 +1,36 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform/dag"
+)
+
+// ValidateGraphBuilder creates the graph for the validate operation.
+//
+// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that
+// we only have to validate what we'd normally plan anyway. The
+// PlanGraphBuilder given will be modified, so it shouldn't be used for anything
+// else after calling this function.
+func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
+	// We're going to customize the concrete functions
+	p.CustomConcrete = true
+
+	// Set the provider to the normal provider. This will ask for input.
+	p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+		return &NodeApplyableProvider{
+			NodeAbstractProvider: a,
+		}
+	}
+
+	p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
+		return &NodeValidatableResource{
+			NodeAbstractCountResource: &NodeAbstractCountResource{
+				NodeAbstractResource: a,
+			},
+		}
+	}
+
+	// We purposely don't set any other concrete types since they don't
+	// require validation.
+
+	return p
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
new file mode 100644
index 0000000000..73e3821fbb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
@@ -0,0 +1,9 @@
+package terraform
+
+import "github.com/hashicorp/terraform/dag"
+
+// GraphDot returns the dot formatting of a visual representation of
+// the given Terraform graph.
+func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
+	return string(g.Dot(opts)), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
new file mode 100644
index 0000000000..2897eb546a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
@@ -0,0 +1,7 @@
+package terraform
+
+// GraphNodeSubPath says that a node is part of a graph with a
+// different path, and the context should be adjusted accordingly.
+type GraphNodeSubPath interface {
+	Path() []string
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
new file mode 100644
index 0000000000..34ce6f6404
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -0,0 +1,60 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphWalker is an interface that, when implemented and used with
+// Graph.Walk, will invoke the given callbacks under certain events.
+type GraphWalker interface {
+	EnterPath([]string) EvalContext
+	ExitPath([]string)
+	EnterVertex(dag.Vertex)
+	ExitVertex(dag.Vertex, error)
+	EnterEvalTree(dag.Vertex, EvalNode) EvalNode
+	ExitEvalTree(dag.Vertex, interface{}, error) error
+}
+
+// GraphWalkerPanicwrapper can be optionally implemented to catch panics
+// that occur while walking the graph. This is not generally recommended
+// since panics should crash Terraform and result in a bug report. However,
+// this is particularly useful for situations like the shadow graph where
+// you don't ever want to cause a panic.
+type GraphWalkerPanicwrapper interface {
+	GraphWalker
+
+	// Panic is called when a panic occurs. This will halt the panic from
+	// propagating, so if the walker still wants the process to crash it
+	// should panic again. This is called from within a defer so
+	// runtime/debug.Stack can be used to get the stack trace of the panic.
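+	//
+	// Editor's sketch of an implementation (hypothetical): Graph.walk
+	// recovers, converts the panic into the vertex error, and then
+	// forwards it here, so simply recording the value is enough:
+	//
+	//	func (w *loggingWrapper) Panic(v dag.Vertex, err interface{}) {
+	//		log.Printf("[ERROR] %s panicked: %v", dag.VertexName(v), err)
+	//	}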
+	Panic(dag.Vertex, interface{})
+}
+
+// GraphWalkerPanicwrap wraps an existing GraphWalker to catch and swallow
+// panics. This doesn't lose the panics since the panics are still
+// returned as errors as part of a graph walk.
+func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
+	return &graphWalkerPanicwrapper{
+		GraphWalker: w,
+	}
+}
+
+type graphWalkerPanicwrapper struct {
+	GraphWalker
+}
+
+func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
+
+// NullGraphWalker is a GraphWalker implementation that does nothing.
+// This can be embedded within other GraphWalker implementations for easily
+// implementing all the required functions.
+type NullGraphWalker struct{}
+
+func (NullGraphWalker) EnterPath([]string) EvalContext                  { return new(MockEvalContext) }
+func (NullGraphWalker) ExitPath([]string)                               {}
+func (NullGraphWalker) EnterVertex(dag.Vertex)                          {}
+func (NullGraphWalker) ExitVertex(dag.Vertex, error)                    {}
+func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }
+func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error {
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
new file mode 100644
index 0000000000..e63b460356
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
@@ -0,0 +1,157 @@
+package terraform
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"sync"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// ContextGraphWalker is the GraphWalker implementation used with the
+// Context struct to walk and evaluate the graph.
+type ContextGraphWalker struct {
+	NullGraphWalker
+
+	// Configurable values
+	Context     *Context
+	Operation   walkOperation
+	StopContext context.Context
+
+	// Outputs, do not set these. Do not read these while the graph
+	// is being walked.
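+	//
+	// Editor's sketch of the intended read pattern (assumed usage):
+	//
+	//	w := &ContextGraphWalker{Context: c, Operation: walkValidate}
+	//	err := graph.Walk(w) // safe to read the fields after this returns
+	//	warnings := w.ValidationWarnings
+	//	errs := w.ValidationErrors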
+	ValidationWarnings []string
+	ValidationErrors   []error
+
+	errorLock           sync.Mutex
+	once                sync.Once
+	contexts            map[string]*BuiltinEvalContext
+	contextLock         sync.Mutex
+	interpolaterVars    map[string]map[string]interface{}
+	interpolaterVarLock sync.Mutex
+	providerCache       map[string]ResourceProvider
+	providerConfigCache map[string]*ResourceConfig
+	providerLock        sync.Mutex
+	provisionerCache    map[string]ResourceProvisioner
+	provisionerLock     sync.Mutex
+}
+
+func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
+	w.once.Do(w.init)
+
+	w.contextLock.Lock()
+	defer w.contextLock.Unlock()
+
+	// If we already have a context for this path cached, use that
+	key := PathCacheKey(path)
+	if ctx, ok := w.contexts[key]; ok {
+		return ctx
+	}
+
+	// Set up the variables for this interpolater
+	variables := make(map[string]interface{})
+	if len(path) <= 1 {
+		for k, v := range w.Context.variables {
+			variables[k] = v
+		}
+	}
+	w.interpolaterVarLock.Lock()
+	if m, ok := w.interpolaterVars[key]; ok {
+		for k, v := range m {
+			variables[k] = v
+		}
+	}
+	w.interpolaterVars[key] = variables
+	w.interpolaterVarLock.Unlock()
+
+	ctx := &BuiltinEvalContext{
+		StopContext:         w.StopContext,
+		PathValue:           path,
+		Hooks:               w.Context.hooks,
+		InputValue:          w.Context.uiInput,
+		Components:          w.Context.components,
+		ProviderCache:       w.providerCache,
+		ProviderConfigCache: w.providerConfigCache,
+		ProviderInputConfig: w.Context.providerInputConfig,
+		ProviderLock:        &w.providerLock,
+		ProvisionerCache:    w.provisionerCache,
+		ProvisionerLock:     &w.provisionerLock,
+		DiffValue:           w.Context.diff,
+		DiffLock:            &w.Context.diffLock,
+		StateValue:          w.Context.state,
+		StateLock:           &w.Context.stateLock,
+		Interpolater: &Interpolater{
+			Operation:          w.Operation,
+			Meta:               w.Context.meta,
+			Module:             w.Context.module,
+			State:              w.Context.state,
+			StateLock:          &w.Context.stateLock,
+			VariableValues:     variables,
+			VariableValuesLock: &w.interpolaterVarLock,
+		},
+		InterpolaterVars:    w.interpolaterVars,
+		InterpolaterVarLock: &w.interpolaterVarLock,
+	}
+
+	w.contexts[key] = ctx
+	return ctx
+}
+
+func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
+	log.Printf("[TRACE] [%s] Entering eval tree: %s",
+		w.Operation, dag.VertexName(v))
+
+	// Acquire a lock on the semaphore
+	w.Context.parallelSem.Acquire()
+
+	// We want to filter the evaluation tree to only include operations
+	// that belong to this operation.
+	return EvalFilter(n, EvalNodeFilterOp(w.Operation))
+}
+
+func (w *ContextGraphWalker) ExitEvalTree(
+	v dag.Vertex, output interface{}, err error) error {
+	log.Printf("[TRACE] [%s] Exiting eval tree: %s",
+		w.Operation, dag.VertexName(v))
+
+	// Release the semaphore
+	w.Context.parallelSem.Release()
+
+	if err == nil {
+		return nil
+	}
+
+	// Acquire the lock because everything below requires it.
+	w.errorLock.Lock()
+	defer w.errorLock.Unlock()
+
+	// Try to get a validation error out of it. If it's not a validation
+	// error, then just record the normal error.
+ verr, ok := err.(*EvalValidateError) + if !ok { + return err + } + + for _, msg := range verr.Warnings { + w.ValidationWarnings = append( + w.ValidationWarnings, + fmt.Sprintf("%s: %s", dag.VertexName(v), msg)) + } + for _, e := range verr.Errors { + w.ValidationErrors = append( + w.ValidationErrors, + errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e)) + } + + return nil +} + +func (w *ContextGraphWalker) init() { + w.contexts = make(map[string]*BuiltinEvalContext, 5) + w.providerCache = make(map[string]ResourceProvider, 5) + w.providerConfigCache = make(map[string]*ResourceConfig, 5) + w.provisionerCache = make(map[string]ResourceProvisioner, 5) + w.interpolaterVars = make(map[string]map[string]interface{}, 5) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go new file mode 100644 index 0000000000..3fb3748191 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go @@ -0,0 +1,18 @@ +package terraform + +//go:generate stringer -type=walkOperation graph_walk_operation.go + +// walkOperation is an enum which tells the walkContext what to do. +type walkOperation byte + +const ( + walkInvalid walkOperation = iota + walkInput + walkApply + walkPlan + walkPlanDestroy + walkRefresh + walkValidate + walkDestroy + walkImport +) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go new file mode 100644 index 0000000000..e97b4855a9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT. + +package terraform + +import "fmt" + +const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" + +var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125} + +func (i GraphType) String() string { + if i >= GraphType(len(_GraphType_index)-1) { + return fmt.Sprintf("GraphType(%d)", i) + } + return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go new file mode 100644 index 0000000000..ab11e8ee01 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go @@ -0,0 +1,137 @@ +package terraform + +// HookAction is an enum of actions that can be taken as a result of a hook +// callback. This allows you to modify the behavior of Terraform at runtime. +type HookAction byte + +const ( + // HookActionContinue continues with processing as usual. + HookActionContinue HookAction = iota + + // HookActionHalt halts immediately: no more hooks are processed + // and the action that Terraform was about to take is cancelled. + HookActionHalt +) + +// Hook is the interface that must be implemented to hook into various +// parts of Terraform, allowing you to inspect or change behavior at runtime. +// +// There are MANY hook points into Terraform. If you only want to implement +// some hook points, but not all (which is the likely case), then embed the +// NilHook into your struct, which implements all of the interface but does +// nothing. Then, override only the functions you want to implement. 
+type Hook interface {
+	// PreApply and PostApply are called before and after a single
+	// resource is applied. The error argument in PostApply is the
+	// error, if any, that was returned from the provider Apply call itself.
+	PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error)
+	PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error)
+
+	// PreDiff and PostDiff are called before and after a single
+	// resource is diffed.
+	PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
+	PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
+
+	// Provisioning hooks
+	//
+	// All should be self-explanatory. ProvisionOutput is called with
+	// output sent back by the provisioners. This will be called multiple
+	// times as output comes in, but each call should represent a line of
+	// output. The ProvisionOutput method cannot control whether the
+	// hook continues running.
+	PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+	PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+	PreProvision(*InstanceInfo, string) (HookAction, error)
+	PostProvision(*InstanceInfo, string, error) (HookAction, error)
+	ProvisionOutput(*InstanceInfo, string, string)
+
+	// PreRefresh and PostRefresh are called before and after a single
+	// resource state is refreshed, respectively.
+	PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+	PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+
+	// PostStateUpdate is called after the state is updated.
+	PostStateUpdate(*State) (HookAction, error)
+
+	// PreImportState and PostImportState are called before and after
+	// a single resource's state is imported.
+	PreImportState(*InstanceInfo, string) (HookAction, error)
+	PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error)
+}
+
+// NilHook is a Hook implementation that does nothing. It exists only to
+// simplify implementing hooks. You can embed this into your Hook implementation
+// and only implement the functions you are interested in.
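+//
+// Editor's sketch of that embedding pattern (hypothetical hook that only
+// counts applies; add a mutex if the walk runs hooks concurrently):
+//
+//	type applyCounter struct {
+//		NilHook
+//		applies int
+//	}
+//
+//	func (h *applyCounter) PostApply(i *InstanceInfo, s *InstanceState, err error) (HookAction, error) {
+//		h.applies++
+//		return HookActionContinue, nil
+//	}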
+type NilHook struct{} + +func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) ProvisionOutput( + *InstanceInfo, string, string) { +} + +func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { + return HookActionContinue, nil +} + +func (*NilHook) PostStateUpdate(*State) (HookAction, error) { + return HookActionContinue, nil +} + +// handleHook turns hook actions into panics. This lets you use the +// panic/recover mechanism in Go as a flow control mechanism for hook +// actions. +func handleHook(a HookAction, err error) { + if err != nil { + // TODO: handle errors + } + + switch a { + case HookActionContinue: + return + case HookActionHalt: + panic(HookActionHalt) + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go new file mode 100644 index 0000000000..0e46400678 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go @@ -0,0 +1,245 @@ +package terraform + +import "sync" + +// MockHook is an implementation of Hook that can be used for tests. +// It records all of its function calls. 
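+//
+// Editor's sketch of typical test usage (testContext is an assumed helper,
+// not part of this file):
+//
+//	h := new(MockHook)
+//	h.PreApplyReturn = HookActionHalt
+//	ctx := testContext(t, &ContextOpts{Hooks: []Hook{h}})
+//	ctx.Apply()
+//	if !h.PreApplyCalled {
+//		t.Fatal("expected PreApply to be called")
+//	}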
+type MockHook struct { + sync.Mutex + + PreApplyCalled bool + PreApplyInfo *InstanceInfo + PreApplyDiff *InstanceDiff + PreApplyState *InstanceState + PreApplyReturn HookAction + PreApplyError error + + PostApplyCalled bool + PostApplyInfo *InstanceInfo + PostApplyState *InstanceState + PostApplyError error + PostApplyReturn HookAction + PostApplyReturnError error + PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error) + + PreDiffCalled bool + PreDiffInfo *InstanceInfo + PreDiffState *InstanceState + PreDiffReturn HookAction + PreDiffError error + + PostDiffCalled bool + PostDiffInfo *InstanceInfo + PostDiffDiff *InstanceDiff + PostDiffReturn HookAction + PostDiffError error + + PreProvisionResourceCalled bool + PreProvisionResourceInfo *InstanceInfo + PreProvisionInstanceState *InstanceState + PreProvisionResourceReturn HookAction + PreProvisionResourceError error + + PostProvisionResourceCalled bool + PostProvisionResourceInfo *InstanceInfo + PostProvisionInstanceState *InstanceState + PostProvisionResourceReturn HookAction + PostProvisionResourceError error + + PreProvisionCalled bool + PreProvisionInfo *InstanceInfo + PreProvisionProvisionerId string + PreProvisionReturn HookAction + PreProvisionError error + + PostProvisionCalled bool + PostProvisionInfo *InstanceInfo + PostProvisionProvisionerId string + PostProvisionErrorArg error + PostProvisionReturn HookAction + PostProvisionError error + + ProvisionOutputCalled bool + ProvisionOutputInfo *InstanceInfo + ProvisionOutputProvisionerId string + ProvisionOutputMessage string + + PostRefreshCalled bool + PostRefreshInfo *InstanceInfo + PostRefreshState *InstanceState + PostRefreshReturn HookAction + PostRefreshError error + + PreRefreshCalled bool + PreRefreshInfo *InstanceInfo + PreRefreshState *InstanceState + PreRefreshReturn HookAction + PreRefreshError error + + PreImportStateCalled bool + PreImportStateInfo *InstanceInfo + PreImportStateId string + PreImportStateReturn HookAction + PreImportStateError error + + PostImportStateCalled bool + PostImportStateInfo *InstanceInfo + PostImportStateState []*InstanceState + PostImportStateReturn HookAction + PostImportStateError error + + PostStateUpdateCalled bool + PostStateUpdateState *State + PostStateUpdateReturn HookAction + PostStateUpdateError error +} + +func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreApplyCalled = true + h.PreApplyInfo = n + h.PreApplyDiff = d + h.PreApplyState = s + return h.PreApplyReturn, h.PreApplyError +} + +func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostApplyCalled = true + h.PostApplyInfo = n + h.PostApplyState = s + h.PostApplyError = e + + if h.PostApplyFn != nil { + return h.PostApplyFn(n, s, e) + } + + return h.PostApplyReturn, h.PostApplyReturnError +} + +func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreDiffCalled = true + h.PreDiffInfo = n + h.PreDiffState = s + return h.PreDiffReturn, h.PreDiffError +} + +func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostDiffCalled = true + h.PostDiffInfo = n + h.PostDiffDiff = d + return h.PostDiffReturn, h.PostDiffError +} + +func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + 
h.PreProvisionResourceCalled = true + h.PreProvisionResourceInfo = n + h.PreProvisionInstanceState = s + return h.PreProvisionResourceReturn, h.PreProvisionResourceError +} + +func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostProvisionResourceCalled = true + h.PostProvisionResourceInfo = n + h.PostProvisionInstanceState = s + return h.PostProvisionResourceReturn, h.PostProvisionResourceError +} + +func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreProvisionCalled = true + h.PreProvisionInfo = n + h.PreProvisionProvisionerId = provId + return h.PreProvisionReturn, h.PreProvisionError +} + +func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostProvisionCalled = true + h.PostProvisionInfo = n + h.PostProvisionProvisionerId = provId + h.PostProvisionErrorArg = err + return h.PostProvisionReturn, h.PostProvisionError +} + +func (h *MockHook) ProvisionOutput( + n *InstanceInfo, + provId string, + msg string) { + h.Lock() + defer h.Unlock() + + h.ProvisionOutputCalled = true + h.ProvisionOutputInfo = n + h.ProvisionOutputProvisionerId = provId + h.ProvisionOutputMessage = msg +} + +func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreRefreshCalled = true + h.PreRefreshInfo = n + h.PreRefreshState = s + return h.PreRefreshReturn, h.PreRefreshError +} + +func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostRefreshCalled = true + h.PostRefreshInfo = n + h.PostRefreshState = s + return h.PostRefreshReturn, h.PostRefreshError +} + +func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PreImportStateCalled = true + h.PreImportStateInfo = info + h.PreImportStateId = id + return h.PreImportStateReturn, h.PreImportStateError +} + +func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostImportStateCalled = true + h.PostImportStateInfo = info + h.PostImportStateState = s + return h.PostImportStateReturn, h.PostImportStateError +} + +func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) { + h.Lock() + defer h.Unlock() + + h.PostStateUpdateCalled = true + h.PostStateUpdateState = s + return h.PostStateUpdateReturn, h.PostStateUpdateError +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go new file mode 100644 index 0000000000..104d0098a1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go @@ -0,0 +1,87 @@ +package terraform + +import ( + "sync/atomic" +) + +// stopHook is a private Hook implementation that Terraform uses to +// signal when to stop or cancel actions. 
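+//
+// Editor's note: stop is an atomic flag rather than a mutex because the
+// hook is consulted on every callback from concurrently-walked vertices.
+// A sketch of the flow (assumed usage):
+//
+//	var h stopHook
+//	go h.Stop() // e.g. triggered by SIGINT
+//	if action, _ := h.PreApply(nil, nil, nil); action == HookActionHalt {
+//		// handleHook (see hook.go) unwinds the walk via panic(HookActionHalt)
+//	}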
+type stopHook struct { + stop uint32 +} + +func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) { +} + +func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) PostStateUpdate(*State) (HookAction, error) { + return h.hook() +} + +func (h *stopHook) hook() (HookAction, error) { + if h.Stopped() { + return HookActionHalt, nil + } + + return HookActionContinue, nil +} + +// reset should be called within the lock context +func (h *stopHook) Reset() { + atomic.StoreUint32(&h.stop, 0) +} + +func (h *stopHook) Stop() { + atomic.StoreUint32(&h.stop, 1) +} + +func (h *stopHook) Stopped() bool { + return atomic.LoadUint32(&h.stop) == 1 +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go new file mode 100644 index 0000000000..08959717b9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go @@ -0,0 +1,13 @@ +package terraform + +//go:generate stringer -type=InstanceType instancetype.go + +// InstanceType is an enum of the various types of instances store in the State +type InstanceType int + +const ( + TypeInvalid InstanceType = iota + TypePrimary + TypeTainted + TypeDeposed +) diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go new file mode 100644 index 0000000000..f69267cd52 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT. 
+ +package terraform + +import "fmt" + +const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" + +var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i InstanceType) String() string { + if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { + return fmt.Sprintf("InstanceType(%d)", i) + } + return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go new file mode 100644 index 0000000000..0def295fa9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go @@ -0,0 +1,795 @@ +package terraform + +import ( + "fmt" + "log" + "os" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/hil" + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/flatmap" +) + +const ( + // VarEnvPrefix is the prefix of variables that are read from + // the environment to set variables here. + VarEnvPrefix = "TF_VAR_" +) + +// Interpolater is the structure responsible for determining the values +// for interpolations such as `aws_instance.foo.bar`. +type Interpolater struct { + Operation walkOperation + Meta *ContextMeta + Module *module.Tree + State *State + StateLock *sync.RWMutex + VariableValues map[string]interface{} + VariableValuesLock *sync.Mutex +} + +// InterpolationScope is the current scope of execution. This is required +// since some variables which are interpolated are dependent on what we're +// operating on and where we are. +type InterpolationScope struct { + Path []string + Resource *Resource +} + +// Values returns the values for all the variables in the given map. 
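+//
+// Editor's sketch (hypothetical input/output): given a *config.UserVariable
+// for "region" whose value has been set, the result maps the full key to a
+// HIL variable:
+//
+//	result["var.region"] = ast.Variable{
+//		Type:  ast.TypeString,
+//		Value: "us-south",
+//	}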
+func (i *Interpolater) Values( + scope *InterpolationScope, + vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) { + if scope == nil { + scope = &InterpolationScope{} + } + + result := make(map[string]ast.Variable, len(vars)) + + // Copy the default variables + if i.Module != nil && scope != nil { + mod := i.Module + if len(scope.Path) > 1 { + mod = i.Module.Child(scope.Path[1:]) + } + for _, v := range mod.Config().Variables { + // Set default variables + if v.Default == nil { + continue + } + + n := fmt.Sprintf("var.%s", v.Name) + variable, err := hil.InterfaceToVariable(v.Default) + if err != nil { + return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default) + } + + result[n] = variable + } + } + + for n, rawV := range vars { + var err error + switch v := rawV.(type) { + case *config.CountVariable: + err = i.valueCountVar(scope, n, v, result) + case *config.ModuleVariable: + err = i.valueModuleVar(scope, n, v, result) + case *config.PathVariable: + err = i.valuePathVar(scope, n, v, result) + case *config.ResourceVariable: + err = i.valueResourceVar(scope, n, v, result) + case *config.SelfVariable: + err = i.valueSelfVar(scope, n, v, result) + case *config.SimpleVariable: + err = i.valueSimpleVar(scope, n, v, result) + case *config.TerraformVariable: + err = i.valueTerraformVar(scope, n, v, result) + case *config.UserVariable: + err = i.valueUserVar(scope, n, v, result) + default: + err = fmt.Errorf("%s: unknown variable type: %T", n, rawV) + } + + if err != nil { + return nil, err + } + } + + return result, nil +} + +func (i *Interpolater) valueCountVar( + scope *InterpolationScope, + n string, + v *config.CountVariable, + result map[string]ast.Variable) error { + switch v.Type { + case config.CountValueIndex: + if scope.Resource == nil { + return fmt.Errorf("%s: count.index is only valid within resources", n) + } + result[n] = ast.Variable{ + Value: scope.Resource.CountIndex, + Type: ast.TypeInt, + } + return nil + default: + return fmt.Errorf("%s: unknown count type: %#v", n, v.Type) + } +} + +func unknownVariable() ast.Variable { + return ast.Variable{ + Type: ast.TypeUnknown, + Value: config.UnknownVariableValue, + } +} + +func unknownValue() string { + return hil.UnknownValue +} + +func (i *Interpolater) valueModuleVar( + scope *InterpolationScope, + n string, + v *config.ModuleVariable, + result map[string]ast.Variable) error { + + // Build the path to the child module we want + path := make([]string, len(scope.Path), len(scope.Path)+1) + copy(path, scope.Path) + path = append(path, v.Name) + + // Grab the lock so that if other interpolations are running or + // state is being modified, we'll be safe. + i.StateLock.RLock() + defer i.StateLock.RUnlock() + + // Get the module where we're looking for the value + mod := i.State.ModuleByPath(path) + if mod == nil { + // If the module doesn't exist, then we can return an empty string. + // This happens usually only in Refresh() when we haven't populated + // a state. During validation, we semantically verify that all + // modules reference other modules, and graph ordering should + // ensure that the module is in the state, so if we reach this + // point otherwise it really is a panic. 
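+		// Editor's note: for orientation, a reference like
+		// "${module.network.subnet_id}" made from the root module yields
+		// (hypothetical names):
+		//
+		//	path == []string{"root", "network"}
+		//
+		// and v.Field ("subnet_id") is then looked up in that module's
+		// Outputs below.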
+ result[n] = unknownVariable() + + // During apply this is always an error + if i.Operation == walkApply { + return fmt.Errorf( + "Couldn't find module %q for var: %s", + v.Name, v.FullKey()) + } + } else { + // Get the value from the outputs + if outputState, ok := mod.Outputs[v.Field]; ok { + output, err := hil.InterfaceToVariable(outputState.Value) + if err != nil { + return err + } + result[n] = output + } else { + // Same reasons as the comment above. + result[n] = unknownVariable() + + // During apply this is always an error + if i.Operation == walkApply { + return fmt.Errorf( + "Couldn't find output %q for module var: %s", + v.Field, v.FullKey()) + } + } + } + + return nil +} + +func (i *Interpolater) valuePathVar( + scope *InterpolationScope, + n string, + v *config.PathVariable, + result map[string]ast.Variable) error { + switch v.Type { + case config.PathValueCwd: + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf( + "Couldn't get cwd for var %s: %s", + v.FullKey(), err) + } + + result[n] = ast.Variable{ + Value: wd, + Type: ast.TypeString, + } + case config.PathValueModule: + if t := i.Module.Child(scope.Path[1:]); t != nil { + result[n] = ast.Variable{ + Value: t.Config().Dir, + Type: ast.TypeString, + } + } + case config.PathValueRoot: + result[n] = ast.Variable{ + Value: i.Module.Config().Dir, + Type: ast.TypeString, + } + default: + return fmt.Errorf("%s: unknown path type: %#v", n, v.Type) + } + + return nil + +} + +func (i *Interpolater) valueResourceVar( + scope *InterpolationScope, + n string, + v *config.ResourceVariable, + result map[string]ast.Variable) error { + // If we're computing all dynamic fields, then module vars count + // and we mark it as computed. + if i.Operation == walkValidate { + result[n] = unknownVariable() + return nil + } + + var variable *ast.Variable + var err error + + if v.Multi && v.Index == -1 { + variable, err = i.computeResourceMultiVariable(scope, v) + } else { + variable, err = i.computeResourceVariable(scope, v) + } + + if err != nil { + return err + } + + if variable == nil { + // During the input walk we tolerate missing variables because + // we haven't yet had a chance to refresh state, so dynamic data may + // not yet be complete. + // If it truly is missing, we'll catch it on a later walk. + // This applies only to graph nodes that interpolate during the + // config walk, e.g. providers. + if i.Operation == walkInput || i.Operation == walkRefresh { + result[n] = unknownVariable() + return nil + } + + return fmt.Errorf("variable %q is nil, but no error was reported", v.Name) + } + + result[n] = *variable + return nil +} + +func (i *Interpolater) valueSelfVar( + scope *InterpolationScope, + n string, + v *config.SelfVariable, + result map[string]ast.Variable) error { + if scope == nil || scope.Resource == nil { + return fmt.Errorf( + "%s: invalid scope, self variables are only valid on resources", n) + } + + rv, err := config.NewResourceVariable(fmt.Sprintf( + "%s.%s.%d.%s", + scope.Resource.Type, + scope.Resource.Name, + scope.Resource.CountIndex, + v.Field)) + if err != nil { + return err + } + + return i.valueResourceVar(scope, n, rv, result) +} + +func (i *Interpolater) valueSimpleVar( + scope *InterpolationScope, + n string, + v *config.SimpleVariable, + result map[string]ast.Variable) error { + // This error message includes some information for people who + // relied on this for their template_file data sources. We should + // remove this at some point but there isn't any rush. 
+ return fmt.Errorf( + "invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of inline `template` parameter\n"+ + "then you must escape the interpolation with two dollar signs. For\n"+ + "example: ${a} becomes $${a}.", + n, n) +} + +func (i *Interpolater) valueTerraformVar( + scope *InterpolationScope, + n string, + v *config.TerraformVariable, + result map[string]ast.Variable) error { + if v.Field != "env" { + return fmt.Errorf( + "%s: only supported key for 'terraform.X' interpolations is 'env'", n) + } + + if i.Meta == nil { + return fmt.Errorf( + "%s: internal error: nil Meta. Please report a bug.", n) + } + + result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env} + return nil +} + +func (i *Interpolater) valueUserVar( + scope *InterpolationScope, + n string, + v *config.UserVariable, + result map[string]ast.Variable) error { + i.VariableValuesLock.Lock() + defer i.VariableValuesLock.Unlock() + val, ok := i.VariableValues[v.Name] + if ok { + varValue, err := hil.InterfaceToVariable(val) + if err != nil { + return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s", + v.Name, val, err) + } + result[n] = varValue + return nil + } + + if _, ok := result[n]; !ok && i.Operation == walkValidate { + result[n] = unknownVariable() + return nil + } + + // Look up if we have any variables with this prefix because + // those are map overrides. Include those. + for k, val := range i.VariableValues { + if strings.HasPrefix(k, v.Name+".") { + keyComponents := strings.Split(k, ".") + overrideKey := keyComponents[len(keyComponents)-1] + + mapInterface, ok := result["var."+v.Name] + if !ok { + return fmt.Errorf("override for non-existent variable: %s", v.Name) + } + + mapVariable := mapInterface.Value.(map[string]ast.Variable) + + varValue, err := hil.InterfaceToVariable(val) + if err != nil { + return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s", + v.Name, val, err) + } + mapVariable[overrideKey] = varValue + } + } + + return nil +} + +func (i *Interpolater) computeResourceVariable( + scope *InterpolationScope, + v *config.ResourceVariable) (*ast.Variable, error) { + id := v.ResourceId() + if v.Multi { + id = fmt.Sprintf("%s.%d", id, v.Index) + } + + i.StateLock.RLock() + defer i.StateLock.RUnlock() + + unknownVariable := unknownVariable() + + // These variables must be declared early because of the use of GOTO + var isList bool + var isMap bool + + // Get the information about this resource variable, and verify + // that it exists and such. + module, cr, err := i.resourceVariableInfo(scope, v) + if err != nil { + return nil, err + } + + // If we're requesting "count" its a special variable that we grab + // directly from the config itself. + if v.Field == "count" { + var count int + if cr != nil { + count, err = cr.Count() + } else { + count, err = i.resourceCountMax(module, cr, v) + } + if err != nil { + return nil, fmt.Errorf( + "Error reading %s count: %s", + v.ResourceId(), + err) + } + + return &ast.Variable{Type: ast.TypeInt, Value: count}, nil + } + + // Get the resource out from the state. We know the state exists + // at this point and if there is a state, we expect there to be a + // resource with the given name. 
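+	// (For index 0 the lookup below also falls back to the unsuffixed ID,
+	// since a resource with count = 1 may be stored in the state as either
+	// "type.name" or "type.name.0".)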
+	var r *ResourceState
+	if module != nil && len(module.Resources) > 0 {
+		var ok bool
+		r, ok = module.Resources[id]
+		if !ok && v.Multi && v.Index == 0 {
+			r, ok = module.Resources[v.ResourceId()]
+		}
+		if !ok {
+			r = nil
+		}
+	}
+	if r == nil || r.Primary == nil {
+		if i.Operation == walkApply || i.Operation == walkPlan {
+			return nil, fmt.Errorf(
+				"Resource '%s' not found for variable '%s'",
+				v.ResourceId(),
+				v.FullKey())
+		}
+
+		// If we have no module in the state yet or count, return empty.
+		// NOTE(@mitchellh): I actually don't know why this is here. During
+		// a refactor I kept this here to maintain the same behavior, but
+		// I'm not sure why it's here.
+		if module == nil || len(module.Resources) == 0 {
+			return nil, nil
+		}
+
+		goto MISSING
+	}
+
+	if attr, ok := r.Primary.Attributes[v.Field]; ok {
+		v, err := hil.InterfaceToVariable(attr)
+		return &v, err
+	}
+
+	// computed list or map attribute
+	_, isList = r.Primary.Attributes[v.Field+".#"]
+	_, isMap = r.Primary.Attributes[v.Field+".%"]
+	if isList || isMap {
+		variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
+		return &variable, err
+	}
+
+	// At apply time, we can't do the "maybe has it" check below
+	// that we need for plans since parent elements might be computed.
+	// Therefore, it is an error and we're missing the key.
+	//
+	// TODO: test by creating a state and configuration that is referencing
+	// a non-existent variable "foo.bar" where the state only has "foo"
+	// and verify plan works, but apply doesn't.
+	if i.Operation == walkApply || i.Operation == walkDestroy {
+		goto MISSING
+	}
+
+	// We didn't find the exact field, so let's split on the dots
+	// and see if anything along the way is a computed set. i.e. if
+	// we have "foo.0.bar" as the field, check to see if "foo" is
+	// a computed list. If so, then the whole thing is computed.
+	if parts := strings.Split(v.Field, "."); len(parts) > 1 {
+		for i := 1; i < len(parts); i++ {
+			// Lists and sets make this
+			key := fmt.Sprintf("%s.#", strings.Join(parts[:i], "."))
+			if attr, ok := r.Primary.Attributes[key]; ok {
+				v, err := hil.InterfaceToVariable(attr)
+				return &v, err
+			}
+
+			// Maps make this
+			key = strings.Join(parts[:i], ".")
+			if attr, ok := r.Primary.Attributes[key]; ok {
+				v, err := hil.InterfaceToVariable(attr)
+				return &v, err
+			}
+		}
+	}
+
+MISSING:
+	// Validation for missing interpolations should happen at a higher
+	// semantic level. If we reached this point and don't have variables,
+	// just return the computed value.
+	if scope == nil || scope.Resource == nil {
+		return &unknownVariable, nil
+	}
+
+	// If the operation is refresh, it isn't an error for a value to
+	// be unknown. Instead, we return that the value is computed so
+	// that the graph can continue to refresh other nodes. It doesn't
+	// matter because the config isn't interpolated anyway.
+	//
+	// For a plan of destruction (walkPlanDestroy), we're also fine with
+	// computed values, since our goal is only to get destroy nodes for
+	// existing resources.
+	//
+	// For an input walk, computed values are okay to return because we're only
+	// looking for missing variables to prompt the user for.
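+	// Note that walkDestroy is not in this list: in computeResourceVariable
+	// a missing attribute during a full destroy falls through to the error
+	// below.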
+ if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput { + return &unknownVariable, nil + } + + return nil, fmt.Errorf( + "Resource '%s' does not have attribute '%s' "+ + "for variable '%s'", + id, + v.Field, + v.FullKey()) +} + +func (i *Interpolater) computeResourceMultiVariable( + scope *InterpolationScope, + v *config.ResourceVariable) (*ast.Variable, error) { + i.StateLock.RLock() + defer i.StateLock.RUnlock() + + unknownVariable := unknownVariable() + + // If we're only looking for input, we don't need to expand a + // multi-variable. This prevents us from encountering things that should be + // known but aren't because the state has yet to be refreshed. + if i.Operation == walkInput { + return &unknownVariable, nil + } + + // Get the information about this resource variable, and verify + // that it exists and such. + module, cr, err := i.resourceVariableInfo(scope, v) + if err != nil { + return nil, err + } + + // Get the keys for all the resources that are created for this resource + countMax, err := i.resourceCountMax(module, cr, v) + if err != nil { + return nil, err + } + + // If count is zero, we return an empty list + if countMax == 0 { + return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil + } + + // If we have no module in the state yet or count, return unknown + if module == nil || len(module.Resources) == 0 { + return &unknownVariable, nil + } + + var values []interface{} + for idx := 0; idx < countMax; idx++ { + id := fmt.Sprintf("%s.%d", v.ResourceId(), idx) + + // ID doesn't have a trailing index. We try both here, but if a value + // without a trailing index is found we prefer that. This choice + // is for legacy reasons: older versions of TF preferred it. + if id == v.ResourceId()+".0" { + potential := v.ResourceId() + if _, ok := module.Resources[potential]; ok { + id = potential + } + } + + r, ok := module.Resources[id] + if !ok { + continue + } + + if r.Primary == nil { + continue + } + + if singleAttr, ok := r.Primary.Attributes[v.Field]; ok { + values = append(values, singleAttr) + continue + } + + // computed list or map attribute + _, isList := r.Primary.Attributes[v.Field+".#"] + _, isMap := r.Primary.Attributes[v.Field+".%"] + if !(isList || isMap) { + continue + } + multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes) + if err != nil { + return nil, err + } + + values = append(values, multiAttr) + } + + if len(values) == 0 { + // If the operation is refresh, it isn't an error for a value to + // be unknown. Instead, we return that the value is computed so + // that the graph can continue to refresh other nodes. It doesn't + // matter because the config isn't interpolated anyways. + // + // For a Destroy, we're also fine with computed values, since our goal is + // only to get destroy nodes for existing resources. + // + // For an input walk, computed values are okay to return because we're only + // looking for missing variables to prompt the user for. 
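+		// (Unlike computeResourceVariable above, walkDestroy is tolerated
+		// here as well.)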
+		if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput {
+			return &unknownVariable, nil
+		}
+
+		return nil, fmt.Errorf(
+			"Resource '%s' does not have attribute '%s' "+
+				"for variable '%s'",
+			v.ResourceId(),
+			v.Field,
+			v.FullKey())
+	}
+
+	variable, err := hil.InterfaceToVariable(values)
+	return &variable, err
+}
+
+func (i *Interpolater) interpolateComplexTypeAttribute(
+	resourceID string,
+	attributes map[string]string) (ast.Variable, error) {
+
+	// We can now distinguish between lists and maps in state by the count field:
+	// - lists (and by extension, sets) use the traditional .# notation
+	// - maps use the newer .% notation
+	// Consequently here we can decide how to deal with the keys appropriately
+	// based on whether the type is a map or a list.
+	if lengthAttr, isList := attributes[resourceID+".#"]; isList {
+		log.Printf("[DEBUG] Interpolating computed list element attribute %s (%s)",
+			resourceID, lengthAttr)
+
+		// In Terraform's internal dotted representation of list-like attributes, the
+		// ".#" count field is marked as unknown to indicate "this whole list is
+		// unknown". We must honor that meaning here so computed references can be
+		// treated properly during the plan phase.
+		if lengthAttr == config.UnknownVariableValue {
+			return unknownVariable(), nil
+		}
+
+		expanded := flatmap.Expand(attributes, resourceID)
+		return hil.InterfaceToVariable(expanded)
+	}
+
+	if lengthAttr, isMap := attributes[resourceID+".%"]; isMap {
+		log.Printf("[DEBUG] Interpolating computed map element attribute %s (%s)",
+			resourceID, lengthAttr)
+
+		// In Terraform's internal dotted representation of map attributes, the
+		// ".%" count field is marked as unknown to indicate "this whole map is
+		// unknown". We must honor that meaning here so computed references can be
+		// treated properly during the plan phase.
+		if lengthAttr == config.UnknownVariableValue {
+			return unknownVariable(), nil
+		}
+
+		expanded := flatmap.Expand(attributes, resourceID)
+		return hil.InterfaceToVariable(expanded)
+	}
+
+	return ast.Variable{}, fmt.Errorf("No complex type %s found", resourceID)
+}
+
+func (i *Interpolater) resourceVariableInfo(
+	scope *InterpolationScope,
+	v *config.ResourceVariable) (*ModuleState, *config.Resource, error) {
+	// Get the module tree that contains our current path. This is
+	// either the current module (path is empty) or a child.
+	modTree := i.Module
+	if len(scope.Path) > 1 {
+		modTree = i.Module.Child(scope.Path[1:])
+	}
+
+	// Get the resource from the configuration so we can verify
+	// that the resource is in the configuration and so we can access
+	// the configuration if we need to.
+	var cr *config.Resource
+	for _, r := range modTree.Config().Resources {
+		if r.Id() == v.ResourceId() {
+			cr = r
+			break
+		}
+	}
+
+	// Get the relevant module
+	module := i.State.ModuleByPath(scope.Path)
+	return module, cr, nil
+}
+
+func (i *Interpolater) resourceCountMax(
+	ms *ModuleState,
+	cr *config.Resource,
+	v *config.ResourceVariable) (int, error) {
+	id := v.ResourceId()
+
+	// If we're NOT applying, we may not have any state yet (plan and
+	// so on), so instead of reading the count from the state we read it
+	// from the configuration via a full interpolation.
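+	// For example, with count = "${var.n}", cr.Count() resolves the
+	// interpolation at plan time; during apply we instead scan state keys
+	// such as "aws_instance.foo.3" below for the highest index.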
+	if i.Operation != walkApply {
+		if cr == nil {
+			return 0, nil
+		}
+
+		count, err := cr.Count()
+		if err != nil {
+			return 0, err
+		}
+
+		return count, nil
+	}
+
+	// If we have no module state in the apply walk, that suggests we've hit
+	// a rather awkward edge-case: the resource this variable refers to
+	// has count = 0 and is the only resource processed so far on this walk,
+	// and so we've ended up not creating any resource states yet. We don't
+	// create a module state until the first resource is written into it,
+	// so the module state doesn't exist when we get here.
+	//
+	// In this case we act as we would if we had been passed a module
+	// with an empty resource state map.
+	if ms == nil {
+		return 0, nil
+	}
+
+	// We need to scan the resource keys in the state to find the highest
+	// index. We used to use "cr.Count()" but that doesn't work if the
+	// count is interpolated and we can't guarantee that, so we instead
+	// depend on the state.
+	max := -1
+	for k := range ms.Resources {
+		// Get the index number for this resource
+		index := ""
+		if k == id {
+			// If the key is the id, then it's just 0 (no explicit index)
+			index = "0"
+		} else if strings.HasPrefix(k, id+".") {
+			// Grab the index number out of the state
+			index = k[len(id+"."):]
+			if idx := strings.IndexRune(index, '.'); idx >= 0 {
+				index = index[:idx]
+			}
+		}
+
+		// If there was no index then this resource didn't match
+		// the one we're looking for, so skip it.
+		if index == "" {
+			continue
+		}
+
+		// Turn the index into an int
+		raw, err := strconv.ParseInt(index, 0, 0)
+		if err != nil {
+			return 0, fmt.Errorf(
+				"%s: error parsing index %q as int: %s",
+				id, index, err)
+		}
+
+		// Keep track of this index if it's the max
+		if new := int(raw); new > max {
+			max = new
+		}
+	}
+
+	// If we never found any matching resources in the state, we
+	// have zero.
+	if max == -1 {
+		return 0, nil
+	}
+
+	// The result value is "max+1" because we're returning the
+	// max COUNT, not the max INDEX, and we zero-index.
+	return max + 1, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
new file mode 100644
index 0000000000..bd32c79f34
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
@@ -0,0 +1,14 @@
+package terraform
+
+// NodeCountBoundary fixes any "count boundaries" in the state: resources
+// that are named "foo.0" when they should be named "foo"
+type NodeCountBoundary struct{}
+
+func (n *NodeCountBoundary) Name() string {
+	return "meta.count-boundary (count boundary fixup)"
+}
+
+// GraphNodeEvalable
+func (n *NodeCountBoundary) EvalTree() EvalNode {
+	return &EvalCountFixZeroOneBoundaryGlobal{}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
new file mode 100644
index 0000000000..e32cea8825
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
@@ -0,0 +1,22 @@
+package terraform
+
+// NodeDestroyableDataResource represents a data resource that is
+// "destroyable": its entry can simply be dropped from the state.
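+// There is no real infrastructure to tear down, so the EvalTree below
+// just writes a nil InstanceState under the resource's state ID.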
+type NodeDestroyableDataResource struct { + *NodeAbstractResource +} + +// GraphNodeEvalable +func (n *NodeDestroyableDataResource) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Just destroy it. + var state *InstanceState + return &EvalWriteState{ + Name: stateId, + State: &state, // state is nil here + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go new file mode 100644 index 0000000000..45129b3cbf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go @@ -0,0 +1,218 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/dag" +) + +// NodeRefreshableDataResource represents a resource that is "plannable": +// it is ready to be planned in order to create a diff. +type NodeRefreshableDataResource struct { + *NodeAbstractCountResource +} + +// GraphNodeDynamicExpandable +func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + // Grab the state which we read + state, lock := ctx.State() + lock.RLock() + defer lock.RUnlock() + + // Expand the resource count which must be available by now from EvalTree + count, err := n.Config.Count() + if err != nil { + return nil, err + } + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + + return &NodeRefreshableDataResourceInstance{ + NodeAbstractResource: a, + } + } + + // We also need a destroyable resource for orphans that are a result of a + // scaled-in count. + concreteResourceDestroyable := func(a *NodeAbstractResource) dag.Vertex { + // Add the config since we don't do that via transforms + a.Config = n.Config + + return &NodeDestroyableDataResource{ + NodeAbstractResource: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count. + &ResourceCountTransformer{ + Concrete: concreteResource, + Count: count, + Addr: n.ResourceAddr(), + }, + + // Add the count orphans. As these are orphaned refresh nodes, we add them + // directly as NodeDestroyableDataResource. + &OrphanResourceCountTransformer{ + Concrete: concreteResourceDestroyable, + Count: count, + Addr: n.ResourceAddr(), + State: state, + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{ParsedTargets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodeRefreshableDataResource", + } + + return b.Build(ctx.Path()) +} + +// NodeRefreshableDataResourceInstance represents a _single_ resource instance +// that is refreshable. +type NodeRefreshableDataResourceInstance struct { + *NodeAbstractResource +} + +// GraphNodeEvalable +func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. 
More of this will be populated during eval
+	info := &InstanceInfo{
+		Id:   stateId,
+		Type: addr.Type,
+	}
+
+	// Get the state if we have it, if not we build it
+	rs := n.ResourceState
+	if rs == nil {
+		rs = &ResourceState{}
+	}
+
+	// If the config isn't empty we update the state
+	if n.Config != nil {
+		rs = &ResourceState{
+			Type:         n.Config.Type,
+			Provider:     n.Config.Provider,
+			Dependencies: n.StateReferences(),
+		}
+	}
+
+	// Build the resource for eval
+	resource := &Resource{
+		Name:       addr.Name,
+		Type:       addr.Type,
+		CountIndex: addr.Index,
+	}
+	if resource.CountIndex < 0 {
+		resource.CountIndex = 0
+	}
+
+	// Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by-address below.
+	var config *ResourceConfig
+	var diff *InstanceDiff
+	var provider ResourceProvider
+	var state *InstanceState
+
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			// Always destroy the existing state first, since we must
+			// make sure that values from a previous read will not
+			// get interpolated if we end up needing to defer our
+			// loading until apply time.
+			&EvalWriteState{
+				Name:         stateId,
+				ResourceType: rs.Type,
+				Provider:     rs.Provider,
+				Dependencies: rs.Dependencies,
+				State:        &state, // state is nil here
+			},
+
+			&EvalInterpolate{
+				Config:   n.Config.RawConfig.Copy(),
+				Resource: resource,
+				Output:   &config,
+			},
+
+			// The rest of this pass can proceed only if there are no
+			// computed values in our config.
+			// (If there are, we'll deal with this during the plan and
+			// apply phases.)
+			&EvalIf{
+				If: func(ctx EvalContext) (bool, error) {
+					if len(config.ComputedKeys) > 0 {
+						return true, EvalEarlyExitError{}
+					}
+
+					// If the config explicitly has a depends_on for this
+					// data source, assume the intention is to prevent
+					// refreshing ahead of that dependency.
+					if len(n.Config.DependsOn) > 0 {
+						return true, EvalEarlyExitError{}
+					}
+
+					return true, nil
+				},
+
+				Then: EvalNoop{},
+			},
+
+			// The remainder of this pass is the same as running
+			// a "plan" pass immediately followed by an "apply" pass,
+			// populating the state early so it'll be available to
+			// provider configurations that need this data during
+			// refresh/plan.
+			&EvalGetProvider{
+				Name:   n.ProvidedBy()[0],
+				Output: &provider,
+			},
+
+			&EvalReadDataDiff{
+				Info:        info,
+				Config:      &config,
+				Provider:    &provider,
+				Output:      &diff,
+				OutputState: &state,
+			},
+
+			&EvalReadDataApply{
+				Info:     info,
+				Diff:     &diff,
+				Provider: &provider,
+				Output:   &state,
+			},
+
+			&EvalWriteState{
+				Name:         stateId,
+				ResourceType: rs.Type,
+				Provider:     rs.Provider,
+				Dependencies: rs.Dependencies,
+				State:        &state,
+			},
+
+			&EvalUpdateStateHook{},
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
new file mode 100644
index 0000000000..319df1e3a4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
@@ -0,0 +1,29 @@
+package terraform
+
+import (
+	"fmt"
+)
+
+// NodeDestroyableModuleVariable represents a module variable that is
+// being destroyed as part of planning a destroy.
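+// Its EvalTree simply emits an EvalDiffDestroyModule for the module's path.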
+type NodeDestroyableModuleVariable struct { + PathValue []string +} + +func (n *NodeDestroyableModuleVariable) Name() string { + result := "plan-destroy" + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeDestroyableModuleVariable) Path() []string { + return n.PathValue +} + +// GraphNodeEvalable +func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode { + return &EvalDiffDestroyModule{Path: n.PathValue} +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go new file mode 100644 index 0000000000..13fe8fc3ae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go @@ -0,0 +1,125 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" +) + +// NodeApplyableModuleVariable represents a module variable input during +// the apply step. +type NodeApplyableModuleVariable struct { + PathValue []string + Config *config.Variable // Config is the var in the config + Value *config.RawConfig // Value is the value that is set + + Module *module.Tree // Antiquated, want to remove +} + +func (n *NodeApplyableModuleVariable) Name() string { + result := fmt.Sprintf("var.%s", n.Config.Name) + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeApplyableModuleVariable) Path() []string { + // We execute in the parent scope (above our own module) so that + // we can access the proper interpolations. + if len(n.PathValue) > 2 { + return n.PathValue[:len(n.PathValue)-1] + } + + return rootModulePath +} + +// RemovableIfNotTargeted +func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeReferenceGlobal +func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool { + // We have to create fully qualified references because we cross + // boundaries here: our ReferenceableName is in one path and our + // References are from another path. + return true +} + +// GraphNodeReferenceable +func (n *NodeApplyableModuleVariable) ReferenceableName() []string { + return []string{n.Name()} +} + +// GraphNodeReferencer +func (n *NodeApplyableModuleVariable) References() []string { + // If we have no value set, we depend on nothing + if n.Value == nil { + return nil + } + + // Can't depend on anything if we're in the root + if len(n.PathValue) < 2 { + return nil + } + + // Otherwise, we depend on anything that is in our value, but + // specifically in the namespace of the parent path. + // Create the prefix based on the path + var prefix string + if p := n.Path(); len(p) > 0 { + prefix = modulePrefixStr(p) + } + + result := ReferencesFromConfig(n.Value) + return modulePrefixList(result, prefix) +} + +// GraphNodeEvalable +func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { + // If we have no value, do nothing + if n.Value == nil { + return &EvalNoop{} + } + + // Otherwise, interpolate the value of this variable and set it + // within the variables mapping. 
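+	// The eval sequence below interpolates the raw value, gathers the
+	// results into a variables map, coerces and type-checks them against
+	// the module's declared variables, and finally stores them for the
+	// child module.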
+ var config *ResourceConfig + variables := make(map[string]interface{}) + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalInterpolate{ + Config: n.Value, + Output: &config, + }, + + &EvalVariableBlock{ + Config: &config, + VariableValues: variables, + }, + + &EvalCoerceMapVariable{ + Variables: variables, + ModulePath: n.PathValue, + ModuleTree: n.Module, + }, + + &EvalTypeCheckVariable{ + Variables: variables, + ModulePath: n.PathValue, + ModuleTree: n.Module, + }, + + &EvalSetVariables{ + Module: &n.PathValue[len(n.PathValue)-1], + Variables: variables, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go new file mode 100644 index 0000000000..9017a63c40 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go @@ -0,0 +1,85 @@ +package terraform + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/dag" +) + +// NodeApplyableOutput represents an output that is "applyable": +// it is ready to be applied. +type NodeApplyableOutput struct { + PathValue []string + Config *config.Output // Config is the output in the config +} + +func (n *NodeApplyableOutput) Name() string { + result := fmt.Sprintf("output.%s", n.Config.Name) + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeApplyableOutput) Path() []string { + return n.PathValue +} + +// RemovableIfNotTargeted +func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeTargetDownstream +func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { + // If any of the direct dependencies of an output are targeted then + // the output must always be targeted as well, so its value will always + // be up-to-date at the completion of an apply walk. + return true +} + +// GraphNodeReferenceable +func (n *NodeApplyableOutput) ReferenceableName() []string { + name := fmt.Sprintf("output.%s", n.Config.Name) + return []string{name} +} + +// GraphNodeReferencer +func (n *NodeApplyableOutput) References() []string { + var result []string + result = append(result, n.Config.DependsOn...) + result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) + for _, v := range result { + split := strings.Split(v, "/") + for i, s := range split { + split[i] = s + ".destroy" + } + + result = append(result, strings.Join(split, "/")) + } + + return result +} + +// GraphNodeEvalable +func (n *NodeApplyableOutput) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, + walkDestroy, walkInput, walkValidate}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalWriteOutput{ + Name: n.Config.Name, + Sensitive: n.Config.Sensitive, + Value: n.Config.RawConfig, + }, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go new file mode 100644 index 0000000000..636a15df11 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go @@ -0,0 +1,35 @@ +package terraform + +import ( + "fmt" +) + +// NodeOutputOrphan represents an output that is an orphan. 
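+// An orphaned output exists in the state but no longer in the
+// configuration, so its EvalTree simply deletes it from the state.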
+type NodeOutputOrphan struct { + OutputName string + PathValue []string +} + +func (n *NodeOutputOrphan) Name() string { + result := fmt.Sprintf("output.%s (orphan)", n.OutputName) + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeOutputOrphan) Path() []string { + return n.PathValue +} + +// GraphNodeEvalable +func (n *NodeOutputOrphan) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, + Node: &EvalDeleteOutput{ + Name: n.OutputName, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go new file mode 100644 index 0000000000..8e2c176fa9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go @@ -0,0 +1,11 @@ +package terraform + +// NodeApplyableProvider represents a provider during an apply. +type NodeApplyableProvider struct { + *NodeAbstractProvider +} + +// GraphNodeEvalable +func (n *NodeApplyableProvider) EvalTree() EvalNode { + return ProviderEvalTree(n.NameValue, n.ProviderConfig()) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go new file mode 100644 index 0000000000..6cc836560c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go @@ -0,0 +1,85 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/dag" +) + +// ConcreteProviderNodeFunc is a callback type used to convert an +// abstract provider to a concrete one of some type. +type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex + +// NodeAbstractProvider represents a provider that has no associated operations. +// It registers all the common interfaces across operations for providers. +type NodeAbstractProvider struct { + NameValue string + PathValue []string + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but also be explicitly + // set if you already have that information. + + Config *config.ProviderConfig +} + +func (n *NodeAbstractProvider) Name() string { + result := fmt.Sprintf("provider.%s", n.NameValue) + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeAbstractProvider) Path() []string { + return n.PathValue +} + +// RemovableIfNotTargeted +func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeReferencer +func (n *NodeAbstractProvider) References() []string { + if n.Config == nil { + return nil + } + + return ReferencesFromConfig(n.Config.RawConfig) +} + +// GraphNodeProvider +func (n *NodeAbstractProvider) ProviderName() string { + return n.NameValue +} + +// GraphNodeProvider +func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig { + if n.Config == nil { + return nil + } + + return n.Config.RawConfig +} + +// GraphNodeAttachProvider +func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) { + n.Config = c +} + +// GraphNodeDotter impl. 
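+// DotNode returns the attributes used to render this provider node
+// when the graph is exported in DOT format.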
+func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+	return &dag.DotNode{
+		Name: name,
+		Attrs: map[string]string{
+			"label": n.Name(),
+			"shape": "diamond",
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
new file mode 100644
index 0000000000..25e7e620e1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+	"fmt"
+)
+
+// NodeDisabledProvider represents a provider that is disabled. A disabled
+// provider does nothing. It exists to properly set inheritance information
+// for child providers.
+type NodeDisabledProvider struct {
+	*NodeAbstractProvider
+}
+
+func (n *NodeDisabledProvider) Name() string {
+	return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
+}
+
+// GraphNodeEvalable
+func (n *NodeDisabledProvider) EvalTree() EvalNode {
+	var resourceConfig *ResourceConfig
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			&EvalInterpolate{
+				Config: n.ProviderConfig(),
+				Output: &resourceConfig,
+			},
+			&EvalBuildProviderConfig{
+				Provider: n.ProviderName(),
+				Config:   &resourceConfig,
+				Output:   &resourceConfig,
+			},
+			&EvalSetProviderConfig{
+				Provider: n.ProviderName(),
+				Config:   &resourceConfig,
+			},
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
new file mode 100644
index 0000000000..bb117c1d6f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/config"
+)
+
+// NodeProvisioner represents a provisioner that has no associated operations.
+// It registers all the common interfaces across operations for provisioners.
+type NodeProvisioner struct {
+	NameValue string
+	PathValue []string
+
+	// The fields below will be automatically set using the Attach
+	// interfaces if you're running those transforms, but also be explicitly
+	// set if you already have that information.
+
+	Config *config.ProviderConfig
+}
+
+func (n *NodeProvisioner) Name() string {
+	result := fmt.Sprintf("provisioner.%s", n.NameValue)
+	if len(n.PathValue) > 1 {
+		result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+	}
+
+	return result
+}
+
+// GraphNodeSubPath
+func (n *NodeProvisioner) Path() []string {
+	return n.PathValue
+}
+
+// GraphNodeProvisioner
+func (n *NodeProvisioner) ProvisionerName() string {
+	return n.NameValue
+}
+
+// GraphNodeEvalable impl.
+func (n *NodeProvisioner) EvalTree() EvalNode {
+	return &EvalInitProvisioner{Name: n.NameValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
new file mode 100644
index 0000000000..50bb70792a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -0,0 +1,240 @@
+package terraform
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// ConcreteResourceNodeFunc is a callback type used to convert an
+// abstract resource to a concrete one of some type.
+type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
+
+// GraphNodeResource is implemented by any nodes that represent a resource.
+// The type of operation cannot be assumed, only that this node represents +// the given resource. +type GraphNodeResource interface { + ResourceAddr() *ResourceAddress +} + +// NodeAbstractResource represents a resource that has no associated +// operations. It registers all the interfaces for a resource that common +// across multiple operation types. +type NodeAbstractResource struct { + Addr *ResourceAddress // Addr is the address for this resource + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but also be explicitly + // set if you already have that information. + + Config *config.Resource // Config is the resource in the config + ResourceState *ResourceState // ResourceState is the ResourceState for this + + Targets []ResourceAddress // Set from GraphNodeTargetable +} + +func (n *NodeAbstractResource) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeAbstractResource) Path() []string { + return n.Addr.Path +} + +// GraphNodeReferenceable +func (n *NodeAbstractResource) ReferenceableName() []string { + // We always are referenceable as "type.name" as long as + // we have a config or address. Determine what that value is. + var id string + if n.Config != nil { + id = n.Config.Id() + } else if n.Addr != nil { + addrCopy := n.Addr.Copy() + addrCopy.Path = nil // ReferenceTransformer handles paths + addrCopy.Index = -1 // We handle indexes below + id = addrCopy.String() + } else { + // No way to determine our type.name, just return + return nil + } + + var result []string + + // Always include our own ID. This is primarily for backwards + // compatibility with states that didn't yet support the more + // specific dep string. + result = append(result, id) + + // We represent all multi-access + result = append(result, fmt.Sprintf("%s.*", id)) + + // We represent either a specific number, or all numbers + suffix := "N" + if n.Addr != nil { + idx := n.Addr.Index + if idx == -1 { + idx = 0 + } + + suffix = fmt.Sprintf("%d", idx) + } + result = append(result, fmt.Sprintf("%s.%s", id, suffix)) + + return result +} + +// GraphNodeReferencer +func (n *NodeAbstractResource) References() []string { + // If we have a config, that is our source of truth + if c := n.Config; c != nil { + // Grab all the references + var result []string + result = append(result, c.DependsOn...) + result = append(result, ReferencesFromConfig(c.RawCount)...) + result = append(result, ReferencesFromConfig(c.RawConfig)...) + for _, p := range c.Provisioners { + if p.When == config.ProvisionerWhenCreate { + result = append(result, ReferencesFromConfig(p.ConnInfo)...) + result = append(result, ReferencesFromConfig(p.RawConfig)...) + } + } + + return uniqueStrings(result) + } + + // If we have state, that is our next source + if s := n.ResourceState; s != nil { + return s.Dependencies + } + + return nil +} + +// StateReferences returns the dependencies to put into the state for +// this resource. +func (n *NodeAbstractResource) StateReferences() []string { + self := n.ReferenceableName() + + // Determine what our "prefix" is for checking for references to + // ourself. + addrCopy := n.Addr.Copy() + addrCopy.Index = -1 + selfPrefix := addrCopy.String() + "." + + depsRaw := n.References() + deps := make([]string, 0, len(depsRaw)) + for _, d := range depsRaw { + // Ignore any variable dependencies + if strings.HasPrefix(d, "var.") { + continue + } + + // If this has a backup ref, ignore those for now. 
The old state + // file never contained those and I'd rather store the rich types we + // add in the future. + if idx := strings.IndexRune(d, '/'); idx != -1 { + d = d[:idx] + } + + // If we're referencing ourself, then ignore it + found := false + for _, s := range self { + if d == s { + found = true + } + } + if found { + continue + } + + // If this is a reference to ourself and a specific index, we keep + // it. For example, if this resource is "foo.bar" and the reference + // is "foo.bar.0" then we keep it exact. Otherwise, we strip it. + if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) { + d = d[:len(d)-2] + } + + // This is sad. The dependencies are currently in the format of + // "module.foo.bar" (the full field). This strips the field off. + if strings.HasPrefix(d, "module.") { + parts := strings.SplitN(d, ".", 3) + d = strings.Join(parts[0:2], ".") + } + + deps = append(deps, d) + } + + return deps +} + +// GraphNodeProviderConsumer +func (n *NodeAbstractResource) ProvidedBy() []string { + // If we have a config we prefer that above all else + if n.Config != nil { + return []string{resourceProvider(n.Config.Type, n.Config.Provider)} + } + + // If we have state, then we will use the provider from there + if n.ResourceState != nil && n.ResourceState.Provider != "" { + return []string{n.ResourceState.Provider} + } + + // Use our type + return []string{resourceProvider(n.Addr.Type, "")} +} + +// GraphNodeProvisionerConsumer +func (n *NodeAbstractResource) ProvisionedBy() []string { + // If we have no configuration, then we have no provisioners + if n.Config == nil { + return nil + } + + // Build the list of provisioners we need based on the configuration. + // It is okay to have duplicates here. + result := make([]string, len(n.Config.Provisioners)) + for i, p := range n.Config.Provisioners { + result[i] = p.Type + } + + return result +} + +// GraphNodeResource, GraphNodeAttachResourceState +func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress { + return n.Addr +} + +// GraphNodeAddressable, TODO: remove, used by target, should unify +func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress { + return n.ResourceAddr() +} + +// GraphNodeTargetable +func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) { + n.Targets = targets +} + +// GraphNodeAttachResourceState +func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) { + n.ResourceState = s +} + +// GraphNodeAttachResourceConfig +func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) { + n.Config = c +} + +// GraphNodeDotter impl. +func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "box", + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go new file mode 100644 index 0000000000..573570d8e2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go @@ -0,0 +1,50 @@ +package terraform + +// NodeAbstractCountResource should be embedded instead of NodeAbstractResource +// if the resource has a `count` value that needs to be expanded. +// +// The embedder should implement `DynamicExpand` to process the count. +type NodeAbstractCountResource struct { + *NodeAbstractResource + + // Validate, if true, will perform the validation for the count. 
+ // This should only be turned on for the "validate" operation. + Validate bool +} + +// GraphNodeEvalable +func (n *NodeAbstractCountResource) EvalTree() EvalNode { + // We only check if the count is computed if we're not validating. + // If we're validating we allow computed counts since they just turn + // into more computed values. + var evalCountCheckComputed EvalNode + if !n.Validate { + evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config} + } + + return &EvalSequence{ + Nodes: []EvalNode{ + // The EvalTree for a plannable resource primarily involves + // interpolating the count since it can contain variables + // we only just received access to. + // + // With the interpolated count, we can then DynamicExpand + // into the proper number of instances. + &EvalInterpolate{Config: n.Config.RawCount}, + + // Check if the count is computed + evalCountCheckComputed, + + // If validation is enabled, perform the validation + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return n.Validate, nil + }, + + Then: &EvalValidateCount{Resource: n.Config}, + }, + + &EvalCountFixZeroOneBoundary{Resource: n.Config}, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go new file mode 100644 index 0000000000..3599782b9d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go @@ -0,0 +1,357 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// NodeApplyableResource represents a resource that is "applyable": +// it is ready to be applied and is represented by a diff. +type NodeApplyableResource struct { + *NodeAbstractResource +} + +// GraphNodeCreator +func (n *NodeApplyableResource) CreateAddr() *ResourceAddress { + return n.NodeAbstractResource.Addr +} + +// GraphNodeReferencer, overriding NodeAbstractResource +func (n *NodeApplyableResource) References() []string { + result := n.NodeAbstractResource.References() + + // The "apply" side of a resource generally also depends on the + // destruction of its dependencies as well. For example, if a LB + // references a set of VMs with ${vm.foo.*.id}, then we must wait for + // the destruction so we get the newly updated list of VMs. + // + // The exception here is CBD. When CBD is set, we don't do this since + // it would create a cycle. By not creating a cycle, we require two + // applies since the first apply the creation step will use the OLD + // values (pre-destroy) and the second step will update. + // + // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x). + // We mimic that behavior here now and can improve upon it in the future. + // + // This behavior is tested in graph_build_apply_test.go to test ordering. + cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy + if !cbd { + // The "apply" side of a resource always depends on the destruction + // of all its dependencies in addition to the creation. + for _, v := range result { + result = append(result, v+".destroy") + } + } + + return result +} + +// GraphNodeEvalable +func (n *NodeApplyableResource) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. 
More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: addr.Type, + } + + // Build the resource for eval + resource := &Resource{ + Name: addr.Name, + Type: addr.Type, + CountIndex: addr.Index, + } + if resource.CountIndex < 0 { + resource.CountIndex = 0 + } + + // Determine the dependencies for the state. + stateDeps := n.StateReferences() + + // Eval info is different depending on what kind of resource this is + switch n.Config.Mode { + case config.ManagedResourceMode: + return n.evalTreeManagedResource( + stateId, info, resource, stateDeps, + ) + case config.DataResourceMode: + return n.evalTreeDataResource( + stateId, info, resource, stateDeps) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodeApplyableResource) evalTreeDataResource( + stateId string, info *InstanceInfo, + resource *Resource, stateDeps []string) EvalNode { + var provider ResourceProvider + var config *ResourceConfig + var diff *InstanceDiff + var state *InstanceState + + return &EvalSequence{ + Nodes: []EvalNode{ + // Build the instance info + &EvalInstanceInfo{ + Info: info, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Name: stateId, + Diff: &diff, + }, + + // Stop here if we don't actually have a diff + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diff == nil { + return true, EvalEarlyExitError{} + } + + if diff.GetAttributesLen() == 0 { + return true, EvalEarlyExitError{} + } + + return true, nil + }, + Then: EvalNoop{}, + }, + + // We need to re-interpolate the config here, rather than + // just using the diff's values directly, because we've + // potentially learned more variable values during the + // apply pass that weren't known when the diff was produced. + &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &config, + }, + + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + + // Make a new diff with our newly-interpolated config. + &EvalReadDataDiff{ + Info: info, + Config: &config, + Previous: &diff, + Provider: &provider, + Output: &diff, + }, + + &EvalReadDataApply{ + Info: info, + Diff: &diff, + Provider: &provider, + Output: &state, + }, + + &EvalWriteState{ + Name: stateId, + ResourceType: n.Config.Type, + Provider: n.Config.Provider, + Dependencies: stateDeps, + State: &state, + }, + + // Clear the diff now that we've applied it, so + // later nodes won't see a diff that's now a no-op. + &EvalWriteDiff{ + Name: stateId, + Diff: nil, + }, + + &EvalUpdateStateHook{}, + }, + } +} + +func (n *NodeApplyableResource) evalTreeManagedResource( + stateId string, info *InstanceInfo, + resource *Resource, stateDeps []string) EvalNode { + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. 
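+	// Each eval node below writes its result through one of these pointers
+	// so that later nodes in the sequence can consume it.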
+ var provider ResourceProvider + var diff, diffApply *InstanceDiff + var state *InstanceState + var resourceConfig *ResourceConfig + var err error + var createNew bool + var createBeforeDestroyEnabled bool + + return &EvalSequence{ + Nodes: []EvalNode{ + // Build the instance info + &EvalInstanceInfo{ + Info: info, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Name: stateId, + Diff: &diffApply, + }, + + // We don't want to do any destroys + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply == nil { + return true, EvalEarlyExitError{} + } + + if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 { + return true, EvalEarlyExitError{} + } + + diffApply.SetDestroy(false) + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + destroy := false + if diffApply != nil { + destroy = diffApply.GetDestroy() || diffApply.RequiresNew() + } + + createBeforeDestroyEnabled = + n.Config.Lifecycle.CreateBeforeDestroy && + destroy + + return createBeforeDestroyEnabled, nil + }, + Then: &EvalDeposeState{ + Name: stateId, + }, + }, + + &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &resourceConfig, + }, + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + &EvalReadState{ + Name: stateId, + Output: &state, + }, + // Re-run validation to catch any errors we missed, e.g. type + // mismatches on computed values. + &EvalValidateResource{ + Provider: &provider, + Config: &resourceConfig, + ResourceName: n.Config.Name, + ResourceType: n.Config.Type, + ResourceMode: n.Config.Mode, + IgnoreWarnings: true, + }, + &EvalDiff{ + Info: info, + Config: &resourceConfig, + Resource: n.Config, + Provider: &provider, + Diff: &diffApply, + State: &state, + OutputDiff: &diffApply, + }, + + // Get the saved diff + &EvalReadDiff{ + Name: stateId, + Diff: &diff, + }, + + // Compare the diffs + &EvalCompareDiff{ + Info: info, + One: &diff, + Two: &diffApply, + }, + + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + &EvalReadState{ + Name: stateId, + Output: &state, + }, + // Call pre-apply hook + &EvalApplyPre{ + Info: info, + State: &state, + Diff: &diffApply, + }, + &EvalApply{ + Info: info, + State: &state, + Diff: &diffApply, + Provider: &provider, + Output: &state, + Error: &err, + CreateNew: &createNew, + }, + &EvalWriteState{ + Name: stateId, + ResourceType: n.Config.Type, + Provider: n.Config.Provider, + Dependencies: stateDeps, + State: &state, + }, + &EvalApplyProvisioners{ + Info: info, + State: &state, + Resource: n.Config, + InterpResource: resource, + CreateNew: &createNew, + Error: &err, + When: config.ProvisionerWhenCreate, + }, + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return createBeforeDestroyEnabled && err != nil, nil + }, + Then: &EvalUndeposeState{ + Name: stateId, + State: &state, + }, + Else: &EvalWriteState{ + Name: stateId, + ResourceType: n.Config.Type, + Provider: n.Config.Provider, + Dependencies: stateDeps, + State: &state, + }, + }, + + // We clear the diff out here so that future nodes + // don't see a diff that is already complete. There + // is no longer a diff! 
+ &EvalWriteDiff{ + Name: stateId, + Diff: nil, + }, + + &EvalApplyPost{ + Info: info, + State: &state, + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go new file mode 100644 index 0000000000..c2efd2c384 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go @@ -0,0 +1,288 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// NodeDestroyResource represents a resource that is to be destroyed. +type NodeDestroyResource struct { + *NodeAbstractResource +} + +func (n *NodeDestroyResource) Name() string { + return n.NodeAbstractResource.Name() + " (destroy)" +} + +// GraphNodeDestroyer +func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress { + return n.Addr +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyResource) CreateBeforeDestroy() bool { + // If we have no config, we just assume no + if n.Config == nil { + return false + } + + return n.Config.Lifecycle.CreateBeforeDestroy +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error { + // If we have no config, do nothing since it won't affect the + // create step anyways. + if n.Config == nil { + return nil + } + + // Set CBD to true + n.Config.Lifecycle.CreateBeforeDestroy = true + + return nil +} + +// GraphNodeReferenceable, overriding NodeAbstractResource +func (n *NodeDestroyResource) ReferenceableName() []string { + // We modify our referenceable name to have the suffix of ".destroy" + // since depending on the creation side doesn't necessarilly mean + // depending on destruction. + suffix := ".destroy" + + // If we're CBD, we also append "-cbd". This is because CBD will setup + // its own edges (in CBDEdgeTransformer). Depending on the "destroy" + // side generally doesn't mean depending on CBD as well. See GH-11349 + if n.CreateBeforeDestroy() { + suffix += "-cbd" + } + + result := n.NodeAbstractResource.ReferenceableName() + for i, v := range result { + result[i] = v + suffix + } + + return result +} + +// GraphNodeReferencer, overriding NodeAbstractResource +func (n *NodeDestroyResource) References() []string { + // If we have a config, then we need to include destroy-time dependencies + if c := n.Config; c != nil { + var result []string + for _, p := range c.Provisioners { + // We include conn info and config for destroy time provisioners + // as dependencies that we have. + if p.When == config.ProvisionerWhenDestroy { + result = append(result, ReferencesFromConfig(p.ConnInfo)...) + result = append(result, ReferencesFromConfig(p.RawConfig)...) 
+ } + } + + return result + } + + return nil +} + +// GraphNodeDynamicExpandable +func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + // If we have no config we do nothing + if n.Addr == nil { + return nil, nil + } + + state, lock := ctx.State() + lock.RLock() + defer lock.RUnlock() + + // Start creating the steps + steps := make([]GraphTransformer, 0, 5) + + // We want deposed resources in the state to be destroyed + steps = append(steps, &DeposedTransformer{ + State: state, + View: n.Addr.stateId(), + }) + + // Target + steps = append(steps, &TargetsTransformer{ + ParsedTargets: n.Targets, + }) + + // Always end with the root being added + steps = append(steps, &RootTransformer{}) + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Name: "NodeResourceDestroy", + } + return b.Build(ctx.Path()) +} + +// GraphNodeEvalable +func (n *NodeDestroyResource) EvalTree() EvalNode { + // stateId is the ID to put into the state + stateId := n.Addr.stateId() + + // Build the instance info. More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: n.Addr.Type, + uniqueExtra: "destroy", + } + + // Build the resource for eval + addr := n.Addr + resource := &Resource{ + Name: addr.Name, + Type: addr.Type, + CountIndex: addr.Index, + } + if resource.CountIndex < 0 { + resource.CountIndex = 0 + } + + // Get our state + rs := n.ResourceState + if rs == nil { + rs = &ResourceState{} + } + + var diffApply *InstanceDiff + var provider ResourceProvider + var state *InstanceState + var err error + return &EvalOpFilter{ + Ops: []walkOperation{walkApply, walkDestroy}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + // Get the saved diff for apply + &EvalReadDiff{ + Name: stateId, + Diff: &diffApply, + }, + + // Filter the diff so we only get the destroy + &EvalFilterDiff{ + Diff: &diffApply, + Output: &diffApply, + Destroy: true, + }, + + // If we're not destroying, then compare diffs + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply != nil && diffApply.GetDestroy() { + return true, nil + } + + return true, EvalEarlyExitError{} + }, + Then: EvalNoop{}, + }, + + // Load the instance info so we have the module path set + &EvalInstanceInfo{Info: info}, + + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + &EvalReadState{ + Name: stateId, + Output: &state, + }, + &EvalRequireState{ + State: &state, + }, + + // Call pre-apply hook + &EvalApplyPre{ + Info: info, + State: &state, + Diff: &diffApply, + }, + + // Run destroy provisioners if not tainted + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if state != nil && state.Tainted { + return false, nil + } + + return true, nil + }, + + Then: &EvalApplyProvisioners{ + Info: info, + State: &state, + Resource: n.Config, + InterpResource: resource, + Error: &err, + When: config.ProvisionerWhenDestroy, + }, + }, + + // If we have a provisioning error, then we just call + // the post-apply hook now. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return err != nil, nil + }, + + Then: &EvalApplyPost{ + Info: info, + State: &state, + Error: &err, + }, + }, + + // Make sure we handle data sources properly. 
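+				// Data sources are "destroyed" by applying their read diff,
+				// whereas managed resources get a real destroy apply.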
+				&EvalIf{
+					If: func(ctx EvalContext) (bool, error) {
+						if n.Addr == nil {
+							return false, fmt.Errorf("nil address")
+						}
+
+						if n.Addr.Mode == config.DataResourceMode {
+							return true, nil
+						}
+
+						return false, nil
+					},
+
+					Then: &EvalReadDataApply{
+						Info:     info,
+						Diff:     &diffApply,
+						Provider: &provider,
+						Output:   &state,
+					},
+					Else: &EvalApply{
+						Info:     info,
+						State:    &state,
+						Diff:     &diffApply,
+						Provider: &provider,
+						Output:   &state,
+						Error:    &err,
+					},
+				},
+				&EvalWriteState{
+					Name:         stateId,
+					ResourceType: n.Addr.Type,
+					Provider:     rs.Provider,
+					Dependencies: rs.Dependencies,
+					State:        &state,
+				},
+				&EvalApplyPost{
+					Info:  info,
+					State: &state,
+					Error: &err,
+				},
+				&EvalUpdateStateHook{},
+			},
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
new file mode 100644
index 0000000000..52bbf88a1b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -0,0 +1,83 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform/dag"
+)
+
+// NodePlannableResource represents a resource that is "plannable":
+// it is ready to be planned in order to create a diff.
+type NodePlannableResource struct {
+	*NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+	// Grab the state which we read
+	state, lock := ctx.State()
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Expand the resource count which must be available by now from EvalTree
+	count, err := n.Config.Count()
+	if err != nil {
+		return nil, err
+	}
+
+	// The concrete resource factory we'll use
+	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+		// Add the config and state since we don't do that via transforms
+		a.Config = n.Config
+
+		return &NodePlannableResourceInstance{
+			NodeAbstractResource: a,
+		}
+	}
+
+	// The concrete resource factory we'll use for orphans
+	concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
+		// Add the config and state since we don't do that via transforms
+		a.Config = n.Config
+
+		return &NodePlannableResourceOrphan{
+			NodeAbstractResource: a,
+		}
+	}
+
+	// Start creating the steps
+	steps := []GraphTransformer{
+		// Expand the count.
+		&ResourceCountTransformer{
+			Concrete: concreteResource,
+			Count:    count,
+			Addr:     n.ResourceAddr(),
+		},
+
+		// Add the count orphans
+		&OrphanResourceCountTransformer{
+			Concrete: concreteResourceOrphan,
+			Count:    count,
+			Addr:     n.ResourceAddr(),
+			State:    state,
+		},
+
+		// Attach the state
+		&AttachStateTransformer{State: state},
+
+		// Targeting
+		&TargetsTransformer{ParsedTargets: n.Targets},
+
+		// Connect references so ordering is correct
+		&ReferenceTransformer{},
+
+		// Make sure there is a single root
+		&RootTransformer{},
+	}
+
+	// Build the graph
+	b := &BasicGraphBuilder{
+		Steps:    steps,
+		Validate: true,
+		Name:     "NodePlannableResource",
+	}
+	return b.Build(ctx.Path())
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
new file mode 100644
index 0000000000..9b02362b6f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
@@ -0,0 +1,53 @@
+package terraform
+
+// NodePlanDestroyableResource represents a resource that is ready to be
+// planned for destruction: planning it produces a destroy diff.
+type NodePlanDestroyableResource struct { + *NodeAbstractResource +} + +// GraphNodeDestroyer +func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress { + return n.Addr +} + +// GraphNodeEvalable +func (n *NodePlanDestroyableResource) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: addr.Type, + } + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var diff *InstanceDiff + var state *InstanceState + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalReadState{ + Name: stateId, + Output: &state, + }, + &EvalDiffDestroy{ + Info: info, + State: &state, + Output: &diff, + }, + &EvalCheckPreventDestroy{ + Resource: n.Config, + Diff: &diff, + }, + &EvalWriteDiff{ + Name: stateId, + Diff: &diff, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go new file mode 100644 index 0000000000..b52956908b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go @@ -0,0 +1,190 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// NodePlannableResourceInstance represents a _single_ resource +// instance that is plannable. This means this represents a single +// count index, for example. +type NodePlannableResourceInstance struct { + *NodeAbstractResource +} + +// GraphNodeEvalable +func (n *NodePlannableResourceInstance) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: addr.Type, + ModulePath: normalizeModulePath(addr.Path), + } + + // Build the resource for eval + resource := &Resource{ + Name: addr.Name, + Type: addr.Type, + CountIndex: addr.Index, + } + if resource.CountIndex < 0 { + resource.CountIndex = 0 + } + + // Determine the dependencies for the state. + stateDeps := n.StateReferences() + + // Eval info is different depending on what kind of resource this is + switch n.Config.Mode { + case config.ManagedResourceMode: + return n.evalTreeManagedResource( + stateId, info, resource, stateDeps, + ) + case config.DataResourceMode: + return n.evalTreeDataResource( + stateId, info, resource, stateDeps) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodePlannableResourceInstance) evalTreeDataResource( + stateId string, info *InstanceInfo, + resource *Resource, stateDeps []string) EvalNode { + var provider ResourceProvider + var config *ResourceConfig + var diff *InstanceDiff + var state *InstanceState + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalReadState{ + Name: stateId, + Output: &state, + }, + + // We need to re-interpolate the config here because some + // of the attributes may have become computed during + // earlier planning, due to other resources having + // "requires new resource" diffs. 
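+			// For example (illustrative): if this data source reads an
+			// attribute of a managed resource that the plan replaces,
+			// that attribute is unknown here and its key appears in
+			// config.ComputedKeys.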
+ &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &config, + }, + + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0 + + // If the configuration is complete and we + // already have a state then we don't need to + // do any further work during apply, because we + // already populated the state during refresh. + if !computed && state != nil { + return true, EvalEarlyExitError{} + } + + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + + &EvalReadDataDiff{ + Info: info, + Config: &config, + Provider: &provider, + Output: &diff, + OutputState: &state, + }, + + &EvalWriteState{ + Name: stateId, + ResourceType: n.Config.Type, + Provider: n.Config.Provider, + Dependencies: stateDeps, + State: &state, + }, + + &EvalWriteDiff{ + Name: stateId, + Diff: &diff, + }, + }, + } +} + +func (n *NodePlannableResourceInstance) evalTreeManagedResource( + stateId string, info *InstanceInfo, + resource *Resource, stateDeps []string) EvalNode { + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider ResourceProvider + var diff *InstanceDiff + var state *InstanceState + var resourceConfig *ResourceConfig + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &resourceConfig, + }, + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + // Re-run validation to catch any errors we missed, e.g. type + // mismatches on computed values. + &EvalValidateResource{ + Provider: &provider, + Config: &resourceConfig, + ResourceName: n.Config.Name, + ResourceType: n.Config.Type, + ResourceMode: n.Config.Mode, + IgnoreWarnings: true, + }, + &EvalReadState{ + Name: stateId, + Output: &state, + }, + &EvalDiff{ + Name: stateId, + Info: info, + Config: &resourceConfig, + Resource: n.Config, + Provider: &provider, + State: &state, + OutputDiff: &diff, + OutputState: &state, + }, + &EvalCheckPreventDestroy{ + Resource: n.Config, + Diff: &diff, + }, + &EvalWriteState{ + Name: stateId, + ResourceType: n.Config.Type, + Provider: n.Config.Provider, + Dependencies: stateDeps, + State: &state, + }, + &EvalWriteDiff{ + Name: stateId, + Diff: &diff, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go new file mode 100644 index 0000000000..73d6e41f54 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go @@ -0,0 +1,54 @@ +package terraform + +// NodePlannableResourceOrphan represents a resource that is "applyable": +// it is ready to be applied and is represented by a diff. +type NodePlannableResourceOrphan struct { + *NodeAbstractResource +} + +func (n *NodePlannableResourceOrphan) Name() string { + return n.NodeAbstractResource.Name() + " (orphan)" +} + +// GraphNodeEvalable +func (n *NodePlannableResourceOrphan) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. 
More of this will be populated during eval
+	info := &InstanceInfo{
+		Id:         stateId,
+		Type:       addr.Type,
+		ModulePath: normalizeModulePath(addr.Path),
+	}
+
+	// Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by address below.
+	var diff *InstanceDiff
+	var state *InstanceState
+
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			&EvalReadState{
+				Name:   stateId,
+				Output: &state,
+			},
+			&EvalDiffDestroy{
+				Info:   info,
+				State:  &state,
+				Output: &diff,
+			},
+			&EvalCheckPreventDestroy{
+				Resource:   n.Config,
+				ResourceId: stateId,
+				Diff:       &diff,
+			},
+			&EvalWriteDiff{
+				Name: stateId,
+				Diff: &diff,
+			},
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
new file mode 100644
index 0000000000..6ab9df7a26
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -0,0 +1,178 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// NodeRefreshableManagedResource represents a resource that is expandable into
+// NodeRefreshableManagedResourceInstance. Resource count orphans are also added.
+type NodeRefreshableManagedResource struct {
+	*NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+	// Grab the state which we read
+	state, lock := ctx.State()
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Expand the resource count which must be available by now from EvalTree
+	count, err := n.Config.Count()
+	if err != nil {
+		return nil, err
+	}
+
+	// The concrete resource factory we'll use
+	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+		// Add the config and state since we don't do that via transforms
+		a.Config = n.Config
+
+		return &NodeRefreshableManagedResourceInstance{
+			NodeAbstractResource: a,
+		}
+	}
+
+	// Start creating the steps
+	steps := []GraphTransformer{
+		// Expand the count.
+		&ResourceCountTransformer{
+			Concrete: concreteResource,
+			Count:    count,
+			Addr:     n.ResourceAddr(),
+		},
+
+		// Switch up any node missing state to a plannable resource. This helps
+		// catch cases where data sources depend on the counts from this resource
+		// during a scale out.
+		&ResourceRefreshPlannableTransformer{
+			State: state,
+		},
+
+		// Add the count orphans to make sure these resources are accounted for
+		// during a scale in.
+		&OrphanResourceCountTransformer{
+			Concrete: concreteResource,
+			Count:    count,
+			Addr:     n.ResourceAddr(),
+			State:    state,
+		},
+
+		// Attach the state
+		&AttachStateTransformer{State: state},
+
+		// Targeting
+		&TargetsTransformer{ParsedTargets: n.Targets},
+
+		// Connect references so ordering is correct
+		&ReferenceTransformer{},
+
+		// Make sure there is a single root
+		&RootTransformer{},
+	}
+
+	// Build the graph
+	b := &BasicGraphBuilder{
+		Steps:    steps,
+		Validate: true,
+		Name:     "NodeRefreshableManagedResource",
+	}
+
+	return b.Build(ctx.Path())
+}
+
+// NodeRefreshableManagedResourceInstance represents a single resource
+// instance that is refreshable: its current state can be read from the
+// provider and written back to Terraform's state.
+type NodeRefreshableManagedResourceInstance struct { + *NodeAbstractResource +} + +// GraphNodeDestroyer +func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *ResourceAddress { + return n.Addr +} + +// GraphNodeEvalable +func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { + // Eval info is different depending on what kind of resource this is + switch mode := n.Addr.Mode; mode { + case config.ManagedResourceMode: + return n.evalTreeManagedResource() + + case config.DataResourceMode: + // Get the data source node. If we don't have a configuration + // then it is an orphan so we destroy it (remove it from the state). + var dn GraphNodeEvalable + if n.Config != nil { + dn = &NodeRefreshableDataResourceInstance{ + NodeAbstractResource: n.NodeAbstractResource, + } + } else { + dn = &NodeDestroyableDataResource{ + NodeAbstractResource: n.NodeAbstractResource, + } + } + + return dn.EvalTree() + default: + panic(fmt.Errorf("unsupported resource mode %s", mode)) + } +} + +func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: addr.Type, + } + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider ResourceProvider + var state *InstanceState + + // This happened during initial development. All known cases were + // fixed and tested but as a sanity check let's assert here. + if n.ResourceState == nil { + err := fmt.Errorf( + "No resource state attached for addr: %s\n\n"+ + "This is a bug. Please report this to Terraform with your configuration\n"+ + "and state attached. Please be careful to scrub any sensitive information.", + addr) + return &EvalReturnError{Error: &err} + } + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + &EvalReadState{ + Name: stateId, + Output: &state, + }, + &EvalRefresh{ + Info: info, + Provider: &provider, + State: &state, + Output: &state, + }, + &EvalWriteState{ + Name: stateId, + ResourceType: n.ResourceState.Type, + Provider: n.ResourceState.Provider, + Dependencies: n.ResourceState.Dependencies, + State: &state, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go new file mode 100644 index 0000000000..f528f24b19 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go @@ -0,0 +1,158 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/dag" +) + +// NodeValidatableResource represents a resource that is used for validation +// only. 
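+// It expands its count and validates each resulting instance with the
+// provider, without reading or writing any real infrastructure.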
+type NodeValidatableResource struct { + *NodeAbstractCountResource +} + +// GraphNodeEvalable +func (n *NodeValidatableResource) EvalTree() EvalNode { + // Ensure we're validating + c := n.NodeAbstractCountResource + c.Validate = true + return c.EvalTree() +} + +// GraphNodeDynamicExpandable +func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + // Grab the state which we read + state, lock := ctx.State() + lock.RLock() + defer lock.RUnlock() + + // Expand the resource count which must be available by now from EvalTree + count := 1 + if n.Config.RawCount.Value() != unknownValue() { + var err error + count, err = n.Config.Count() + if err != nil { + return nil, err + } + } + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + + return &NodeValidatableResourceInstance{ + NodeAbstractResource: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count. + &ResourceCountTransformer{ + Concrete: concreteResource, + Count: count, + Addr: n.ResourceAddr(), + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{ParsedTargets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodeValidatableResource", + } + + return b.Build(ctx.Path()) +} + +// This represents a _single_ resource instance to validate. +type NodeValidatableResourceInstance struct { + *NodeAbstractResource +} + +// GraphNodeEvalable +func (n *NodeValidatableResourceInstance) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // Build the resource for eval + resource := &Resource{ + Name: addr.Name, + Type: addr.Type, + CountIndex: addr.Index, + } + if resource.CountIndex < 0 { + resource.CountIndex = 0 + } + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. 
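+	// For example, EvalGetProvider below fills in &provider and
+	// EvalValidateResource later reads it through the same pointer.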
+ var config *ResourceConfig + var provider ResourceProvider + + seq := &EvalSequence{ + Nodes: []EvalNode{ + &EvalValidateResourceSelfRef{ + Addr: &addr, + Config: &n.Config.RawConfig, + }, + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &config, + }, + &EvalValidateResource{ + Provider: &provider, + Config: &config, + ResourceName: n.Config.Name, + ResourceType: n.Config.Type, + ResourceMode: n.Config.Mode, + }, + }, + } + + // Validate all the provisioners + for _, p := range n.Config.Provisioners { + var provisioner ResourceProvisioner + var connConfig *ResourceConfig + seq.Nodes = append( + seq.Nodes, + &EvalGetProvisioner{ + Name: p.Type, + Output: &provisioner, + }, + &EvalInterpolate{ + Config: p.RawConfig.Copy(), + Resource: resource, + Output: &config, + }, + &EvalInterpolate{ + Config: p.ConnInfo.Copy(), + Resource: resource, + Output: &connConfig, + }, + &EvalValidateProvisioner{ + Provisioner: &provisioner, + Config: &config, + ConnConfig: &connConfig, + }, + ) + } + + return seq +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go new file mode 100644 index 0000000000..cb61a4e3a6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go @@ -0,0 +1,22 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// NodeRootVariable represents a root variable input. +type NodeRootVariable struct { + Config *config.Variable +} + +func (n *NodeRootVariable) Name() string { + result := fmt.Sprintf("var.%s", n.Config.Name) + return result +} + +// GraphNodeReferenceable +func (n *NodeRootVariable) ReferenceableName() []string { + return []string{n.Name()} +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go new file mode 100644 index 0000000000..ca99685ad3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/path.go @@ -0,0 +1,24 @@ +package terraform + +import ( + "crypto/md5" + "encoding/hex" +) + +// PathCacheKey returns a cache key for a module path. +// +// TODO: test +func PathCacheKey(path []string) string { + // There is probably a better way to do this, but this is working for now. + // We just create an MD5 hash of all the MD5 hashes of all the path + // elements. This gets us the property that it is unique per ordering. + hash := md5.New() + for _, p := range path { + single := md5.Sum([]byte(p)) + if _, err := hash.Write(single[:]); err != nil { + panic(err) + } + } + + return hex.EncodeToString(hash.Sum(nil)) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go new file mode 100644 index 0000000000..ea0884505a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go @@ -0,0 +1,153 @@ +package terraform + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "io" + "sync" + + "github.com/hashicorp/terraform/config/module" +) + +func init() { + gob.Register(make([]interface{}, 0)) + gob.Register(make([]map[string]interface{}, 0)) + gob.Register(make(map[string]interface{})) + gob.Register(make(map[string]string)) +} + +// Plan represents a single Terraform execution plan, which contains +// all the information necessary to make an infrastructure change. 
+// +// A plan has to contain basically the entire state of the world +// necessary to make a change: the state, diff, config, backend config, etc. +// This is so that it can run alone without any other data. +type Plan struct { + Diff *Diff + Module *module.Tree + State *State + Vars map[string]interface{} + Targets []string + + // Backend is the backend that this plan should use and store data with. + Backend *BackendState + + once sync.Once +} + +// Context returns a Context with the data encapsulated in this plan. +// +// The following fields in opts are overridden by the plan: Config, +// Diff, State, Variables. +func (p *Plan) Context(opts *ContextOpts) (*Context, error) { + opts.Diff = p.Diff + opts.Module = p.Module + opts.State = p.State + opts.Targets = p.Targets + + opts.Variables = make(map[string]interface{}) + for k, v := range p.Vars { + opts.Variables[k] = v + } + + return NewContext(opts) +} + +func (p *Plan) String() string { + buf := new(bytes.Buffer) + buf.WriteString("DIFF:\n\n") + buf.WriteString(p.Diff.String()) + buf.WriteString("\n\nSTATE:\n\n") + buf.WriteString(p.State.String()) + return buf.String() +} + +func (p *Plan) init() { + p.once.Do(func() { + if p.Diff == nil { + p.Diff = new(Diff) + p.Diff.init() + } + + if p.State == nil { + p.State = new(State) + p.State.init() + } + + if p.Vars == nil { + p.Vars = make(map[string]interface{}) + } + }) +} + +// The format byte is prefixed into the plan file format so that we have +// the ability in the future to change the file format if we want for any +// reason. +const planFormatMagic = "tfplan" +const planFormatVersion byte = 1 + +// ReadPlan reads a plan structure out of a reader in the format that +// was written by WritePlan. +func ReadPlan(src io.Reader) (*Plan, error) { + var result *Plan + var err error + n := 0 + + // Verify the magic bytes + magic := make([]byte, len(planFormatMagic)) + for n < len(magic) { + n, err = src.Read(magic[n:]) + if err != nil { + return nil, fmt.Errorf("error while reading magic bytes: %s", err) + } + } + if string(magic) != planFormatMagic { + return nil, fmt.Errorf("not a valid plan file") + } + + // Verify the version is something we can read + var formatByte [1]byte + n, err = src.Read(formatByte[:]) + if err != nil { + return nil, err + } + if n != len(formatByte) { + return nil, errors.New("failed to read plan version byte") + } + + if formatByte[0] != planFormatVersion { + return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0]) + } + + dec := gob.NewDecoder(src) + if err := dec.Decode(&result); err != nil { + return nil, err + } + + return result, nil +} + +// WritePlan writes a plan somewhere in a binary format. 
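+//
+// An illustrative round trip (a sketch; p is an assumed *Plan value):
+//
+//	var buf bytes.Buffer
+//	if err := WritePlan(p, &buf); err != nil {
+//		// handle the write error
+//	}
+//	restored, err := ReadPlan(&buf)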
+func WritePlan(d *Plan, dst io.Writer) error { + // Write the magic bytes so we can determine the file format later + n, err := dst.Write([]byte(planFormatMagic)) + if err != nil { + return err + } + if n != len(planFormatMagic) { + return errors.New("failed to write plan format magic bytes") + } + + // Write a version byte so we can iterate on version at some point + n, err = dst.Write([]byte{planFormatVersion}) + if err != nil { + return err + } + if n != 1 { + return errors.New("failed to write plan version byte") + } + + return gob.NewEncoder(dst).Encode(d) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go new file mode 100644 index 0000000000..0acf0beb2a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go @@ -0,0 +1,360 @@ +package terraform + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/terraform/config" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" +) + +// ResourceProvisionerConfig is used to pair a provisioner +// with its provided configuration. This allows us to use singleton +// instances of each ResourceProvisioner and to keep the relevant +// configuration instead of instantiating a new Provisioner for each +// resource. +type ResourceProvisionerConfig struct { + Type string + Provisioner ResourceProvisioner + Config *ResourceConfig + RawConfig *config.RawConfig + ConnInfo *config.RawConfig +} + +// Resource encapsulates a resource, its configuration, its provider, +// its current state, and potentially a desired diff from the state it +// wants to reach. +type Resource struct { + // These are all used by the new EvalNode stuff. + Name string + Type string + CountIndex int + + // These aren't really used anymore anywhere, but we keep them around + // since we haven't done a proper cleanup yet. + Id string + Info *InstanceInfo + Config *ResourceConfig + Dependencies []string + Diff *InstanceDiff + Provider ResourceProvider + State *InstanceState + Provisioners []*ResourceProvisionerConfig + Flags ResourceFlag +} + +// ResourceKind specifies what kind of instance we're working with, whether +// its a primary instance, a tainted instance, or an orphan. +type ResourceFlag byte + +// InstanceInfo is used to hold information about the instance and/or +// resource being modified. +type InstanceInfo struct { + // Id is a unique name to represent this instance. This is not related + // to InstanceState.ID in any way. + Id string + + // ModulePath is the complete path of the module containing this + // instance. + ModulePath []string + + // Type is the resource type of this instance + Type string + + // uniqueExtra is an internal field that can be populated to supply + // extra metadata that is used to identify a unique instance in + // the graph walk. This will be appended to HumanID when uniqueId + // is called. + uniqueExtra string +} + +// HumanId is a unique Id that is human-friendly and useful for UI elements. +func (i *InstanceInfo) HumanId() string { + if i == nil { + return "" + } + + if len(i.ModulePath) <= 1 { + return i.Id + } + + return fmt.Sprintf( + "module.%s.%s", + strings.Join(i.ModulePath[1:], "."), + i.Id) +} + +func (i *InstanceInfo) uniqueId() string { + prefix := i.HumanId() + if v := i.uniqueExtra; v != "" { + prefix += " " + v + } + + return prefix +} + +// ResourceConfig holds the configuration given for a resource. 
This is
+// done instead of a raw `map[string]interface{}` type so that rich
+// methods can be added to it to make dealing with it easier.
+type ResourceConfig struct {
+	ComputedKeys []string
+	Raw          map[string]interface{}
+	Config       map[string]interface{}
+
+	raw *config.RawConfig
+}
+
+// NewResourceConfig creates a new ResourceConfig from a config.RawConfig.
+func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
+	result := &ResourceConfig{raw: c}
+	result.interpolateForce()
+	return result
+}
+
+// DeepCopy performs a deep copy of the configuration. This makes it safe
+// to modify any of the structures that are part of the resource config without
+// affecting the original configuration.
+func (c *ResourceConfig) DeepCopy() *ResourceConfig {
+	// DeepCopying a nil should return a nil to avoid panics
+	if c == nil {
+		return nil
+	}
+
+	// Copy, this will copy all the exported attributes
+	copy, err := copystructure.Config{Lock: true}.Copy(c)
+	if err != nil {
+		panic(err)
+	}
+
+	// Force the type
+	result := copy.(*ResourceConfig)
+
+	// For the raw configuration, we can just use its own copy method
+	result.raw = c.raw.Copy()
+
+	return result
+}
+
+// Equal checks the equality of two resource configs.
+func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
+	// If either are nil, then they're only equal if they're both nil
+	if c == nil || c2 == nil {
+		return c == c2
+	}
+
+	// Sort the computed keys so they're deterministic
+	sort.Strings(c.ComputedKeys)
+	sort.Strings(c2.ComputedKeys)
+
+	// Two resource configs are equal if their exported properties are equal.
+	// We don't compare "raw" because it is never used again after
+	// initialization and for all intents and purposes they are equal
+	// if the exported properties are equal.
+	check := [][2]interface{}{
+		{c.ComputedKeys, c2.ComputedKeys},
+		{c.Raw, c2.Raw},
+		{c.Config, c2.Config},
+	}
+	for _, pair := range check {
+		if !reflect.DeepEqual(pair[0], pair[1]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// CheckSet checks that the given list of configuration keys is
+// properly set. If not, errors are returned for each unset key.
+//
+// This is useful to be called in the Validate method of a ResourceProvider.
+func (c *ResourceConfig) CheckSet(keys []string) []error {
+	var errs []error
+
+	for _, k := range keys {
+		if !c.IsSet(k) {
+			errs = append(errs, fmt.Errorf("%s must be set", k))
+		}
+	}
+
+	return errs
+}
+
+// Get looks up a configuration value by key and returns the value.
+//
+// The second return value is true if the get was successful. Get will
+// return the raw value if the key is computed, so you should pair this
+// with IsComputed.
+func (c *ResourceConfig) Get(k string) (interface{}, bool) {
+	// We aim to get a value from the configuration. If it is computed,
+	// then we return the pure raw value.
+	source := c.Config
+	if c.IsComputed(k) {
+		source = c.Raw
+	}
+
+	return c.get(k, source)
+}
+
+// GetRaw looks up a configuration value by key and returns the value,
+// from the raw, uninterpolated config.
+//
+// The second return value is true if the get was successful. Get will
+// not succeed if the value is being computed.
+func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
+	return c.get(k, c.Raw)
+}
+
+// IsComputed returns whether the given key is computed or not.
+func (c *ResourceConfig) IsComputed(k string) bool {
+	// Check the config to see whether we get a computed
+	// value out of it.
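+	// A value is considered computed when it still contains the unknown
+	// sentinel (unknownValue()) somewhere inside it; the walker below
+	// checks for that sentinel.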
+ v, ok := c.get(k, c.Config) + if !ok { + return false + } + + // If value is nil, then it isn't computed + if v == nil { + return false + } + + // Test if the value contains an unknown value + var w unknownCheckWalker + if err := reflectwalk.Walk(v, &w); err != nil { + panic(err) + } + + return w.Unknown +} + +// IsSet checks if the key in the configuration is set. A key is set if +// it has a value or the value is being computed (is unknown currently). +// +// This function should be used rather than checking the keys of the +// raw configuration itself, since a key may be omitted from the raw +// configuration if it is being computed. +func (c *ResourceConfig) IsSet(k string) bool { + if c == nil { + return false + } + + if c.IsComputed(k) { + return true + } + + if _, ok := c.Get(k); ok { + return true + } + + return false +} + +func (c *ResourceConfig) get( + k string, raw map[string]interface{}) (interface{}, bool) { + parts := strings.Split(k, ".") + if len(parts) == 1 && parts[0] == "" { + parts = nil + } + + var current interface{} = raw + var previous interface{} = nil + for i, part := range parts { + if current == nil { + return nil, false + } + + cv := reflect.ValueOf(current) + switch cv.Kind() { + case reflect.Map: + previous = current + v := cv.MapIndex(reflect.ValueOf(part)) + if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + + return v.Interface(), true + } + + return nil, false + } + + current = v.Interface() + case reflect.Slice: + previous = current + + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. + for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == unknownValue() { + return v, true + } + } + + current = cv.Len() + } else { + i, err := strconv.ParseInt(part, 0, 0) + if err != nil { + return nil, false + } + if i >= int64(cv.Len()) { + return nil, false + } + current = cv.Index(int(i)).Interface() + } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. + actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + v, ok := prevMap[actualKey] + return v, ok + } + + return nil, false + default: + panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) + } + } + + return current, true +} + +// interpolateForce is a temporary thing. We want to get rid of interpolate +// above and likewise this, but it can only be done after the f-ast-graph +// refactor is complete. 
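+//
+// It copies the unknown keys, the raw (uninterpolated) map, and the
+// interpolated map out of the underlying RawConfig into the exported
+// ComputedKeys, Raw, and Config fields.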
+func (c *ResourceConfig) interpolateForce() { + if c.raw == nil { + var err error + c.raw, err = config.NewRawConfig(make(map[string]interface{})) + if err != nil { + panic(err) + } + } + + c.ComputedKeys = c.raw.UnknownKeys() + c.Raw = c.raw.RawMap() + c.Config = c.raw.Config() +} + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == unknownValue() { + w.Unknown = true + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go new file mode 100644 index 0000000000..a8a0c95530 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go @@ -0,0 +1,301 @@ +package terraform + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/terraform/config" +) + +// ResourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. +type ResourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType InstanceType + InstanceTypeSet bool + Name string + Type string + Mode config.ResourceMode // significant only if InstanceTypeSet +} + +// Copy returns a copy of this ResourceAddress +func (r *ResourceAddress) Copy() *ResourceAddress { + if r == nil { + return nil + } + + n := &ResourceAddress{ + Path: make([]string, 0, len(r.Path)), + Index: r.Index, + InstanceType: r.InstanceType, + Name: r.Name, + Type: r.Type, + Mode: r.Mode, + } + for _, p := range r.Path { + n.Path = append(n.Path, p) + } + return n +} + +// String outputs the address that parses into this address. +func (r *ResourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case config.ManagedResourceMode: + // nothing to do + case config.DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case TypePrimary: + name += ".primary" + case TypeDeposed: + name += ".deposed" + case TypeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +// stateId returns the ID that this resource should be entered with +// in the state. This is also used for diffs. In the future, we'd like to +// move away from this string field so I don't export this. 
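+//
+// For example (illustrative): "aws_instance.web" for a managed resource,
+// "aws_instance.web.0" when an index is set, and "data.template_file.foo"
+// for a data resource.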
+func (r *ResourceAddress) stateId() string { + result := fmt.Sprintf("%s.%s", r.Type, r.Name) + switch r.Mode { + case config.ManagedResourceMode: + // Done + case config.DataResourceMode: + result = fmt.Sprintf("data.%s", result) + default: + panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) + } + if r.Index >= 0 { + result += fmt.Sprintf(".%d", r.Index) + } + + return result +} + +// parseResourceAddressConfig creates a resource address from a config.Resource +func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) { + return &ResourceAddress{ + Type: r.Type, + Name: r.Name, + Index: -1, + InstanceType: TypePrimary, + Mode: r.Mode, + }, nil +} + +// parseResourceAddressInternal parses the somewhat bespoke resource +// identifier used in states and diffs, such as "instance.name.0". +func parseResourceAddressInternal(s string) (*ResourceAddress, error) { + // Split based on ".". Every resource address should have at least two + // elements (type and name). + parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + mode := config.ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + mode = config.DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && mode != config.DataResourceMode { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + addr := &ResourceAddress{ + Type: parts[0], + Name: parts[1], + Index: -1, + InstanceType: TypePrimary, + Mode: mode, + } + + // If we have more parts, then we have an index. Parse that. + if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) + } + + addr.Index = int(idx) + } + + return addr, nil +} + +func ParseResourceAddress(s string) (*ResourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := config.ManagedResourceMode + if matches["data_prefix"] != "" { + mode = config.DataResourceMode + } + resourceIndex, err := ParseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := ParseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := ParseResourcePath(matches["path"]) + + // not allowed to say "data." 
without a type following + if mode == config.DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf("must target specific data instance") + } + + return &ResourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +func (addr *ResourceAddress) Equals(raw interface{}) bool { + other, ok := raw.(*ResourceAddress) + if !ok { + return false + } + + pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 || + reflect.DeepEqual(addr.Path, other.Path) + + indexMatch := addr.Index == -1 || + other.Index == -1 || + addr.Index == other.Index + + nameMatch := addr.Name == "" || + other.Name == "" || + addr.Name == other.Name + + typeMatch := addr.Type == "" || + other.Type == "" || + addr.Type == other.Type + + // mode is significant only when type is set + modeMatch := addr.Type == "" || + other.Type == "" || + addr.Mode == other.Mode + + return pathMatch && + indexMatch && + addr.InstanceType == other.InstanceType && + nameMatch && + typeMatch && + modeMatch +} + +func ParseResourceIndex(s string) (int, error) { + if s == "" { + return -1, nil + } + return strconv.Atoi(s) +} + +func ParseResourcePath(s string) []string { + if s == "" { + return nil + } + parts := strings.Split(s, ".") + path := make([]string, 0, len(parts)) + for _, s := range parts { + // Due to the limitations of the regexp match below, the path match has + // some noise in it we have to filter out :| + if s == "" || s == "module" { + continue + } + path = append(path, s) + } + return path +} + +func ParseInstanceType(s string) (InstanceType, error) { + switch s { + case "", "primary": + return TypePrimary, nil + case "deposed": + return TypeDeposed, nil + case "tainted": + return TypeTainted, nil + default: + return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s) + } +} + +func tokenizeResourceAddress(s string) (map[string]string, error) { + // Example of portions of the regexp below using the + // string "aws_instance.web.tainted[1]" + re := regexp.MustCompile(`\A` + + // "module.foo.module.bar" (optional) + `(?P(?:module\.[^.]+\.?)*)` + + // possibly "data.", if targeting is a data resource + `(?P(?:data\.)?)` + + // "aws_instance.web" (optional when module path specified) + `(?:(?P[^.]+)\.(?P[^.[]+))?` + + // "tainted" (optional, omission implies: "primary") + `(?:\.(?P\w+))?` + + // "1" (optional, omission implies: "0") + `(?:\[(?P\d+)\])?` + + `\z`) + + groupNames := re.SubexpNames() + rawMatches := re.FindAllStringSubmatch(s, -1) + if len(rawMatches) != 1 { + return nil, fmt.Errorf("Problem parsing address: %q", s) + } + + matches := make(map[string]string) + for i, m := range rawMatches[0] { + matches[groupNames[i]] = m + } + + return matches, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go new file mode 100644 index 0000000000..1a68c8699c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go @@ -0,0 +1,204 @@ +package terraform + +// ResourceProvider is an interface that must be implemented by any +// resource provider: the thing that creates and manages the resources in +// a Terraform configuration. +// +// Important implementation note: All returned pointers, such as +// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to +// shared data. 
Terraform is highly parallel and assumes that this data is safe +// to read/write in parallel so it must be unique references. Note that it is +// safe to return arguments as results, however. +type ResourceProvider interface { + /********************************************************************* + * Functions related to the provider + *********************************************************************/ + + // Input is called to ask the provider to ask the user for input + // for completing the configuration if necesarry. + // + // This may or may not be called, so resource provider writers shouldn't + // rely on this being available to set some default values for validate + // later. Example of a situation where this wouldn't be called is if + // the user is not using a TTY. + Input(UIInput, *ResourceConfig) (*ResourceConfig, error) + + // Validate is called once at the beginning with the raw configuration + // (no interpolation done) and can return a list of warnings and/or + // errors. + // + // This is called once with the provider configuration only. It may not + // be called at all if no provider configuration is given. + // + // This should not assume that any values of the configurations are valid. + // The primary use case of this call is to check that required keys are + // set. + Validate(*ResourceConfig) ([]string, []error) + + // Configure configures the provider itself with the configuration + // given. This is useful for setting things like access keys. + // + // This won't be called at all if no provider configuration is given. + // + // Configure returns an error if it occurred. + Configure(*ResourceConfig) error + + // Resources returns all the available resource types that this provider + // knows how to manage. + Resources() []ResourceType + + // Stop is called when the provider should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + /********************************************************************* + * Functions related to individual resources + *********************************************************************/ + + // ValidateResource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. 
+ ValidateResource(string, *ResourceConfig) ([]string, []error) + + // Apply applies a diff to a specific resource and returns the new + // resource state along with an error. + // + // If the resource state given has an empty ID, then a new resource + // is expected to be created. + Apply( + *InstanceInfo, + *InstanceState, + *InstanceDiff) (*InstanceState, error) + + // Diff diffs a resource versus a desired state and returns + // a diff. + Diff( + *InstanceInfo, + *InstanceState, + *ResourceConfig) (*InstanceDiff, error) + + // Refresh refreshes a resource and updates all of its attributes + // with the latest information. + Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) + + /********************************************************************* + * Functions related to importing + *********************************************************************/ + + // ImportState requests that the given resource be imported. + // + // The returned InstanceState only requires ID be set. Importing + // will always call Refresh after the state to complete it. + // + // IMPORTANT: InstanceState doesn't have the resource type attached + // to it. A type must be specified on the state via the Ephemeral + // field on the state. + // + // This function can return multiple states. Normally, an import + // will map 1:1 to a physical resource. However, some resources map + // to multiple. For example, an AWS security group may contain many rules. + // Each rule is represented by a separate resource in Terraform, + // therefore multiple states are returned. + ImportState(*InstanceInfo, string) ([]*InstanceState, error) + + /********************************************************************* + * Functions related to data resources + *********************************************************************/ + + // ValidateDataSource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per data source instance. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateDataSource(string, *ResourceConfig) ([]string, []error) + + // DataSources returns all of the available data sources that this + // provider implements. + DataSources() []DataSource + + // ReadDataDiff produces a diff that represents the state that will + // be produced when the given data source is read using a later call + // to ReadDataApply. + ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + + // ReadDataApply initializes a data instance using the configuration + // in a diff produced by ReadDataDiff. + ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) +} + +// ResourceProviderCloser is an interface that providers that can close +// connections that aren't needed anymore must implement. +type ResourceProviderCloser interface { + Close() error +} + +// ResourceType is a type of resource that a resource provider can manage. +type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing +} + +// DataSource is a data source that a resource provider implements. 
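+// Unlike ResourceType above, a DataSource carries no Importable flag:
+// data sources are read-only, so there is nothing to import.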
+type DataSource struct { + Name string +} + +// ResourceProviderFactory is a function type that creates a new instance +// of a resource provider. +type ResourceProviderFactory func() (ResourceProvider, error) + +// ResourceProviderFactoryFixed is a helper that creates a +// ResourceProviderFactory that just returns some fixed provider. +func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { + return func() (ResourceProvider, error) { + return p, nil + } +} + +func ProviderHasResource(p ResourceProvider, n string) bool { + for _, rt := range p.Resources() { + if rt.Name == n { + return true + } + } + + return false +} + +func ProviderHasDataSource(p ResourceProvider, n string) bool { + for _, rt := range p.DataSources() { + if rt.Name == n { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go new file mode 100644 index 0000000000..f5315339fb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go @@ -0,0 +1,297 @@ +package terraform + +import "sync" + +// MockResourceProvider implements ResourceProvider but mocks out all the +// calls for testing purposes. +type MockResourceProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + CloseCalled bool + CloseError error + InputCalled bool + InputInput UIInput + InputConfig *ResourceConfig + InputReturnConfig *ResourceConfig + InputReturnError error + InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) + ApplyCalled bool + ApplyInfo *InstanceInfo + ApplyState *InstanceState + ApplyDiff *InstanceDiff + ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) + ApplyReturn *InstanceState + ApplyReturnError error + ConfigureCalled bool + ConfigureConfig *ResourceConfig + ConfigureFn func(*ResourceConfig) error + ConfigureReturnError error + DiffCalled bool + DiffInfo *InstanceInfo + DiffState *InstanceState + DiffDesired *ResourceConfig + DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) + DiffReturn *InstanceDiff + DiffReturnError error + RefreshCalled bool + RefreshInfo *InstanceInfo + RefreshState *InstanceState + RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) + RefreshReturn *InstanceState + RefreshReturnError error + ResourcesCalled bool + ResourcesReturn []ResourceType + ReadDataApplyCalled bool + ReadDataApplyInfo *InstanceInfo + ReadDataApplyDiff *InstanceDiff + ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) + ReadDataApplyReturn *InstanceState + ReadDataApplyReturnError error + ReadDataDiffCalled bool + ReadDataDiffInfo *InstanceInfo + ReadDataDiffDesired *ResourceConfig + ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + ReadDataDiffReturn *InstanceDiff + ReadDataDiffReturnError error + StopCalled bool + StopFn func() error + StopReturnError error + DataSourcesCalled bool + DataSourcesReturn []DataSource + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(*ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateResourceCalled bool + ValidateResourceType string + ValidateResourceConfig *ResourceConfig + ValidateResourceReturnWarns []string + 
ValidateResourceReturnErrors []error + ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateDataSourceCalled bool + ValidateDataSourceType string + ValidateDataSourceConfig *ResourceConfig + ValidateDataSourceReturnWarns []string + ValidateDataSourceReturnErrors []error + + ImportStateCalled bool + ImportStateInfo *InstanceInfo + ImportStateID string + ImportStateReturn []*InstanceState + ImportStateReturnError error + ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error) +} + +func (p *MockResourceProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} + +func (p *MockResourceProvider) Input( + input UIInput, c *ResourceConfig) (*ResourceConfig, error) { + p.InputCalled = true + p.InputInput = input + p.InputConfig = c + if p.InputFn != nil { + return p.InputFn(input, c) + } + return p.InputReturnConfig, p.InputReturnError +} + +func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateResourceCalled = true + p.ValidateResourceType = t + p.ValidateResourceConfig = c + + if p.ValidateResourceFn != nil { + return p.ValidateResourceFn(t, c) + } + + return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors +} + +func (p *MockResourceProvider) Configure(c *ResourceConfig) error { + p.Lock() + defer p.Unlock() + + p.ConfigureCalled = true + p.ConfigureConfig = c + + if p.ConfigureFn != nil { + return p.ConfigureFn(c) + } + + return p.ConfigureReturnError +} + +func (p *MockResourceProvider) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} + +func (p *MockResourceProvider) Apply( + info *InstanceInfo, + state *InstanceState, + diff *InstanceDiff) (*InstanceState, error) { + // We only lock while writing data. 
Reading is fine + p.Lock() + p.ApplyCalled = true + p.ApplyInfo = info + p.ApplyState = state + p.ApplyDiff = diff + p.Unlock() + + if p.ApplyFn != nil { + return p.ApplyFn(info, state, diff) + } + + return p.ApplyReturn.DeepCopy(), p.ApplyReturnError +} + +func (p *MockResourceProvider) Diff( + info *InstanceInfo, + state *InstanceState, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.DiffCalled = true + p.DiffInfo = info + p.DiffState = state + p.DiffDesired = desired + if p.DiffFn != nil { + return p.DiffFn(info, state, desired) + } + + return p.DiffReturn.DeepCopy(), p.DiffReturnError +} + +func (p *MockResourceProvider) Refresh( + info *InstanceInfo, + s *InstanceState) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.RefreshCalled = true + p.RefreshInfo = info + p.RefreshState = s + + if p.RefreshFn != nil { + return p.RefreshFn(info, s) + } + + return p.RefreshReturn.DeepCopy(), p.RefreshReturnError +} + +func (p *MockResourceProvider) Resources() []ResourceType { + p.Lock() + defer p.Unlock() + + p.ResourcesCalled = true + return p.ResourcesReturn +} + +func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ImportStateCalled = true + p.ImportStateInfo = info + p.ImportStateID = id + if p.ImportStateFn != nil { + return p.ImportStateFn(info, id) + } + + var result []*InstanceState + if p.ImportStateReturn != nil { + result = make([]*InstanceState, len(p.ImportStateReturn)) + for i, v := range p.ImportStateReturn { + result[i] = v.DeepCopy() + } + } + + return result, p.ImportStateReturnError +} + +func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceCalled = true + p.ValidateDataSourceType = t + p.ValidateDataSourceConfig = c + + if p.ValidateDataSourceFn != nil { + return p.ValidateDataSourceFn(t, c) + } + + return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors +} + +func (p *MockResourceProvider) ReadDataDiff( + info *InstanceInfo, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataDiffCalled = true + p.ReadDataDiffInfo = info + p.ReadDataDiffDesired = desired + if p.ReadDataDiffFn != nil { + return p.ReadDataDiffFn(info, desired) + } + + return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError +} + +func (p *MockResourceProvider) ReadDataApply( + info *InstanceInfo, + d *InstanceDiff) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataApplyCalled = true + p.ReadDataApplyInfo = info + p.ReadDataApplyDiff = d + + if p.ReadDataApplyFn != nil { + return p.ReadDataApplyFn(info, d) + } + + return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError +} + +func (p *MockResourceProvider) DataSources() []DataSource { + p.Lock() + defer p.Unlock() + + p.DataSourcesCalled = true + return p.DataSourcesReturn +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go new file mode 100644 index 0000000000..361ec1ec09 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go @@ -0,0 +1,54 @@ +package terraform + +// ResourceProvisioner is an interface that must be implemented by any +// resource provisioner: the thing that initializes resources in +// a Terraform configuration. 
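+//
+// A provisioner does not manage a resource's lifecycle the way a provider
+// does: it runs against an instance that already exists, which is why
+// Apply below receives the interpolated ResourceConfig rather than a diff.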
+type ResourceProvisioner interface { + // Validate is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + Validate(*ResourceConfig) ([]string, []error) + + // Apply runs the provisioner on a specific resource and returns the new + // resource state along with an error. Instead of a diff, the ResourceConfig + // is provided since provisioners only run after a resource has been + // newly created. + Apply(UIOutput, *InstanceState, *ResourceConfig) error + + // Stop is called when the provisioner should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error +} + +// ResourceProvisionerCloser is an interface that provisioners that can close +// connections that aren't needed anymore must implement. +type ResourceProvisionerCloser interface { + Close() error +} + +// ResourceProvisionerFactory is a function type that creates a new instance +// of a resource provisioner. +type ResourceProvisionerFactory func() (ResourceProvisioner, error) diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go new file mode 100644 index 0000000000..f471a5182b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go @@ -0,0 +1,72 @@ +package terraform + +import "sync" + +// MockResourceProvisioner implements ResourceProvisioner but mocks out all the +// calls for testing purposes. +type MockResourceProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + ApplyCalled bool + ApplyOutput UIOutput + ApplyState *InstanceState + ApplyConfig *ResourceConfig + ApplyFn func(*InstanceState, *ResourceConfig) error + ApplyReturnError error + + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(c *ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + + StopCalled bool + StopFn func() error + StopReturnError error +} + +func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvisioner) Apply( + output UIOutput, + state *InstanceState, + c *ResourceConfig) error { + p.Lock() + + p.ApplyCalled = true + p.ApplyOutput = output + p.ApplyState = state + p.ApplyConfig = c + if p.ApplyFn != nil { + fn := p.ApplyFn + p.Unlock() + return fn(state, c) + } + + defer p.Unlock() + return p.ApplyReturnError +} + +func (p *MockResourceProvisioner) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go new file mode 100644 index 0000000000..20f1d8a274 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/semantics.go @@ -0,0 +1,132 @@ +package terraform + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/dag" +) + +// GraphSemanticChecker is the interface that semantic checks across +// the entire Terraform graph implement. +// +// The graph should NOT be modified by the semantic checker. +type GraphSemanticChecker interface { + Check(*dag.Graph) error +} + +// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker +// that runs a list of SemanticCheckers against the vertices of the graph +// in no specified order. +type UnorderedSemanticCheckRunner struct { + Checks []SemanticChecker +} + +func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error { + var err error + for _, v := range g.Vertices() { + for _, check := range sc.Checks { + if e := check.Check(g, v); e != nil { + err = multierror.Append(err, e) + } + } + } + + return err +} + +// SemanticChecker is the interface that semantic checks across the +// Terraform graph implement. Errors are accumulated. Even after an error +// is returned, child vertices in the graph will still be visited. +// +// The graph should NOT be modified by the semantic checker. +// +// The order in which vertices are visited is left unspecified, so the +// semantic checks should not rely on that. +type SemanticChecker interface { + Check(*dag.Graph, dag.Vertex) error +} + +// smcUserVariables does all the semantic checks to verify that the +// variables given satisfy the configuration itself. 
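+//
+// A sketch of what this catches (assuming a config that declares a
+// required variable "ami" and a map variable "tags"):
+//
+//     errs := smcUserVariables(cfg, map[string]interface{}{
+//         "tags": "not-a-map", // wrong type for a map variable
+//     })
+//     // errs now reports the unset required "ami" and the bad "tags" type.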
+func smcUserVariables(c *config.Config, vs map[string]interface{}) []error { + var errs []error + + cvs := make(map[string]*config.Variable) + for _, v := range c.Variables { + cvs[v.Name] = v + } + + // Check that all required variables are present + required := make(map[string]struct{}) + for _, v := range c.Variables { + if v.Required() { + required[v.Name] = struct{}{} + } + } + for k, _ := range vs { + delete(required, k) + } + if len(required) > 0 { + for k, _ := range required { + errs = append(errs, fmt.Errorf( + "Required variable not set: %s", k)) + } + } + + // Check that types match up + for name, proposedValue := range vs { + // Check for "map.key" fields. These stopped working with Terraform + // 0.7 but we do this to surface a better error message informing + // the user what happened. + if idx := strings.Index(name, "."); idx > 0 { + key := name[:idx] + if _, ok := cvs[key]; ok { + errs = append(errs, fmt.Errorf( + "%s: Overriding map keys with the format `name.key` is no "+ + "longer allowed. You may still override keys by setting "+ + "`name = { key = value }`. The maps will be merged. This "+ + "behavior appeared in 0.7.0.", name)) + continue + } + } + + schema, ok := cvs[name] + if !ok { + continue + } + + declaredType := schema.Type() + + switch declaredType { + case config.VariableTypeString: + switch proposedValue.(type) { + case string: + continue + } + case config.VariableTypeMap: + switch v := proposedValue.(type) { + case map[string]interface{}: + continue + case []map[string]interface{}: + // if we have a list of 1 map, it will get coerced later as needed + if len(v) == 1 { + continue + } + } + case config.VariableTypeList: + switch proposedValue.(type) { + case []interface{}: + continue + } + } + errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", + name, declaredType.Printable(), hclTypeName(proposedValue))) + } + + // TODO(mitchellh): variables that are unknown + + return errs +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go new file mode 100644 index 0000000000..46325595f1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/shadow.go @@ -0,0 +1,28 @@ +package terraform + +// Shadow is the interface that any "shadow" structures must implement. +// +// A shadow structure is an interface implementation (typically) that +// shadows a real implementation and verifies that the same behavior occurs +// on both. The semantics of this behavior are up to the interface itself. +// +// A shadow NEVER modifies real values or state. It must always be safe to use. +// +// For example, a ResourceProvider shadow ensures that the same operations +// are done on the same resources with the same configurations. +// +// The typical usage of a shadow following this interface is to complete +// the real operations, then call CloseShadow which tells the shadow that +// the real side is done. Then, once the shadow is also complete, call +// ShadowError to find any errors that may have been caught. +type Shadow interface { + // CloseShadow tells the shadow that the REAL implementation is + // complete. Therefore, any calls that would block should now return + // immediately since no more changes will happen to the real side. + CloseShadow() error + + // ShadowError returns the errors that the shadow has found. + // This should be called AFTER CloseShadow and AFTER the shadow is + // known to be complete (no more calls to it). 
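+	//
+	// A sketch of the expected call sequence, for some value s of type
+	// Shadow (the real-side work is driven elsewhere):
+	//
+	//     // ... run the real implementation to completion ...
+	//     if err := s.CloseShadow(); err != nil {
+	//         // closing failed; the shadow may be in a bad state
+	//     }
+	//     // ... wait for the shadow side to finish ...
+	//     if err := s.ShadowError(); err != nil {
+	//         // behavior diverged between real and shadow
+	//     }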
+ ShadowError() error +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go new file mode 100644 index 0000000000..116cf84f97 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go @@ -0,0 +1,273 @@ +package terraform + +import ( + "fmt" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/helper/shadow" +) + +// newShadowComponentFactory creates a shadowed contextComponentFactory +// so that requests to create new components result in both a real and +// shadow side. +func newShadowComponentFactory( + f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) { + // Create the shared data + shared := &shadowComponentFactoryShared{contextComponentFactory: f} + + // Create the real side + real := &shadowComponentFactory{ + shadowComponentFactoryShared: shared, + } + + // Create the shadow + shadow := &shadowComponentFactory{ + shadowComponentFactoryShared: shared, + Shadow: true, + } + + return real, shadow +} + +// shadowComponentFactory is the shadow side. Any components created +// with this factory are fake and will not cause real work to happen. +// +// Unlike other shadowers, the shadow component factory will allow the +// shadow to create _any_ component even if it is never requested on the +// real side. This is because errors will happen later downstream as function +// calls are made to the shadows that are never matched on the real side. +type shadowComponentFactory struct { + *shadowComponentFactoryShared + + Shadow bool // True if this should return the shadow + lock sync.Mutex +} + +func (f *shadowComponentFactory) ResourceProvider( + n, uid string) (ResourceProvider, error) { + f.lock.Lock() + defer f.lock.Unlock() + + real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid) + var result ResourceProvider = real + if f.Shadow { + result = shadow + } + + return result, err +} + +func (f *shadowComponentFactory) ResourceProvisioner( + n, uid string) (ResourceProvisioner, error) { + f.lock.Lock() + defer f.lock.Unlock() + + real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid) + var result ResourceProvisioner = real + if f.Shadow { + result = shadow + } + + return result, err +} + +// CloseShadow is called when the _real_ side is complete. This will cause +// all future blocking operations to return immediately on the shadow to +// ensure the shadow also completes. 
+func (f *shadowComponentFactory) CloseShadow() error { + // If we aren't the shadow, just return + if !f.Shadow { + return nil + } + + // Lock ourselves so we don't modify state + f.lock.Lock() + defer f.lock.Unlock() + + // Grab our shared state + shared := f.shadowComponentFactoryShared + + // If we're already closed, it's an error + if shared.closed { + return fmt.Errorf("component factory shadow already closed") + } + + // Close all the providers and provisioners and return the error + var result error + for _, n := range shared.providerKeys { + _, shadow, err := shared.ResourceProvider(n, n) + if err == nil && shadow != nil { + if err := shadow.CloseShadow(); err != nil { + result = multierror.Append(result, err) + } + } + } + + for _, n := range shared.provisionerKeys { + _, shadow, err := shared.ResourceProvisioner(n, n) + if err == nil && shadow != nil { + if err := shadow.CloseShadow(); err != nil { + result = multierror.Append(result, err) + } + } + } + + // Mark ourselves as closed + shared.closed = true + + return result +} + +func (f *shadowComponentFactory) ShadowError() error { + // If we aren't the shadow, just return + if !f.Shadow { + return nil + } + + // Lock ourselves so we don't modify state + f.lock.Lock() + defer f.lock.Unlock() + + // Grab our shared state + shared := f.shadowComponentFactoryShared + + // If we're not closed, it's an error + if !shared.closed { + return fmt.Errorf("component factory must be closed to retrieve errors") + } + + // Close all the providers and provisioners and return the error + var result error + for _, n := range shared.providerKeys { + _, shadow, err := shared.ResourceProvider(n, n) + if err == nil && shadow != nil { + if err := shadow.ShadowError(); err != nil { + result = multierror.Append(result, err) + } + } + } + + for _, n := range shared.provisionerKeys { + _, shadow, err := shared.ResourceProvisioner(n, n) + if err == nil && shadow != nil { + if err := shadow.ShadowError(); err != nil { + result = multierror.Append(result, err) + } + } + } + + return result +} + +// shadowComponentFactoryShared is shared data between the two factories. +// +// It is NOT SAFE to run any function on this struct in parallel. Lock +// access to this struct. +type shadowComponentFactoryShared struct { + contextComponentFactory + + closed bool + providers shadow.KeyedValue + providerKeys []string + provisioners shadow.KeyedValue + provisionerKeys []string +} + +// shadowComponentFactoryProviderEntry is the entry that is stored in +// the Shadows key/value for a provider. +type shadowComponentFactoryProviderEntry struct { + Real ResourceProvider + Shadow shadowResourceProvider + Err error +} + +type shadowComponentFactoryProvisionerEntry struct { + Real ResourceProvisioner + Shadow shadowResourceProvisioner + Err error +} + +func (f *shadowComponentFactoryShared) ResourceProvider( + n, uid string) (ResourceProvider, shadowResourceProvider, error) { + // Determine if we already have a value + raw, ok := f.providers.ValueOk(uid) + if !ok { + // Build the entry + var entry shadowComponentFactoryProviderEntry + + // No value, initialize.
Create the original + p, err := f.contextComponentFactory.ResourceProvider(n, uid) + if err != nil { + entry.Err = err + p = nil // Just to be sure + } + + if p != nil { + // Create the shadow + real, shadow := newShadowResourceProvider(p) + entry.Real = real + entry.Shadow = shadow + + if f.closed { + shadow.CloseShadow() + } + } + + // Store the value + f.providers.SetValue(uid, &entry) + f.providerKeys = append(f.providerKeys, uid) + raw = &entry + } + + // Read the entry + entry, ok := raw.(*shadowComponentFactoryProviderEntry) + if !ok { + return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw) + } + + // Return + return entry.Real, entry.Shadow, entry.Err +} + +func (f *shadowComponentFactoryShared) ResourceProvisioner( + n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) { + // Determine if we already have a value + raw, ok := f.provisioners.ValueOk(uid) + if !ok { + // Build the entry + var entry shadowComponentFactoryProvisionerEntry + + // No value, initialize. Create the original + p, err := f.contextComponentFactory.ResourceProvisioner(n, uid) + if err != nil { + entry.Err = err + p = nil // Just to be sure + } + + if p != nil { + // For now, just create a mock since we don't support provisioners yet + real, shadow := newShadowResourceProvisioner(p) + entry.Real = real + entry.Shadow = shadow + + if f.closed { + shadow.CloseShadow() + } + } + + // Store the value + f.provisioners.SetValue(uid, &entry) + f.provisionerKeys = append(f.provisionerKeys, uid) + raw = &entry + } + + // Read the entry + entry, ok := raw.(*shadowComponentFactoryProvisionerEntry) + if !ok { + return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw) + } + + // Return + return entry.Real, entry.Shadow, entry.Err +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go new file mode 100644 index 0000000000..5588af252c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go @@ -0,0 +1,158 @@ +package terraform + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/mitchellh/copystructure" +) + +// newShadowContext creates a new context that will shadow the given context +// when walking the graph. The resulting context should be used _only once_ +// for a graph walk. +// +// The returned Shadow should be closed after the graph walk with the +// real context is complete. Errors from the shadow can be retrieved there. +// +// Most importantly, any operations done on the shadow context (the returned +// context) will NEVER affect the real context. All structures are deep +// copied, no real providers or resources are used, etc. 
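+//
+// A sketch of the intended wiring (hedged; the exact call sites live in
+// Context's graph-walk code):
+//
+//     real, shadow, closer := newShadowContext(ctx)
+//     // ... walk the graph with both `real` and `shadow` ...
+//     closer.CloseShadow() // signal that the real walk is done
+//     if err := closer.ShadowError(); err != nil {
+//         // the shadow observed diverging behavior
+//     }
+//     if err := shadowContextVerify(real, shadow); err != nil {
+//         // final states or diffs did not match
+//     }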
+func newShadowContext(c *Context) (*Context, *Context, Shadow) { + // Copy the targets + targetRaw, err := copystructure.Copy(c.targets) + if err != nil { + panic(err) + } + + // Copy the variables + varRaw, err := copystructure.Copy(c.variables) + if err != nil { + panic(err) + } + + // Copy the provider inputs + providerInputRaw, err := copystructure.Copy(c.providerInputConfig) + if err != nil { + panic(err) + } + + // The factories + componentsReal, componentsShadow := newShadowComponentFactory(c.components) + + // Create the shadow + shadow := &Context{ + components: componentsShadow, + destroy: c.destroy, + diff: c.diff.DeepCopy(), + hooks: nil, + meta: c.meta, + module: c.module, + state: c.state.DeepCopy(), + targets: targetRaw.([]string), + variables: varRaw.(map[string]interface{}), + + // NOTE(mitchellh): This is not going to work for shadows that are + // testing that input results in the proper end state. At the time + // of writing, input is not used in any state-changing graph + // walks anyways, so this checks nothing. We set it to this to avoid + // any panics but even a "nil" value worked here. + uiInput: new(MockUIInput), + + // Hardcoded to 4 since parallelism in the shadow doesn't matter + // a ton since we're doing far less compared to the real side + // and our operations are MUCH faster. + parallelSem: NewSemaphore(4), + providerInputConfig: providerInputRaw.(map[string]map[string]interface{}), + } + + // Create the real context. This is effectively just a copy of + // the context given except we need to modify some of the values + // to point to the real side of a shadow so the shadow can compare values. + real := &Context{ + // The fields below are changed. + components: componentsReal, + + // The fields below are direct copies + destroy: c.destroy, + diff: c.diff, + // diffLock - no copy + hooks: c.hooks, + meta: c.meta, + module: c.module, + sh: c.sh, + state: c.state, + // stateLock - no copy + targets: c.targets, + uiInput: c.uiInput, + variables: c.variables, + + // l - no copy + parallelSem: c.parallelSem, + providerInputConfig: c.providerInputConfig, + runContext: c.runContext, + runContextCancel: c.runContextCancel, + shadowErr: c.shadowErr, + } + + return real, shadow, &shadowContextCloser{ + Components: componentsShadow, + } +} + +// shadowContextVerify takes the real and shadow context and verifies they +// have equal diffs and states. +func shadowContextVerify(real, shadow *Context) error { + var result error + + // The states compared must be pruned so they're minimal/clean + real.state.prune() + shadow.state.prune() + + // Compare the states + if !real.state.Equal(shadow.state) { + result = multierror.Append(result, fmt.Errorf( + "Real and shadow states do not match! "+ + "Real state:\n\n%s\n\n"+ + "Shadow state:\n\n%s\n\n", + real.state, shadow.state)) + } + + // Compare the diffs + if !real.diff.Equal(shadow.diff) { + result = multierror.Append(result, fmt.Errorf( + "Real and shadow diffs do not match! "+ + "Real diff:\n\n%s\n\n"+ + "Shadow diff:\n\n%s\n\n", + real.diff, shadow.diff)) + } + + return result +} + +// shadowContextCloser is the io.Closer returned by newShadowContext that +// closes all the shadows and returns the results. +type shadowContextCloser struct { + Components *shadowComponentFactory +} + +// Close closes the shadow context. 
+func (c *shadowContextCloser) CloseShadow() error { + return c.Components.CloseShadow() +} + +func (c *shadowContextCloser) ShadowError() error { + err := c.Components.ShadowError() + if err == nil { + return nil + } + + // This is a sad edge case: if the configuration contains uuid() at + // any point, we cannot reason about the shadow execution. Tested + // with Context2Plan_shadowUuid. + if strings.Contains(err.Error(), "uuid()") { + err = nil + } + + return err +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go new file mode 100644 index 0000000000..9741d7e796 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go @@ -0,0 +1,815 @@ +package terraform + +import ( + "fmt" + "log" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/helper/shadow" +) + +// shadowResourceProvider implements ResourceProvider for the shadow +// eval context defined in eval_context_shadow.go. +// +// This is used to verify behavior with a real provider. This shouldn't +// be used directly. +type shadowResourceProvider interface { + ResourceProvider + Shadow +} + +// newShadowResourceProvider creates a new shadowed ResourceProvider. +// +// This will assume a well-behaved real ResourceProvider. For example, +// it assumes that the `Resources` call underneath doesn't change values +// since once it is called on the real provider, it will be cached and +// returned in the shadow since the number of calls to that shouldn't affect +// actual behavior. +// +// However, with calls like Apply, call order is taken into account, +// parameters are checked for equality, etc. +func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) { + // Create the shared data + shared := shadowResourceProviderShared{} + + // Create the real provider that does actual work + real := &shadowResourceProviderReal{ + ResourceProvider: p, + Shared: &shared, + } + + // Create the shadow that watches the real value + shadow := &shadowResourceProviderShadow{ + Shared: &shared, + + resources: p.Resources(), + dataSources: p.DataSources(), + } + + return real, shadow +} + +// shadowResourceProviderReal is the real resource provider. Function calls +// to this will perform real work. This records the parameters and return +// values and call order for the shadow to reproduce.
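+//
+// Illustrative pairing (a sketch; the actual wiring is done by the shadow
+// component factory in shadow_components.go):
+//
+//     realP, shadowP := newShadowResourceProvider(underlying)
+//     s1, err1 := realP.Apply(info, state, diff)   // does the real work and
+//                                                  // records args and result
+//     s2, err2 := shadowP.Apply(info, state, diff) // replays the recorded
+//                                                  // result, flagging any
+//                                                  // argument mismatch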
+type shadowResourceProviderReal struct { + ResourceProvider + + Shared *shadowResourceProviderShared +} + +func (p *shadowResourceProviderReal) Close() error { + var result error + if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok { + result = c.Close() + } + + p.Shared.CloseErr.SetValue(result) + return result +} + +func (p *shadowResourceProviderReal) Input( + input UIInput, c *ResourceConfig) (*ResourceConfig, error) { + cCopy := c.DeepCopy() + + result, err := p.ResourceProvider.Input(input, c) + p.Shared.Input.SetValue(&shadowResourceProviderInput{ + Config: cCopy, + Result: result.DeepCopy(), + ResultErr: err, + }) + + return result, err +} + +func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) { + warns, errs := p.ResourceProvider.Validate(c) + p.Shared.Validate.SetValue(&shadowResourceProviderValidate{ + Config: c.DeepCopy(), + ResultWarn: warns, + ResultErr: errs, + }) + + return warns, errs +} + +func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error { + cCopy := c.DeepCopy() + + err := p.ResourceProvider.Configure(c) + p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{ + Config: cCopy, + Result: err, + }) + + return err +} + +func (p *shadowResourceProviderReal) Stop() error { + return p.ResourceProvider.Stop() +} + +func (p *shadowResourceProviderReal) ValidateResource( + t string, c *ResourceConfig) ([]string, []error) { + key := t + configCopy := c.DeepCopy() + + // Real operation + warns, errs := p.ResourceProvider.ValidateResource(t, c) + + // Initialize to ensure we always have a wrapper with a lock + p.Shared.ValidateResource.Init( + key, &shadowResourceProviderValidateResourceWrapper{}) + + // Get the result + raw := p.Shared.ValidateResource.Value(key) + wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper) + if !ok { + // If this fails then we just continue with our day... the shadow + // will fail too, but there isn't much we can do.
+ log.Printf( + "[ERROR] unknown value in ValidateResource shadow value: %#v", raw) + return warns, errs + } + + // Lock the wrapper for writing and record our call + wrapper.Lock() + defer wrapper.Unlock() + + wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{ + Config: configCopy, + Warns: warns, + Errors: errs, + }) + + // With it locked, call SetValue again so that it triggers WaitForChange + p.Shared.ValidateResource.SetValue(key, wrapper) + + // Return the result + return warns, errs +} + +func (p *shadowResourceProviderReal) Apply( + info *InstanceInfo, + state *InstanceState, + diff *InstanceDiff) (*InstanceState, error) { + // These have to be copied before the call since the call can modify + stateCopy := state.DeepCopy() + diffCopy := diff.DeepCopy() + + result, err := p.ResourceProvider.Apply(info, state, diff) + p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{ + State: stateCopy, + Diff: diffCopy, + Result: result.DeepCopy(), + ResultErr: err, + }) + + return result, err +} + +func (p *shadowResourceProviderReal) Diff( + info *InstanceInfo, + state *InstanceState, + desired *ResourceConfig) (*InstanceDiff, error) { + // These have to be copied before the call since the call can modify + stateCopy := state.DeepCopy() + desiredCopy := desired.DeepCopy() + + result, err := p.ResourceProvider.Diff(info, state, desired) + p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{ + State: stateCopy, + Desired: desiredCopy, + Result: result.DeepCopy(), + ResultErr: err, + }) + + return result, err +} + +func (p *shadowResourceProviderReal) Refresh( + info *InstanceInfo, + state *InstanceState) (*InstanceState, error) { + // These have to be copied before the call since the call can modify + stateCopy := state.DeepCopy() + + result, err := p.ResourceProvider.Refresh(info, state) + p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{ + State: stateCopy, + Result: result.DeepCopy(), + ResultErr: err, + }) + + return result, err +} + +func (p *shadowResourceProviderReal) ValidateDataSource( + t string, c *ResourceConfig) ([]string, []error) { + key := t + configCopy := c.DeepCopy() + + // Real operation + warns, errs := p.ResourceProvider.ValidateDataSource(t, c) + + // Initialize + p.Shared.ValidateDataSource.Init( + key, &shadowResourceProviderValidateDataSourceWrapper{}) + + // Get the result + raw := p.Shared.ValidateDataSource.Value(key) + wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper) + if !ok { + // If this fails then we just continue with our day... the shadow + // will fail too, but there isn't much we can do.
+ log.Printf( + "[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw) + return warns, errs + } + + // Lock the wrapper for writing and record our call + wrapper.Lock() + defer wrapper.Unlock() + + wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{ + Config: configCopy, + Warns: warns, + Errors: errs, + }) + + // Set it + p.Shared.ValidateDataSource.SetValue(key, wrapper) + + // Return the result + return warns, errs +} + +func (p *shadowResourceProviderReal) ReadDataDiff( + info *InstanceInfo, + desired *ResourceConfig) (*InstanceDiff, error) { + // These have to be copied before the call since the call can modify + desiredCopy := desired.DeepCopy() + + result, err := p.ResourceProvider.ReadDataDiff(info, desired) + p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{ + Desired: desiredCopy, + Result: result.DeepCopy(), + ResultErr: err, + }) + + return result, err +} + +func (p *shadowResourceProviderReal) ReadDataApply( + info *InstanceInfo, + diff *InstanceDiff) (*InstanceState, error) { + // These have to be copied before the call since the call can modify + diffCopy := diff.DeepCopy() + + result, err := p.ResourceProvider.ReadDataApply(info, diff) + p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{ + Diff: diffCopy, + Result: result.DeepCopy(), + ResultErr: err, + }) + + return result, err +} + +// shadowResourceProviderShadow is the shadow resource provider. Function +// calls never affect real resources. This is paired with the "real" side +// which must be called properly to enable recording. +type shadowResourceProviderShadow struct { + Shared *shadowResourceProviderShared + + // Cached values that are expected to not change + resources []ResourceType + dataSources []DataSource + + Error error // Error is the list of errors from the shadow + ErrorLock sync.Mutex +} + +type shadowResourceProviderShared struct { + // NOTE: Anytime a value is added here, be sure to add it to + // the Close() method so that it is closed.
+ + CloseErr shadow.Value + Input shadow.Value + Validate shadow.Value + Configure shadow.Value + ValidateResource shadow.KeyedValue + Apply shadow.KeyedValue + Diff shadow.KeyedValue + Refresh shadow.KeyedValue + ValidateDataSource shadow.KeyedValue + ReadDataDiff shadow.KeyedValue + ReadDataApply shadow.KeyedValue +} + +func (p *shadowResourceProviderShared) Close() error { + return shadow.Close(p) +} + +func (p *shadowResourceProviderShadow) CloseShadow() error { + err := p.Shared.Close() + if err != nil { + err = fmt.Errorf("close error: %s", err) + } + + return err +} + +func (p *shadowResourceProviderShadow) ShadowError() error { + return p.Error +} + +func (p *shadowResourceProviderShadow) Resources() []ResourceType { + return p.resources +} + +func (p *shadowResourceProviderShadow) DataSources() []DataSource { + return p.dataSources +} + +func (p *shadowResourceProviderShadow) Close() error { + v := p.Shared.CloseErr.Value() + if v == nil { + return nil + } + + return v.(error) +} + +func (p *shadowResourceProviderShadow) Input( + input UIInput, c *ResourceConfig) (*ResourceConfig, error) { + // Get the result of the input call + raw := p.Shared.Input.Value() + if raw == nil { + return nil, nil + } + + result, ok := raw.(*shadowResourceProviderInput) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'input' shadow value: %#v", raw)) + return nil, nil + } + + // Compare the parameters, which should be identical + if !c.Equal(result.Config) { + p.ErrorLock.Lock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", + result.Config, c)) + p.ErrorLock.Unlock() + } + + // Return the results + return result.Result, result.ResultErr +} + +func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) { + // Get the result of the validate call + raw := p.Shared.Validate.Value() + if raw == nil { + return nil, nil + } + + result, ok := raw.(*shadowResourceProviderValidate) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'validate' shadow value: %#v", raw)) + return nil, nil + } + + // Compare the parameters, which should be identical + if !c.Equal(result.Config) { + p.ErrorLock.Lock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", + result.Config, c)) + p.ErrorLock.Unlock() + } + + // Return the results + return result.ResultWarn, result.ResultErr +} + +func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error { + // Get the result of the call + raw := p.Shared.Configure.Value() + if raw == nil { + return nil + } + + result, ok := raw.(*shadowResourceProviderConfigure) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'configure' shadow value: %#v", raw)) + return nil + } + + // Compare the parameters, which should be identical + if !c.Equal(result.Config) { + p.ErrorLock.Lock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v", + result.Config, c)) + p.ErrorLock.Unlock() + } + + // Return the results + return result.Result +} + +// Stop returns immediately. 
+func (p *shadowResourceProviderShadow) Stop() error { + return nil +} + +func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { + // Unique key + key := t + + // Get the initial value + raw := p.Shared.ValidateResource.Value(key) + + // Find a validation with our configuration + var result *shadowResourceProviderValidateResource + for { + // Get the value + if raw == nil { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'ValidateResource' call for %q:\n\n%#v", + key, c)) + return nil, nil + } + + wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw)) + return nil, nil + } + + // Look for the matching call with our configuration + wrapper.RLock() + for _, call := range wrapper.Calls { + if call.Config.Equal(c) { + result = call + break + } + } + wrapper.RUnlock() + + // If we found a result, exit + if result != nil { + break + } + + // Wait for a change so we can get the wrapper again + raw = p.Shared.ValidateResource.WaitForChange(key) + } + + return result.Warns, result.Errors +} + +func (p *shadowResourceProviderShadow) Apply( + info *InstanceInfo, + state *InstanceState, + diff *InstanceDiff) (*InstanceState, error) { + // Unique key + key := info.uniqueId() + raw := p.Shared.Apply.Value(key) + if raw == nil { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'apply' call for %q:\n\n%#v\n\n%#v", + key, state, diff)) + return nil, nil + } + + result, ok := raw.(*shadowResourceProviderApply) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'apply' shadow value for %q: %#v", key, raw)) + return nil, nil + } + + // Compare the parameters, which should be identical + if !state.Equal(result.State) { + p.ErrorLock.Lock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v", + key, result.State, state)) + p.ErrorLock.Unlock() + } + + if !diff.Equal(result.Diff) { + p.ErrorLock.Lock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v", + key, result.Diff, diff)) + p.ErrorLock.Unlock() + } + + return result.Result, result.ResultErr +} + +func (p *shadowResourceProviderShadow) Diff( + info *InstanceInfo, + state *InstanceState, + desired *ResourceConfig) (*InstanceDiff, error) { + // Unique key + key := info.uniqueId() + raw := p.Shared.Diff.Value(key) + if raw == nil { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'diff' call for %q:\n\n%#v\n\n%#v", + key, state, desired)) + return nil, nil + } + + result, ok := raw.(*shadowResourceProviderDiff) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'diff' shadow value for %q: %#v", key, raw)) + return nil, nil + } + + // Compare the parameters, which should be identical + if !state.Equal(result.State) { + p.ErrorLock.Lock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v", + key, result.State, state)) + p.ErrorLock.Unlock() + } + if !desired.Equal(result.Desired) { + 
p.ErrorLock.Lock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v", + key, result.Desired, desired)) + p.ErrorLock.Unlock() + } + + return result.Result, result.ResultErr +} + +func (p *shadowResourceProviderShadow) Refresh( + info *InstanceInfo, + state *InstanceState) (*InstanceState, error) { + // Unique key + key := info.uniqueId() + raw := p.Shared.Refresh.Value(key) + if raw == nil { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'refresh' call for %q:\n\n%#v", + key, state)) + return nil, nil + } + + result, ok := raw.(*shadowResourceProviderRefresh) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'refresh' shadow value: %#v", raw)) + return nil, nil + } + + // Compare the parameters, which should be identical + if !state.Equal(result.State) { + p.ErrorLock.Lock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v", + key, result.State, state)) + p.ErrorLock.Unlock() + } + + return result.Result, result.ResultErr +} + +func (p *shadowResourceProviderShadow) ValidateDataSource( + t string, c *ResourceConfig) ([]string, []error) { + // Unique key + key := t + + // Get the initial value + raw := p.Shared.ValidateDataSource.Value(key) + + // Find a validation with our configuration + var result *shadowResourceProviderValidateDataSource + for { + // Get the value + if raw == nil { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'ValidateDataSource' call for %q:\n\n%#v", + key, c)) + return nil, nil + } + + wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'ValidateDataSource' shadow value: %#v", raw)) + return nil, nil + } + + // Look for the matching call with our configuration + wrapper.RLock() + for _, call := range wrapper.Calls { + if call.Config.Equal(c) { + result = call + break + } + } + wrapper.RUnlock() + + // If we found a result, exit + if result != nil { + break + } + + // Wait for a change so we can get the wrapper again + raw = p.Shared.ValidateDataSource.WaitForChange(key) + } + + return result.Warns, result.Errors +} + +func (p *shadowResourceProviderShadow) ReadDataDiff( + info *InstanceInfo, + desired *ResourceConfig) (*InstanceDiff, error) { + // Unique key + key := info.uniqueId() + raw := p.Shared.ReadDataDiff.Value(key) + if raw == nil { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'ReadDataDiff' call for %q:\n\n%#v", + key, desired)) + return nil, nil + } + + result, ok := raw.(*shadowResourceProviderReadDataDiff) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw)) + return nil, nil + } + + // Compare the parameters, which should be identical + if !desired.Equal(result.Desired) { + p.ErrorLock.Lock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v", + key, result.Desired, desired)) + p.ErrorLock.Unlock() + } + + return result.Result, result.ResultErr +} + +func (p *shadowResourceProviderShadow) ReadDataApply( + info 
*InstanceInfo, + d *InstanceDiff) (*InstanceState, error) { + // Unique key + key := info.uniqueId() + raw := p.Shared.ReadDataApply.Value(key) + if raw == nil { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'ReadDataApply' call for %q:\n\n%#v", + key, d)) + return nil, nil + } + + result, ok := raw.(*shadowResourceProviderReadDataApply) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw)) + return nil, nil + } + + // Compare the parameters, which should be identical + if !d.Equal(result.Diff) { + p.ErrorLock.Lock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v", + result.Diff, d)) + p.ErrorLock.Unlock() + } + + return result.Result, result.ResultErr +} + +func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { + panic("import not supported by shadow graph") +} + +// The structs for the various function calls are put below. These structs +// are used to carry call information across the real/shadow boundaries. + +type shadowResourceProviderInput struct { + Config *ResourceConfig + Result *ResourceConfig + ResultErr error +} + +type shadowResourceProviderValidate struct { + Config *ResourceConfig + ResultWarn []string + ResultErr []error +} + +type shadowResourceProviderConfigure struct { + Config *ResourceConfig + Result error +} + +type shadowResourceProviderValidateResourceWrapper struct { + sync.RWMutex + + Calls []*shadowResourceProviderValidateResource +} + +type shadowResourceProviderValidateResource struct { + Config *ResourceConfig + Warns []string + Errors []error +} + +type shadowResourceProviderApply struct { + State *InstanceState + Diff *InstanceDiff + Result *InstanceState + ResultErr error +} + +type shadowResourceProviderDiff struct { + State *InstanceState + Desired *ResourceConfig + Result *InstanceDiff + ResultErr error +} + +type shadowResourceProviderRefresh struct { + State *InstanceState + Result *InstanceState + ResultErr error +} + +type shadowResourceProviderValidateDataSourceWrapper struct { + sync.RWMutex + + Calls []*shadowResourceProviderValidateDataSource +} + +type shadowResourceProviderValidateDataSource struct { + Config *ResourceConfig + Warns []string + Errors []error +} + +type shadowResourceProviderReadDataDiff struct { + Desired *ResourceConfig + Result *InstanceDiff + ResultErr error +} + +type shadowResourceProviderReadDataApply struct { + Diff *InstanceDiff + Result *InstanceState + ResultErr error +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go new file mode 100644 index 0000000000..60a4908896 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go @@ -0,0 +1,282 @@ +package terraform + +import ( + "fmt" + "io" + "log" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/helper/shadow" +) + +// shadowResourceProvisioner implements ResourceProvisioner for the shadow +// eval context defined in eval_context_shadow.go. +// +// This is used to verify behavior with a real provisioner. This shouldn't +// be used directly. 
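+//
+// As with the provider shadow, usage is via the paired constructor (sketch):
+//
+//     realP, shadowP := newShadowResourceProvisioner(underlying)
+//     _ = realP.Apply(output, state, cfg)   // runs and records, keyed by state ID
+//     _ = shadowP.Apply(output, state, cfg) // looks up the recorded call and
+//                                           // replays its error result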
+type shadowResourceProvisioner interface { + ResourceProvisioner + Shadow +} + +// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner. +func newShadowResourceProvisioner( + p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) { + // Create the shared data + shared := shadowResourceProvisionerShared{ + Validate: shadow.ComparedValue{ + Func: shadowResourceProvisionerValidateCompare, + }, + } + + // Create the real provisioner that does actual work + real := &shadowResourceProvisionerReal{ + ResourceProvisioner: p, + Shared: &shared, + } + + // Create the shadow that watches the real value + shadow := &shadowResourceProvisionerShadow{ + Shared: &shared, + } + + return real, shadow +} + +// shadowResourceProvisionerReal is the real resource provisioner. Function calls +// to this will perform real work. This records the parameters and return +// values and call order for the shadow to reproduce. +type shadowResourceProvisionerReal struct { + ResourceProvisioner + + Shared *shadowResourceProvisionerShared +} + +func (p *shadowResourceProvisionerReal) Close() error { + var result error + if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok { + result = c.Close() + } + + p.Shared.CloseErr.SetValue(result) + return result +} + +func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) { + warns, errs := p.ResourceProvisioner.Validate(c) + p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{ + Config: c, + ResultWarn: warns, + ResultErr: errs, + }) + + return warns, errs +} + +func (p *shadowResourceProvisionerReal) Apply( + output UIOutput, s *InstanceState, c *ResourceConfig) error { + err := p.ResourceProvisioner.Apply(output, s, c) + + // Write the result, grab a lock for writing. This should never + // block for long since the operations below don't block. + p.Shared.ApplyLock.Lock() + defer p.Shared.ApplyLock.Unlock() + + key := s.ID + raw, ok := p.Shared.Apply.ValueOk(key) + if !ok { + // Setup a new value + raw = &shadow.ComparedValue{ + Func: shadowResourceProvisionerApplyCompare, + } + + // Set it + p.Shared.Apply.SetValue(key, raw) + } + + compareVal, ok := raw.(*shadow.ComparedValue) + if !ok { + // Just log and return so that we don't cause the real side + // any side effects. + log.Printf("[ERROR] unknown value in 'apply': %#v", raw) + return err + } + + // Write the resulting value + compareVal.SetValue(&shadowResourceProvisionerApply{ + Config: c, + ResultErr: err, + }) + + return err +} + +func (p *shadowResourceProvisionerReal) Stop() error { + return p.ResourceProvisioner.Stop() +} + +// shadowResourceProvisionerShadow is the shadow resource provisioner. Function +// calls never affect real resources. This is paired with the "real" side +// which must be called properly to enable recording. +type shadowResourceProvisionerShadow struct { + Shared *shadowResourceProvisionerShared + + Error error // Error is the list of errors from the shadow + ErrorLock sync.Mutex +} + +type shadowResourceProvisionerShared struct { + // NOTE: Anytime a value is added here, be sure to add it to + // the Close() method so that it is closed.
+ + CloseErr shadow.Value + Validate shadow.ComparedValue + Apply shadow.KeyedValue + ApplyLock sync.Mutex // For writing only +} + +func (p *shadowResourceProvisionerShared) Close() error { + closers := []io.Closer{ + &p.CloseErr, + } + + for _, c := range closers { + // This should never happen, but we don't panic because a panic + // could affect the real behavior of Terraform and a shadow should + // never be able to do that. + if err := c.Close(); err != nil { + return err + } + } + + return nil +} + +func (p *shadowResourceProvisionerShadow) CloseShadow() error { + err := p.Shared.Close() + if err != nil { + err = fmt.Errorf("close error: %s", err) + } + + return err +} + +func (p *shadowResourceProvisionerShadow) ShadowError() error { + return p.Error +} + +func (p *shadowResourceProvisionerShadow) Close() error { + v := p.Shared.CloseErr.Value() + if v == nil { + return nil + } + + return v.(error) +} + +func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) { + // Get the result of the validate call + raw := p.Shared.Validate.Value(c) + if raw == nil { + return nil, nil + } + + result, ok := raw.(*shadowResourceProvisionerValidate) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'validate' shadow value: %#v", raw)) + return nil, nil + } + + // We don't need to compare configurations because we key on the + // configuration so just return right away. + return result.ResultWarn, result.ResultErr +} + +func (p *shadowResourceProvisionerShadow) Apply( + output UIOutput, s *InstanceState, c *ResourceConfig) error { + // Get the value based on the key + key := s.ID + raw := p.Shared.Apply.Value(key) + if raw == nil { + return nil + } + + compareVal, ok := raw.(*shadow.ComparedValue) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'apply' shadow value: %#v", raw)) + return nil + } + + // With the compared value, we compare against our config + raw = compareVal.Value(c) + if raw == nil { + return nil + } + + result, ok := raw.(*shadowResourceProvisionerApply) + if !ok { + p.ErrorLock.Lock() + defer p.ErrorLock.Unlock() + p.Error = multierror.Append(p.Error, fmt.Errorf( + "Unknown 'apply' shadow value: %#v", raw)) + return nil + } + + return result.ResultErr +} + +func (p *shadowResourceProvisionerShadow) Stop() error { + // For the shadow, we always just return nil since a Stop indicates + // that we were interrupted and shadows are disabled during interrupts + // anyways. + return nil +} + +// The structs for the various function calls are put below. These structs +// are used to carry call information across the real/shadow boundaries. 
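+//
+// The compare funcs at the bottom key a shadow.ComparedValue entry by the
+// *ResourceConfig that produced it, roughly:
+//
+//     v := &shadow.ComparedValue{Func: shadowResourceProvisionerValidateCompare}
+//     v.SetValue(&shadowResourceProvisionerValidate{Config: cfg})
+//     got := v.Value(cfg) // matched via cfg.Equal(result.Config)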
+ +type shadowResourceProvisionerValidate struct { + Config *ResourceConfig + ResultWarn []string + ResultErr []error +} + +type shadowResourceProvisionerApply struct { + Config *ResourceConfig + ResultErr error +} + +func shadowResourceProvisionerValidateCompare(k, v interface{}) bool { + c, ok := k.(*ResourceConfig) + if !ok { + return false + } + + result, ok := v.(*shadowResourceProvisionerValidate) + if !ok { + return false + } + + return c.Equal(result.Config) +} + +func shadowResourceProvisionerApplyCompare(k, v interface{}) bool { + c, ok := k.(*ResourceConfig) + if !ok { + return false + } + + result, ok := v.(*shadowResourceProvisionerApply) + if !ok { + return false + } + + return c.Equal(result.Config) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go new file mode 100644 index 0000000000..074b682454 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/state.go @@ -0,0 +1,2118 @@ +package terraform + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/config" + "github.com/mitchellh/copystructure" + "github.com/satori/go.uuid" +) + +const ( + // StateVersion is the current version for our state file + StateVersion = 3 +) + +// rootModulePath is the path of the root module +var rootModulePath = []string{"root"} + +// normalizeModulePath takes a raw module path and returns a path that +// has the rootModulePath prepended to it. If I could go back in time I +// would've never had a rootModulePath (empty path would be root). We can +// still fix this but thats a big refactor that my branch doesn't make sense +// for. Instead, this function normalizes paths. +func normalizeModulePath(p []string) []string { + k := len(rootModulePath) + + // If we already have a root module prefix, we're done + if len(p) >= len(rootModulePath) { + if reflect.DeepEqual(p[:k], rootModulePath) { + return p + } + } + + // None? Prefix it + result := make([]string, len(rootModulePath)+len(p)) + copy(result, rootModulePath) + copy(result[k:], p) + return result +} + +// State keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +type State struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *RemoteState `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. 
This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*ModuleState `json:"modules"` + + mu sync.Mutex +} + +func (s *State) Lock() { s.mu.Lock() } +func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. +// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. +func (s *State) AddModule(path []string) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path []string) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + m = &ModuleState{Path: path} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (s *State) ModuleByPath(path []string) *ModuleState { + if s == nil { + return nil + } + s.Lock() + defer s.Unlock() + + return s.moduleByPath(path) +} + +func (s *State) moduleByPath(path []string) *ModuleState { + for _, mod := range s.Modules { + if mod == nil { + continue + } + if mod.Path == nil { + panic("missing module path") + } + if reflect.DeepEqual(mod.Path, path) { + return mod + } + } + return nil +} + +// ModuleOrphans returns all the module orphans in this state by +// returning their full paths. These paths can be used with ModuleByPath +// to return the actual state. +func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string { + s.Lock() + defer s.Unlock() + + return s.moduleOrphans(path, c) + +} + +func (s *State) moduleOrphans(path []string, c *config.Config) [][]string { + // direct keeps track of what direct children we have both in our config + // and in our state. childrenKeys keeps track of what isn't an orphan. + direct := make(map[string]struct{}) + childrenKeys := make(map[string]struct{}) + if c != nil { + for _, m := range c.Modules { + childrenKeys[m.Name] = struct{}{} + direct[m.Name] = struct{}{} + } + } + + // Go over the direct children and find any that aren't in our keys. + var orphans [][]string + for _, m := range s.children(path) { + key := m.Path[len(m.Path)-1] + + // Record that we found this key as a direct child. We use this + // later to find orphan nested modules. 
+ direct[key] = struct{}{} + + // If we have a direct child still in our config, it is not an orphan + if _, ok := childrenKeys[key]; ok { + continue + } + + orphans = append(orphans, m.Path) + } + + // Find the orphans that are nested... + for _, m := range s.Modules { + if m == nil { + continue + } + + // We only want modules that are at least grandchildren + if len(m.Path) < len(path)+2 { + continue + } + + // If it isn't part of our tree, continue + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + // If we have the direct child, then just skip it. + key := m.Path[len(path)] + if _, ok := direct[key]; ok { + continue + } + + orphanPath := m.Path[:len(path)+1] + + // Don't double-add if we've already added this orphan (which can happen if + // there are multiple nested sub-modules that get orphaned together). + alreadyAdded := false + for _, o := range orphans { + if reflect.DeepEqual(o, orphanPath) { + alreadyAdded = true + break + } + } + if alreadyAdded { + continue + } + + // Add this orphan + orphans = append(orphans, orphanPath) + } + + return orphans +} + +// Empty returns true if the state is empty. +func (s *State) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return len(s.Modules) == 0 +} + +// HasResources returns true if the state contains any resources. +// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + +// IsRemote returns true if State represents a state that exists and is +// remote. +func (s *State) IsRemote() bool { + if s == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Remote == nil { + return false + } + if s.Remote.Type == "" { + return false + } + + return true +} + +// Validate validates the integrity of this state file. +// +// Certain properties of the statefile are expected by Terraform in order +// to behave properly. The core of Terraform will assume that once it +// receives a State structure that it has been validated. This validation +// check should be called to ensure that. +// +// If this returns an error, then the user should be notified. The error +// response will include detailed information on the nature of the error. +func (s *State) Validate() error { + s.Lock() + defer s.Unlock() + + var result error + + // !!!! FOR DEVELOPERS !!!! + // + // Any errors returned from this Validate function will BLOCK TERRAFORM + // from loading a state file. Therefore, this should only contain checks + // that are only resolvable through manual intervention. + // + // !!!! FOR DEVELOPERS !!!! + + // Make sure there are no duplicate module states. We open a new + // block here so we can use basic variable names and future validations + // can do the same. + { + found := make(map[string]struct{}) + for _, ms := range s.Modules { + if ms == nil { + continue + } + + key := strings.Join(ms.Path, ".") + if _, ok := found[key]; ok { + result = multierror.Append(result, fmt.Errorf( + strings.TrimSpace(stateValidateErrMultiModule), key)) + continue + } + + found[key] = struct{}{} + } + } + + return result +} + +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. +// +// If the address references a module state or resource, it will delete +// all children as well. 
To check what will be deleted, use a StateFilter
+// first.
+func (s *State) Remove(addr ...string) error {
+	s.Lock()
+	defer s.Unlock()
+
+	// Filter out what we need to delete
+	filter := &StateFilter{State: s}
+	results, err := filter.Filter(addr...)
+	if err != nil {
+		return err
+	}
+
+	// If we have no results, exit early; we're not going to do anything.
+	// While what happens below is fairly fast, this is an important early
+	// exit since the prune below might modify the state more and we don't
+	// want to modify the state if we don't have to.
+	if len(results) == 0 {
+		return nil
+	}
+
+	// Go through each result and grab what we need
+	removed := make(map[interface{}]struct{})
+	for _, r := range results {
+		// Convert the path to our own type
+		path := append([]string{"root"}, r.Path...)
+
+		// If we removed this already, then ignore
+		if _, ok := removed[r.Value]; ok {
+			continue
+		}
+
+		// If we removed the parent already, then ignore
+		if r.Parent != nil {
+			if _, ok := removed[r.Parent.Value]; ok {
+				continue
+			}
+		}
+
+		// Add this to the removed list
+		removed[r.Value] = struct{}{}
+
+		switch v := r.Value.(type) {
+		case *ModuleState:
+			s.removeModule(path, v)
+		case *ResourceState:
+			s.removeResource(path, v)
+		case *InstanceState:
+			s.removeInstance(path, r.Parent.Value.(*ResourceState), v)
+		default:
+			return fmt.Errorf("unknown type to delete: %T", r.Value)
+		}
+	}
+
+	// Prune since the removal functions often do the bare minimum to
+	// remove a thing and may leave around dangling empty modules, resources,
+	// etc. Prune will clean that all up.
+	s.prune()
+
+	return nil
+}
+
+func (s *State) removeModule(path []string, v *ModuleState) {
+	for i, m := range s.Modules {
+		if m == v {
+			s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
+			return
+		}
+	}
+}
+
+func (s *State) removeResource(path []string, v *ResourceState) {
+	// Get the module this resource lives in. If it doesn't exist, we're done.
+	mod := s.moduleByPath(path)
+	if mod == nil {
+		return
+	}
+
+	// Find this resource. This is an O(N) lookup; with the key it could be
+	// O(1), but even with thousands of resources this shouldn't matter right
+	// now. We can easily improve performance here when the time comes.
+	for k, r := range mod.Resources {
+		if r == v {
+			// Found it
+			delete(mod.Resources, k)
+			return
+		}
+	}
+}
+
+func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) {
+	// Go through the resource and find the instance that matches this
+	// (if any) and remove it.
+
+	// Check primary
+	if r.Primary == v {
+		r.Primary = nil
+		return
+	}
+
+	// Check the deposed instances. The shortened slice is written back to
+	// the resource so the removal is not lost.
+	for i, instance := range r.Deposed {
+		if instance == v {
+			// Found it, remove it
+			copy(r.Deposed[i:], r.Deposed[i+1:])
+			r.Deposed[len(r.Deposed)-1] = nil
+			r.Deposed = r.Deposed[:len(r.Deposed)-1]
+
+			// Done
+			return
+		}
+	}
+}
+
+// RootModule returns the ModuleState for the root module
+func (s *State) RootModule() *ModuleState {
+	root := s.ModuleByPath(rootModulePath)
+	if root == nil {
+		panic("missing root module")
+	}
+	return root
+}
+
+// Equal tests if one state is equal to another.
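+//
+// For example (an illustrative sketch, not part of the original source): a
+// state always compares equal to its own deep copy, since the version and
+// every module match.
+//
+//	a := NewState()
+//	b := a.DeepCopy()
+//	same := a.Equal(b) // true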
+func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(m.Path) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! + if !m.Equal(otherM) { + return false + } + } + + return true +} + +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". +// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but Terraform +// does not guarantee this and so the result of this method should be +// used with care. +// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning. +func (s *State) CompareAges(other *State) (StateAgeComparison, error) { + // nil states are "older" than actual states + switch { + case s != nil && other == nil: + return StateAgeReceiverNewer, nil + case s == nil && other != nil: + return StateAgeReceiverOlder, nil + case s == nil && other == nil: + return StateAgeEqual, nil + } + + if !s.SameLineage(other) { + return StateAgeEqual, fmt.Errorf( + "can't compare two states of differing lineage", + ) + } + + s.Lock() + defer s.Unlock() + + switch { + case s.Serial < other.Serial: + return StateAgeReceiverOlder, nil + case s.Serial > other.Serial: + return StateAgeReceiverNewer, nil + default: + return StateAgeEqual, nil + } +} + +// SameLineage returns true only if the state given in argument belongs +// to the same "lineage" of states as the receiver. +func (s *State) SameLineage(other *State) bool { + s.Lock() + defer s.Unlock() + + // If one of the states has no lineage then it is assumed to predate + // this concept, and so we'll accept it as belonging to any lineage + // so that a lineage string can be assigned to newer versions + // without breaking compatibility with older versions. + if s.Lineage == "" || other.Lineage == "" { + return true + } + + return s.Lineage == other.Lineage +} + +// DeepCopy performs a deep copy of the state structure and returns +// a new structure. +func (s *State) DeepCopy() *State { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*State) +} + +// IncrementSerialMaybe increments the serial number of this state +// if it different from the other state. 
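+//
+// Illustrative sketch (not in the original source; assumes remote is an
+// initialized *State sharing the same lineage and serial): changing an
+// output locally and calling IncrementSerialMaybe bumps the local serial
+// past the remote one.
+//
+//	local := remote.DeepCopy()
+//	local.RootModule().Outputs["addr"] = &OutputState{Type: "string", Value: "10.0.0.1"}
+//	local.IncrementSerialMaybe(remote) // local.Serial == remote.Serial + 1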
+func (s *State) IncrementSerialMaybe(other *State) { + if s == nil { + return + } + if other == nil { + return + } + s.Lock() + defer s.Unlock() + + if s.Serial > other.Serial { + return + } + if other.TFVersion != s.TFVersion || !s.equal(other) { + if other.Serial > s.Serial { + s.Serial = other.Serial + } + + s.Serial++ + } +} + +// FromFutureTerraform checks if this state was written by a Terraform +// version from the future. +func (s *State) FromFutureTerraform() bool { + s.Lock() + defer s.Unlock() + + // No TF version means it is certainly from the past + if s.TFVersion == "" { + return false + } + + v := version.Must(version.NewVersion(s.TFVersion)) + return SemVersion.LessThan(v) +} + +func (s *State) Init() { + s.Lock() + defer s.Unlock() + s.init() +} + +func (s *State) init() { + if s.Version == 0 { + s.Version = StateVersion + } + if s.moduleByPath(rootModulePath) == nil { + s.addModule(rootModulePath) + } + s.ensureHasLineage() + + for _, mod := range s.Modules { + if mod != nil { + mod.init() + } + } + + if s.Remote != nil { + s.Remote.init() + } + +} + +func (s *State) EnsureHasLineage() { + s.Lock() + defer s.Unlock() + + s.ensureHasLineage() +} + +func (s *State) ensureHasLineage() { + if s.Lineage == "" { + s.Lineage = uuid.NewV4().String() + log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) + } else { + log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) + } +} + +// AddModuleState insert this module state and override any existing ModuleState +func (s *State) AddModuleState(mod *ModuleState) { + mod.init() + s.Lock() + defer s.Unlock() + + s.addModuleState(mod) +} + +func (s *State) addModuleState(mod *ModuleState) { + for i, m := range s.Modules { + if reflect.DeepEqual(m.Path, mod.Path) { + s.Modules[i] = mod + return + } + } + + s.Modules = append(s.Modules, mod) + s.sort() +} + +// prune is used to remove any resources that are no longer required +func (s *State) prune() { + if s == nil { + return + } + + // Filter out empty modules. + // A module is always assumed to have a path, and it's length isn't always + // bounds checked later on. Modules may be "emptied" during destroy, but we + // never want to store those in the state. + for i := 0; i < len(s.Modules); i++ { + if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { + s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) + i-- + } + } + + for _, mod := range s.Modules { + mod.prune() + } + if s.Remote != nil && s.Remote.Empty() { + s.Remote = nil + } +} + +// sort sorts the modules +func (s *State) sort() { + sort.Sort(moduleStateSort(s.Modules)) + + // Allow modules to be sorted + for _, m := range s.Modules { + if m != nil { + m.sort() + } + } +} + +func (s *State) String() string { + if s == nil { + return "" + } + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + for _, m := range s.Modules { + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// BackendState stores the configuration to connect to a remote backend. 
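+//
+// In a state file this appears roughly as follows (illustrative values,
+// shown here for an s3 backend):
+//
+//	"backend": {
+//	    "type": "s3",
+//	    "config": {"bucket": "my-bucket", "key": "terraform.tfstate"},
+//	    "hash": 1234567890
+//	}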
+type BackendState struct { + Type string `json:"type"` // Backend type + Config map[string]interface{} `json:"config"` // Backend raw config + + // Hash is the hash code to uniquely identify the original source + // configuration. We use this to detect when there is a change in + // configuration even when "type" isn't changed. + Hash uint64 `json:"hash"` +} + +// Empty returns true if BackendState has no state. +func (s *BackendState) Empty() bool { + return s == nil || s.Type == "" +} + +// Rehash returns a unique content hash for this backend's configuration +// as a uint64 value. +// The Hash stored in the backend state needs to match the config itself, but +// we need to compare the backend config after it has been combined with all +// options. +// This function must match the implementation used by config.Backend. +func (s *BackendState) Rehash() uint64 { + if s == nil { + return 0 + } + + cfg := config.Backend{ + Type: s.Type, + RawConfig: &config.RawConfig{ + Raw: s.Config, + }, + } + + return cfg.Rehash() +} + +// RemoteState is used to track the information about a remote +// state store that we push/pull state to. +type RemoteState struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` + + mu sync.Mutex +} + +func (s *RemoteState) Lock() { s.mu.Lock() } +func (s *RemoteState) Unlock() { s.mu.Unlock() } + +func (r *RemoteState) init() { + r.Lock() + defer r.Unlock() + + if r.Config == nil { + r.Config = make(map[string]string) + } +} + +func (r *RemoteState) deepcopy() *RemoteState { + r.Lock() + defer r.Unlock() + + confCopy := make(map[string]string, len(r.Config)) + for k, v := range r.Config { + confCopy[k] = v + } + return &RemoteState{ + Type: r.Type, + Config: confCopy, + } +} + +func (r *RemoteState) Empty() bool { + if r == nil { + return true + } + r.Lock() + defer r.Unlock() + + return r.Type == "" +} + +func (r *RemoteState) Equals(other *RemoteState) bool { + r.Lock() + defer r.Unlock() + + if r.Type != other.Type { + return false + } + if len(r.Config) != len(other.Config) { + return false + } + for k, v := range r.Config { + if other.Config[k] != v { + return false + } + } + return true +} + +// OutputState is used to track the state relevant to a single output. +type OutputState struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` + + mu sync.Mutex +} + +func (s *OutputState) Lock() { s.mu.Lock() } +func (s *OutputState) Unlock() { s.mu.Unlock() } + +func (s *OutputState) String() string { + return fmt.Sprintf("%#v", s.Value) +} + +// Equal compares two OutputState structures for equality. nil values are +// considered equal. 
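+//
+// For example (illustrative sketch, not in the original source):
+//
+//	a := &OutputState{Type: "string", Value: "ok"}
+//	b := &OutputState{Type: "string", Value: "ok"}
+//	_ = a.Equal(b) // true: type, sensitivity, and value all match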
+func (s *OutputState) Equal(other *OutputState) bool {
+	if s == nil && other == nil {
+		return true
+	}
+
+	if s == nil || other == nil {
+		return false
+	}
+	s.Lock()
+	defer s.Unlock()
+
+	if s.Type != other.Type {
+		return false
+	}
+
+	if s.Sensitive != other.Sensitive {
+		return false
+	}
+
+	if !reflect.DeepEqual(s.Value, other.Value) {
+		return false
+	}
+
+	return true
+}
+
+func (s *OutputState) deepcopy() *OutputState {
+	if s == nil {
+		return nil
+	}
+
+	stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
+	if err != nil {
+		panic(fmt.Errorf("Error copying output value: %s", err))
+	}
+
+	return stateCopy.(*OutputState)
+}
+
+// ModuleState is used to track all the state relevant to a single
+// module. Prior to Terraform 0.3, all state belonged to the "root"
+// module.
+type ModuleState struct {
+	// Path is the import path from the root module. Module imports are
+	// always disjoint, so the path represents a module tree
+	Path []string `json:"path"`
+
+	// Outputs declared by the module and maintained for each module
+	// even though only the root module technically needs to be kept.
+	// This allows operators to inspect values at the boundaries.
+	Outputs map[string]*OutputState `json:"outputs"`
+
+	// Resources is a mapping of the logically named resource to
+	// the state of the resource. Each resource may actually have
+	// N instances underneath, although a user only needs to think
+	// about the 1:1 case.
+	Resources map[string]*ResourceState `json:"resources"`
+
+	// Dependencies are a list of things that this module relies on
+	// existing to remain intact. For example: a module may depend
+	// on a VPC ID given by an aws_vpc resource.
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a module that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on"`
+
+	mu sync.Mutex
+}
+
+func (s *ModuleState) Lock() { s.mu.Lock() }
+func (s *ModuleState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether one module state is equal to another.
+func (m *ModuleState) Equal(other *ModuleState) bool {
+	m.Lock()
+	defer m.Unlock()
+
+	// Paths must be equal
+	if !reflect.DeepEqual(m.Path, other.Path) {
+		return false
+	}
+
+	// Outputs must be equal
+	if len(m.Outputs) != len(other.Outputs) {
+		return false
+	}
+	for k, v := range m.Outputs {
+		if !other.Outputs[k].Equal(v) {
+			return false
+		}
+	}
+
+	// Dependencies must be equal. This sorts these in place but
+	// this shouldn't cause any problems.
+	sort.Strings(m.Dependencies)
+	sort.Strings(other.Dependencies)
+	if len(m.Dependencies) != len(other.Dependencies) {
+		return false
+	}
+	for i, d := range m.Dependencies {
+		if other.Dependencies[i] != d {
+			return false
+		}
+	}
+
+	// Resources must be equal
+	if len(m.Resources) != len(other.Resources) {
+		return false
+	}
+	for k, r := range m.Resources {
+		otherR, ok := other.Resources[k]
+		if !ok {
+			return false
+		}
+
+		if !r.Equal(otherR) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// IsRoot says whether or not this module state is for the root module.
+func (m *ModuleState) IsRoot() bool {
+	m.Lock()
+	defer m.Unlock()
+	return reflect.DeepEqual(m.Path, rootModulePath)
+}
+
+// IsDescendent returns true if other is a descendent of this module.
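+//
+// For example (illustrative sketch, not in the original source):
+// "root.app" is a descendent of "root", but not the other way around.
+//
+//	root := &ModuleState{Path: []string{"root"}}
+//	app := &ModuleState{Path: []string{"root", "app"}}
+//	_ = root.IsDescendent(app) // true
+//	_ = app.IsDescendent(root) // false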
+func (m *ModuleState) IsDescendent(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + i := len(m.Path) + return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) +} + +// Orphans returns a list of keys of resources that are in the State +// but aren't present in the configuration itself. Hence, these keys +// represent the state of resources that are orphans. +func (m *ModuleState) Orphans(c *config.Config) []string { + m.Lock() + defer m.Unlock() + + keys := make(map[string]struct{}) + for k, _ := range m.Resources { + keys[k] = struct{}{} + } + + if c != nil { + for _, r := range c.Resources { + delete(keys, r.Id()) + + for k, _ := range keys { + if strings.HasPrefix(k, r.Id()+".") { + delete(keys, k) + } + } + } + } + + result := make([]string, 0, len(keys)) + for k, _ := range keys { + result = append(result, k) + } + + return result +} + +// View returns a view with the given resource prefix. +func (m *ModuleState) View(id string) *ModuleState { + if m == nil { + return m + } + + r := m.deepcopy() + for k, _ := range r.Resources { + if id == k || strings.HasPrefix(k, id+".") { + continue + } + + delete(r.Resources, k) + } + + return r +} + +func (m *ModuleState) init() { + m.Lock() + defer m.Unlock() + + if m.Path == nil { + m.Path = []string{} + } + if m.Outputs == nil { + m.Outputs = make(map[string]*OutputState) + } + if m.Resources == nil { + m.Resources = make(map[string]*ResourceState) + } + + if m.Dependencies == nil { + m.Dependencies = make([]string, 0) + } + + for _, rs := range m.Resources { + rs.init() + } +} + +func (m *ModuleState) deepcopy() *ModuleState { + if m == nil { + return nil + } + + stateCopy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + + return stateCopy.(*ModuleState) +} + +// prune is used to remove any resources that are no longer required +func (m *ModuleState) prune() { + m.Lock() + defer m.Unlock() + + for k, v := range m.Resources { + if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { + delete(m.Resources, k) + continue + } + + v.prune() + } + + for k, v := range m.Outputs { + if v.Value == config.UnknownVariableValue { + delete(m.Outputs, k) + } + } + + m.Dependencies = uniqueStrings(m.Dependencies) +} + +func (m *ModuleState) sort() { + for _, v := range m.Resources { + v.sort() + } +} + +func (m *ModuleState) String() string { + m.Lock() + defer m.Unlock() + + var buf bytes.Buffer + + if len(m.Resources) == 0 { + buf.WriteString("") + } + + names := make([]string, 0, len(m.Resources)) + for name, _ := range m.Resources { + names = append(names, name) + } + + sort.Sort(resourceNameSort(names)) + + for _, k := range names { + rs := m.Resources[k] + var id string + if rs.Primary != nil { + id = rs.Primary.ID + } + if id == "" { + id = "" + } + + taintStr := "" + if rs.Primary.Tainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(rs.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) + } + + buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + if rs.Provider != "" { + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) + } + + var attributes map[string]string + if rs.Primary != nil { + attributes = rs.Primary.Attributes + } + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := 
attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + for idx, t := range rs.Deposed { + taintStr := "" + if t.Tainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) + } + + if len(rs.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range rs.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep)) + } + } + } + + if len(m.Outputs) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.Outputs)) + for k, _ := range m.Outputs { + ks = append(ks, k) + } + + sort.Strings(ks) + + for _, k := range ks { + v := m.Outputs[k] + switch vTyped := v.Value.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key, _ := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + } + } + } + + return buf.String() +} + +// ResourceStateKey is a structured representation of the key used for the +// ModuleState.Resources mapping +type ResourceStateKey struct { + Name string + Type string + Mode config.ResourceMode + Index int +} + +// Equal determines whether two ResourceStateKeys are the same +func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { + if rsk == nil || other == nil { + return false + } + if rsk.Mode != other.Mode { + return false + } + if rsk.Type != other.Type { + return false + } + if rsk.Name != other.Name { + return false + } + if rsk.Index != other.Index { + return false + } + return true +} + +func (rsk *ResourceStateKey) String() string { + if rsk == nil { + return "" + } + var prefix string + switch rsk.Mode { + case config.ManagedResourceMode: + prefix = "" + case config.DataResourceMode: + prefix = "data." + default: + panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) + } + if rsk.Index == -1 { + return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) + } + return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) +} + +// ParseResourceStateKey accepts a key in the format used by +// ModuleState.Resources and returns a resource name and resource index. In the +// state, a resource has the format "type.name.index" or "type.name". In the +// latter case, the index is returned as -1. +func ParseResourceStateKey(k string) (*ResourceStateKey, error) { + parts := strings.Split(k, ".") + mode := config.ManagedResourceMode + if len(parts) > 0 && parts[0] == "data" { + mode = config.DataResourceMode + // Don't need the constant "data" prefix for parsing + // now that we've figured out the mode. 
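+		// For example (illustrative): "data.aws_ami.base" leaves
+		// parts = ["aws_ami", "base"] once the prefix is stripped.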
+ parts = parts[1:] + } + if len(parts) < 2 || len(parts) > 3 { + return nil, fmt.Errorf("Malformed resource state key: %s", k) + } + rsk := &ResourceStateKey{ + Mode: mode, + Type: parts[0], + Name: parts[1], + Index: -1, + } + if len(parts) == 3 { + index, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, fmt.Errorf("Malformed resource state key index: %s", k) + } + rsk.Index = index + } + return rsk, nil +} + +// ResourceState holds the state of a resource that is used so that +// a provider can find and manage an existing resource as well as for +// storing attributes that are used to populate variables of child +// resources. +// +// Attributes has attributes about the created resource that are +// queryable in interpolation: "${type.id.attr}" +// +// Extra is just extra data that a provider can return that we store +// for later, but is not exposed in any way to the user. +// +type ResourceState struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *InstanceState `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*InstanceState `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` + + mu sync.Mutex +} + +func (s *ResourceState) Lock() { s.mu.Lock() } +func (s *ResourceState) Unlock() { s.mu.Unlock() } + +// Equal tests whether two ResourceStates are equal. 
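+// Only Type, Provider, Dependencies, and the Primary instance take part
+// in the comparison; Deposed instances are not compared.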
+func (s *ResourceState) Equal(other *ResourceState) bool { + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Provider != other.Provider { + return false + } + + // Dependencies must be equal + sort.Strings(s.Dependencies) + sort.Strings(other.Dependencies) + if len(s.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range s.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // States must be equal + if !s.Primary.Equal(other.Primary) { + return false + } + + return true +} + +// Taint marks a resource as tainted. +func (s *ResourceState) Taint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = true + } +} + +// Untaint unmarks a resource as tainted. +func (s *ResourceState) Untaint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = false + } +} + +func (s *ResourceState) init() { + s.Lock() + defer s.Unlock() + + if s.Primary == nil { + s.Primary = &InstanceState{} + } + s.Primary.init() + + if s.Dependencies == nil { + s.Dependencies = []string{} + } + + if s.Deposed == nil { + s.Deposed = make([]*InstanceState, 0) + } +} + +func (s *ResourceState) deepcopy() *ResourceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*ResourceState) +} + +// prune is used to remove any instances that are no longer required +func (s *ResourceState) prune() { + s.Lock() + defer s.Unlock() + + n := len(s.Deposed) + for i := 0; i < n; i++ { + inst := s.Deposed[i] + if inst == nil || inst.ID == "" { + copy(s.Deposed[i:], s.Deposed[i+1:]) + s.Deposed[n-1] = nil + n-- + i-- + } + } + s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) +} + +func (s *ResourceState) sort() { + s.Lock() + defer s.Unlock() + + sort.Strings(s.Dependencies) +} + +func (s *ResourceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) + return buf.String() +} + +// InstanceState is used to track the unique state information belonging +// to a given instance. +type InstanceState struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral EphemeralState `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + // Tainted is used to mark a resource for recreation. 
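+	// A tainted instance is slated to be destroyed and recreated on the
+	// next apply.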
+ Tainted bool `json:"tainted"` + + mu sync.Mutex +} + +func (s *InstanceState) Lock() { s.mu.Lock() } +func (s *InstanceState) Unlock() { s.mu.Unlock() } + +func (s *InstanceState) init() { + s.Lock() + defer s.Unlock() + + if s.Attributes == nil { + s.Attributes = make(map[string]string) + } + if s.Meta == nil { + s.Meta = make(map[string]interface{}) + } + s.Ephemeral.init() +} + +// Copy all the Fields from another InstanceState +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +func (s *InstanceState) DeepCopy() *InstanceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*InstanceState) +} + +func (s *InstanceState) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return s.ID == "" +} + +func (s *InstanceState) Equal(other *InstanceState) bool { + // Short circuit some nil checks + if s == nil || other == nil { + return s == other + } + s.Lock() + defer s.Unlock() + + // IDs must be equal + if s.ID != other.ID { + return false + } + + // Attributes must be equal + if len(s.Attributes) != len(other.Attributes) { + return false + } + for k, v := range s.Attributes { + otherV, ok := other.Attributes[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + if !reflect.DeepEqual(s.Meta, other.Meta) { + return false + } + } + + if s.Tainted != other.Tainted { + return false + } + + return true +} + +// MergeDiff takes a ResourceDiff and merges the attributes into +// this resource state in order to generate a new state. This new +// state can be used to provide updated attribute lookups for +// variable interpolation. +// +// If the diff attribute requires computing the value, and hence +// won't be available until apply, the value is replaced with the +// computeID. 
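+//
+// For example (illustrative sketch; InstanceDiff and ResourceAttrDiff are
+// defined elsewhere in this package, and the values here are invented):
+//
+//	is := &InstanceState{ID: "i-abc123", Attributes: map[string]string{"ami": "ami-1"}}
+//	d := &InstanceDiff{Attributes: map[string]*ResourceAttrDiff{
+//	    "ami": {Old: "ami-1", New: "ami-2"},
+//	}}
+//	merged := is.MergeDiff(d) // merged.Attributes["ami"] == "ami-2"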
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { + result := s.DeepCopy() + if result == nil { + result = new(InstanceState) + } + result.init() + + if s != nil { + s.Lock() + defer s.Unlock() + for k, v := range s.Attributes { + result.Attributes[k] = v + } + } + if d != nil { + for k, diff := range d.CopyAttributes() { + if diff.NewRemoved { + delete(result.Attributes, k) + continue + } + if diff.NewComputed { + result.Attributes[k] = config.UnknownVariableValue + continue + } + + result.Attributes[k] = diff.New + } + } + + return result +} + +func (s *InstanceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + + if s == nil || s.ID == "" { + return "" + } + + buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) + + attributes := s.Attributes + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) + } + + buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) + + return buf.String() +} + +// EphemeralState is used for transient state that is only kept in-memory +type EphemeralState struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` + + // Type is used to specify the resource type for this instance. This is only + // required for import operations (as documented). If the documentation + // doesn't state that you need to set this, then don't worry about + // setting it. + Type string `json:"-"` +} + +func (e *EphemeralState) init() { + if e.ConnInfo == nil { + e.ConnInfo = make(map[string]string) + } +} + +func (e *EphemeralState) DeepCopy() *EphemeralState { + copy, err := copystructure.Config{Lock: true}.Copy(e) + if err != nil { + panic(err) + } + + return copy.(*EphemeralState) +} + +type jsonStateVersionIdentifier struct { + Version int `json:"version"` +} + +// Check if this is a V0 format - the magic bytes at the start of the file +// should be "tfstate" if so. We no longer support upgrading this type of +// state but return an error message explaining to a user how they can +// upgrade via the 0.6.x series. +func testForV0State(buf *bufio.Reader) error { + start, err := buf.Peek(len("tfstate")) + if err != nil { + return fmt.Errorf("Failed to check for magic bytes: %v", err) + } + if string(start) == "tfstate" { + return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + + "format which was used prior to Terraform 0.3. Please upgrade\n" + + "this state file using Terraform 0.6.16 prior to using it with\n" + + "Terraform 0.7.") + } + + return nil +} + +// ErrNoState is returned by ReadState when the io.Reader contains no data +var ErrNoState = errors.New("no state") + +// ReadState reads a state structure out of a reader in the format that +// was written by WriteState. +func ReadState(src io.Reader) (*State, error) { + buf := bufio.NewReader(src) + if _, err := buf.Peek(1); err != nil { + // the error is either io.EOF or "invalid argument", and both are from + // an empty state. + return nil, ErrNoState + } + + if err := testForV0State(buf); err != nil { + return nil, err + } + + // If we are JSON we buffer the whole thing in memory so we can read it twice. 
+	// This is suboptimal, but will work for now.
+	jsonBytes, err := ioutil.ReadAll(buf)
+	if err != nil {
+		return nil, fmt.Errorf("Reading state file failed: %v", err)
+	}
+
+	versionIdentifier := &jsonStateVersionIdentifier{}
+	if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil {
+		return nil, fmt.Errorf("Decoding state file version failed: %v", err)
+	}
+
+	var result *State
+	switch versionIdentifier.Version {
+	case 0:
+		return nil, fmt.Errorf("State version 0 is not supported as JSON.")
+	case 1:
+		v1State, err := ReadStateV1(jsonBytes)
+		if err != nil {
+			return nil, err
+		}
+
+		v2State, err := upgradeStateV1ToV2(v1State)
+		if err != nil {
+			return nil, err
+		}
+
+		v3State, err := upgradeStateV2ToV3(v2State)
+		if err != nil {
+			return nil, err
+		}
+
+		// increment the Serial whenever we upgrade state
+		v3State.Serial++
+		result = v3State
+	case 2:
+		v2State, err := ReadStateV2(jsonBytes)
+		if err != nil {
+			return nil, err
+		}
+		v3State, err := upgradeStateV2ToV3(v2State)
+		if err != nil {
+			return nil, err
+		}
+
+		v3State.Serial++
+		result = v3State
+	case 3:
+		v3State, err := ReadStateV3(jsonBytes)
+		if err != nil {
+			return nil, err
+		}
+
+		result = v3State
+	default:
+		return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+			SemVersion.String(), versionIdentifier.Version)
+	}
+
+	// If we reached this place we must have a result set
+	if result == nil {
+		panic("resulting state in load not set, assertion failed")
+	}
+
+	// Prune the state when we read it. It's possible to write unpruned
+	// states or for a user to make a state unpruned (nil-ing a module
+	// state, for example).
+	result.prune()
+
+	// Validate the state file is valid
+	if err := result.Validate(); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+func ReadStateV1(jsonBytes []byte) (*stateV1, error) {
+	v1State := &stateV1{}
+	if err := json.Unmarshal(jsonBytes, v1State); err != nil {
+		return nil, fmt.Errorf("Decoding state file failed: %v", err)
+	}
+
+	if v1State.Version != 1 {
+		return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+
+			"read %d, expected 1", v1State.Version)
+	}
+
+	return v1State, nil
+}
+
+func ReadStateV2(jsonBytes []byte) (*State, error) {
+	state := &State{}
+	if err := json.Unmarshal(jsonBytes, state); err != nil {
+		return nil, fmt.Errorf("Decoding state file failed: %v", err)
+	}
+
+	// Check the version; this is to ensure we don't read a future
+	// version that we don't understand
+	if state.Version > StateVersion {
+		return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+			SemVersion.String(), state.Version)
+	}
+
+	// Make sure the version is semantic
+	if state.TFVersion != "" {
+		if _, err := version.NewVersion(state.TFVersion); err != nil {
+			return nil, fmt.Errorf(
+				"State contains invalid version: %s\n\n"+
+					"Terraform validates the version format prior to writing it. This\n"+
+					"likely means the state became corrupted through some\n"+
+					"external means. Please manually modify the Terraform version\n"+
+					"field to be a proper semantic version.",
+				state.TFVersion)
+		}
+	}
+
+	// catch any uninitialized fields in the state
+	state.init()
+
+	// Sort it
+	state.sort()
+
+	return state, nil
+}
+
+func ReadStateV3(jsonBytes []byte) (*State, error) {
+	state := &State{}
+	if err := json.Unmarshal(jsonBytes, state); err != nil {
+		return nil, fmt.Errorf("Decoding state file failed: %v", err)
+	}
+
+	// Check the version; this is to ensure we don't read a future
+	// version that we don't understand
+	if state.Version > StateVersion {
+		return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+			SemVersion.String(), state.Version)
+	}
+
+	// Make sure the version is semantic
+	if state.TFVersion != "" {
+		if _, err := version.NewVersion(state.TFVersion); err != nil {
+			return nil, fmt.Errorf(
+				"State contains invalid version: %s\n\n"+
+					"Terraform validates the version format prior to writing it. This\n"+
+					"likely means the state became corrupted through some\n"+
+					"external means. Please manually modify the Terraform version\n"+
+					"field to be a proper semantic version.",
+				state.TFVersion)
+		}
+	}
+
+	// catch any uninitialized fields in the state
+	state.init()
+
+	// Sort it
+	state.sort()
+
+	// Now we write the state back out to detect any changes in normalization.
+	// If our state is now written out differently, bump the serial number to
+	// prevent conflicts.
+	var buf bytes.Buffer
+	err := WriteState(state, &buf)
+	if err != nil {
+		return nil, err
+	}
+
+	if !bytes.Equal(jsonBytes, buf.Bytes()) {
+		log.Println("[INFO] state modified during read or write. incrementing serial number")
+		state.Serial++
+	}
+
+	return state, nil
+}
+
+// WriteState writes a state to the given writer in the standard JSON
+// format used for state files.
+func WriteState(d *State, dst io.Writer) error {
+	// writing a nil state is a noop.
+	if d == nil {
+		return nil
+	}
+
+	// make sure we have no uninitialized fields
+	d.init()
+
+	// Make sure it is sorted
+	d.sort()
+
+	// Ensure the version is set
+	d.Version = StateVersion
+
+	// If the TFVersion is set, verify it. We used to just set the version
+	// here, but this isn't safe since it changes the MD5 sum on some remote
+	// state storage backends such as Atlas. We now leave it be if needed.
+	if d.TFVersion != "" {
+		if _, err := version.NewVersion(d.TFVersion); err != nil {
+			return fmt.Errorf(
+				"Error writing state, invalid version: %s\n\n"+
+					"The Terraform version when writing the state must be a semantic\n"+
+					"version.",
+				d.TFVersion)
+		}
+	}
+
+	// Encode the data in a human-friendly way
+	data, err := json.MarshalIndent(d, "", "    ")
+	if err != nil {
+		return fmt.Errorf("Failed to encode state: %s", err)
+	}
+
+	// We append a newline to the data because MarshalIndent doesn't
+	data = append(data, '\n')
+
+	// Write the data out to the dst
+	if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil {
+		return fmt.Errorf("Failed to write state: %v", err)
+	}
+
+	return nil
+}
+
+// resourceNameSort implements sort.Interface to sort name parts lexically
+// for strings and numerically for integer indexes.
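+//
+// For example (illustrative): "aws_instance.foo.9" sorts before
+// "aws_instance.foo.10" because index parts compare numerically, while
+// purely textual parts fall back to lexical ordering.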
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + +// moduleStateSort implements sort.Interface to sort module states +type moduleStateSort []*ModuleState + +func (s moduleStateSort) Len() int { + return len(s) +} + +func (s moduleStateSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} + +func (s moduleStateSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +const stateValidateErrMultiModule = ` +Multiple modules with the same path: %s + +This means that there are multiple entries in the "modules" field +in your state file that point to the same module. This will cause Terraform +to behave in unexpected and error prone ways and is invalid. Please back up +and modify your state file manually to resolve this. +` diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go new file mode 100644 index 0000000000..116373032f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/state_add.go @@ -0,0 +1,374 @@ +package terraform + +import "fmt" + +// Add adds the item in the state at the given address. +// +// The item can be a ModuleState, ResourceState, or InstanceState. Depending +// on the item type, the address may or may not be valid. For example, a +// module cannot be moved to a resource address, however a resource can be +// moved to a module address (it retains the same name, under that resource). +// +// The item can also be a []*ModuleState, which is the case for nested +// modules. In this case, Add will expect the zero-index to be the top-most +// module to add and will only nest children from there. For semantics, this +// is equivalent to module => module. 
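+//
+// For example (illustrative sketch, where rs is the *ResourceState being
+// moved): relocating a resource into a module looks like
+//
+//	err := s.Add("aws_instance.foo", "module.web", rs)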
+// +// The full semantics of Add: +// +// ┌───────────────────┬───────────────────┬───────────────────┐ +// │ Module Address │ Resource Address │ Instance Address │ +// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤ +// │ ModuleState │ ✓ │ x │ x │ +// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤ +// │ ResourceState │ ✓ │ ✓ │ maybe* │ +// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤ +// │ Instance State │ ✓ │ ✓ │ ✓ │ +// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘ +// +// *maybe - Resources can be added at an instance address only if the resource +// represents a single instance (primary). Example: +// "aws_instance.foo" can be moved to "aws_instance.bar.tainted" +// +func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error { + // Parse the address + + toAddr, err := ParseResourceAddress(toAddrRaw) + if err != nil { + return err + } + + // Parse the from address + fromAddr, err := ParseResourceAddress(fromAddrRaw) + if err != nil { + return err + } + + // Determine the types + from := detectValueAddLoc(raw) + to := detectAddrAddLoc(toAddr) + + // Find the function to do this + fromMap, ok := stateAddFuncs[from] + if !ok { + return fmt.Errorf("invalid source to add to state: %T", raw) + } + f, ok := fromMap[to] + if !ok { + return fmt.Errorf("invalid destination: %s (%d)", toAddr, to) + } + + // Call the migrator + if err := f(s, fromAddr, toAddr, raw); err != nil { + return err + } + + // Prune the state + s.prune() + return nil +} + +func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { + // raw can be either *ModuleState or []*ModuleState. The former means + // we're moving just one module. The latter means we're moving a module + // and children. + root := raw + var rest []*ModuleState + if list, ok := raw.([]*ModuleState); ok { + // We need at least one item + if len(list) == 0 { + return fmt.Errorf("module move with no value to: %s", addr) + } + + // The first item is always the root + root = list[0] + if len(list) > 1 { + rest = list[1:] + } + } + + // Get the actual module state + src := root.(*ModuleState).deepcopy() + + // If the target module exists, it is an error + path := append([]string{"root"}, addr.Path...) + if s.ModuleByPath(path) != nil { + return fmt.Errorf("module target is not empty: %s", addr) + } + + // Create it and copy our outputs and dependencies + mod := s.AddModule(path) + mod.Outputs = src.Outputs + mod.Dependencies = src.Dependencies + + // Go through the resources perform an add for each of those + for k, v := range src.Resources { + resourceKey, err := ParseResourceStateKey(k) + if err != nil { + return err + } + + // Update the resource address for this + addrCopy := *addr + addrCopy.Type = resourceKey.Type + addrCopy.Name = resourceKey.Name + addrCopy.Index = resourceKey.Index + addrCopy.Mode = resourceKey.Mode + + // Perform an add + if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil { + return err + } + } + + // Add all the children if we have them + for _, item := range rest { + // If item isn't a descendent of our root, then ignore it + if !src.IsDescendent(item) { + continue + } + + // It is! Strip the leading prefix and attach that to our address + extra := item.Path[len(src.Path):] + addrCopy := addr.Copy() + addrCopy.Path = append(addrCopy.Path, extra...) 
+ + // Add it + s.Add(fromAddr.String(), addrCopy.String(), item) + } + + return nil +} + +func stateAddFunc_Resource_Module( + s *State, from, to *ResourceAddress, raw interface{}) error { + // Build the more specific to addr + addr := *to + addr.Type = from.Type + addr.Name = from.Name + + return s.Add(from.String(), addr.String(), raw) +} + +func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { + // raw can be either *ResourceState or []*ResourceState. The former means + // we're moving just one resource. The latter means we're moving a count + // of resources. + if list, ok := raw.([]*ResourceState); ok { + // We need at least one item + if len(list) == 0 { + return fmt.Errorf("resource move with no value to: %s", addr) + } + + // If there is an index, this is an error since we can't assign + // a set of resources to a single index + if addr.Index >= 0 && len(list) > 1 { + return fmt.Errorf( + "multiple resources can't be moved to a single index: "+ + "%s => %s", fromAddr, addr) + } + + // Add each with a specific index + for i, rs := range list { + addrCopy := addr.Copy() + addrCopy.Index = i + + if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil { + return err + } + } + + return nil + } + + src := raw.(*ResourceState).deepcopy() + + // Initialize the resource + resourceRaw, exists := stateAddInitAddr(s, addr) + if exists { + return fmt.Errorf("resource exists and not empty: %s", addr) + } + resource := resourceRaw.(*ResourceState) + resource.Type = src.Type + resource.Dependencies = src.Dependencies + resource.Provider = src.Provider + + // Move the primary + if src.Primary != nil { + addrCopy := *addr + addrCopy.InstanceType = TypePrimary + addrCopy.InstanceTypeSet = true + if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil { + return err + } + } + + // Move all deposed + if len(src.Deposed) > 0 { + resource.Deposed = src.Deposed + } + + return nil +} + +func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { + src := raw.(*InstanceState).DeepCopy() + + // Create the instance + instanceRaw, _ := stateAddInitAddr(s, addr) + instance := instanceRaw.(*InstanceState) + + // Set it + instance.Set(src) + + return nil +} + +func stateAddFunc_Instance_Module( + s *State, from, to *ResourceAddress, raw interface{}) error { + addr := *to + addr.Type = from.Type + addr.Name = from.Name + + return s.Add(from.String(), addr.String(), raw) +} + +func stateAddFunc_Instance_Resource( + s *State, from, to *ResourceAddress, raw interface{}) error { + addr := *to + addr.InstanceType = TypePrimary + addr.InstanceTypeSet = true + + return s.Add(from.String(), addr.String(), raw) +} + +// stateAddFunc is the type of function for adding an item to a state +type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error + +// stateAddFuncs has the full matrix mapping of the state adders. 
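+//
+// For example (illustrative): adding a *ResourceState at a module address
+// dispatches through stateAddFuncs[stateAddResource][stateAddModule],
+// i.e. stateAddFunc_Resource_Module.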
+var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc + +func init() { + stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{ + stateAddModule: { + stateAddModule: stateAddFunc_Module_Module, + }, + stateAddResource: { + stateAddModule: stateAddFunc_Resource_Module, + stateAddResource: stateAddFunc_Resource_Resource, + }, + stateAddInstance: { + stateAddInstance: stateAddFunc_Instance_Instance, + stateAddModule: stateAddFunc_Instance_Module, + stateAddResource: stateAddFunc_Instance_Resource, + }, + } +} + +// stateAddLoc is an enum to represent the location where state is being +// moved from/to. We use this for quick lookups in a function map. +type stateAddLoc uint + +const ( + stateAddInvalid stateAddLoc = iota + stateAddModule + stateAddResource + stateAddInstance +) + +// detectAddrAddLoc detects the state type for the given address. This +// function is specifically not unit tested since we consider the State.Add +// functionality to be comprehensive enough to cover this. +func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc { + if addr.Name == "" { + return stateAddModule + } + + if !addr.InstanceTypeSet { + return stateAddResource + } + + return stateAddInstance +} + +// detectValueAddLoc determines the stateAddLoc value from the raw value +// that is some State structure. +func detectValueAddLoc(raw interface{}) stateAddLoc { + switch raw.(type) { + case *ModuleState: + return stateAddModule + case []*ModuleState: + return stateAddModule + case *ResourceState: + return stateAddResource + case []*ResourceState: + return stateAddResource + case *InstanceState: + return stateAddInstance + default: + return stateAddInvalid + } +} + +// stateAddInitAddr takes a ResourceAddress and creates the non-existing +// resources up to that point, returning the empty (or existing) interface +// at that address. +func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) { + addType := detectAddrAddLoc(addr) + + // Get the module + path := append([]string{"root"}, addr.Path...) + exists := true + mod := s.ModuleByPath(path) + if mod == nil { + mod = s.AddModule(path) + exists = false + } + if addType == stateAddModule { + return mod, exists + } + + // Add the resource + resourceKey := (&ResourceStateKey{ + Name: addr.Name, + Type: addr.Type, + Index: addr.Index, + Mode: addr.Mode, + }).String() + exists = true + resource, ok := mod.Resources[resourceKey] + if !ok { + resource = &ResourceState{Type: addr.Type} + resource.init() + mod.Resources[resourceKey] = resource + exists = false + } + if addType == stateAddResource { + return resource, exists + } + + // Get the instance + exists = true + instance := &InstanceState{} + switch addr.InstanceType { + case TypePrimary, TypeTainted: + if v := resource.Primary; v != nil { + instance = resource.Primary + } else { + exists = false + } + case TypeDeposed: + idx := addr.Index + if addr.Index < 0 { + idx = 0 + } + if len(resource.Deposed) > idx { + instance = resource.Deposed[idx] + } else { + resource.Deposed = append(resource.Deposed, instance) + exists = false + } + } + + return instance, exists +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go new file mode 100644 index 0000000000..2dcb11b76b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go @@ -0,0 +1,267 @@ +package terraform + +import ( + "fmt" + "sort" +) + +// StateFilter is responsible for filtering and searching a state. 
+//
+// This is a separate struct from State rather than a method on State
+// because StateFilter might create sidecar data structures to optimize
+// filtering on the state.
+//
+// If you change the State, the filter created is invalid and either
+// Reset should be called or a new one should be allocated. StateFilter
+// will not watch State for changes and do this for you. If you filter after
+// changing the State without calling Reset, the behavior is not defined.
+type StateFilter struct {
+	State *State
+}
+
+// Filter takes the addresses specified by fs and finds all the matches.
+// The values of fs are in the resource addressing syntax that can be
+// parsed by ParseResourceAddress.
+func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) {
+	// Parse all the addresses
+	as := make([]*ResourceAddress, len(fs))
+	for i, v := range fs {
+		a, err := ParseResourceAddress(v)
+		if err != nil {
+			return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
+		}
+
+		as[i] = a
+	}
+
+	// If we weren't given any filters, then we list all
+	if len(fs) == 0 {
+		as = append(as, &ResourceAddress{Index: -1})
+	}
+
+	// Filter each of the addresses. We keep track of the results in a
+	// map to strip duplicates.
+	resultSet := make(map[string]*StateFilterResult)
+	for _, a := range as {
+		for _, r := range f.filterSingle(a) {
+			resultSet[r.String()] = r
+		}
+	}
+
+	// Make the result list
+	results := make([]*StateFilterResult, 0, len(resultSet))
+	for _, v := range resultSet {
+		results = append(results, v)
+	}
+
+	// Sort them and return
+	sort.Sort(StateFilterResultSlice(results))
+	return results, nil
+}
+
+func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult {
+	// The slice to keep track of results
+	var results []*StateFilterResult
+
+	// Go through modules first.
+	modules := make([]*ModuleState, 0, len(f.State.Modules))
+	for _, m := range f.State.Modules {
+		if f.relevant(a, m) {
+			modules = append(modules, m)
+
+			// Only add the module to the results if we haven't specified a type.
+			// We also ignore the root module.
+			if a.Type == "" && len(m.Path) > 1 {
+				results = append(results, &StateFilterResult{
+					Path:    m.Path[1:],
+					Address: (&ResourceAddress{Path: m.Path[1:]}).String(),
+					Value:   m,
+				})
+			}
+		}
+	}
+
+	// With the modules set, go through all the resources within
+	// the modules to find relevant resources.
+	for _, m := range modules {
+		for n, r := range m.Resources {
+			// The name in the state contains valuable information. Parse.
+			key, err := ParseResourceStateKey(n)
+			if err != nil {
+				// If we get an error parsing, then just filter it
+				// out of the results.
+				continue
+			}
+
+			// Older states and test fixtures often don't contain the
+			// type directly on the ResourceState. We add this so StateFilter
+			// is a bit more robust.
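+			// (Keys here look like "aws_instance.foo", or
+			// "aws_instance.foo.3" for counted resources; the parsed
+			// key carries the type, name, and index used below.)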
+			if r.Type == "" {
+				r.Type = key.Type
+			}
+
+			if f.relevant(a, r) {
+				if a.Name != "" && a.Name != key.Name {
+					// Name doesn't match
+					continue
+				}
+
+				if a.Index >= 0 && key.Index != a.Index {
+					// Index doesn't match
+					continue
+				}
+
+				// Build the address for this resource
+				addr := &ResourceAddress{
+					Path:  m.Path[1:],
+					Name:  key.Name,
+					Type:  key.Type,
+					Index: key.Index,
+				}
+
+				// Add the resource level result
+				resourceResult := &StateFilterResult{
+					Path:    addr.Path,
+					Address: addr.String(),
+					Value:   r,
+				}
+				if !a.InstanceTypeSet {
+					results = append(results, resourceResult)
+				}
+
+				// Add the instances
+				if r.Primary != nil {
+					addr.InstanceType = TypePrimary
+					addr.InstanceTypeSet = false
+					results = append(results, &StateFilterResult{
+						Path:    addr.Path,
+						Address: addr.String(),
+						Parent:  resourceResult,
+						Value:   r.Primary,
+					})
+				}
+
+				for _, instance := range r.Deposed {
+					if f.relevant(a, instance) {
+						addr.InstanceType = TypeDeposed
+						addr.InstanceTypeSet = true
+						results = append(results, &StateFilterResult{
+							Path:    addr.Path,
+							Address: addr.String(),
+							Parent:  resourceResult,
+							Value:   instance,
+						})
+					}
+				}
+			}
+		}
+	}
+
+	return results
+}
+
+// relevant checks for relevance of this address against the given value.
+func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool {
+	switch v := raw.(type) {
+	case *ModuleState:
+		path := v.Path[1:]
+
+		if len(addr.Path) > len(path) {
+			// Longer path in address means there is no way we match.
+			return false
+		}
+
+		// Check for a prefix match
+		for i, p := range addr.Path {
+			if path[i] != p {
+				// Any mismatches don't match.
+				return false
+			}
+		}
+
+		return true
+	case *ResourceState:
+		if addr.Type == "" {
+			// If we have no resource type, then we're interested in all!
+			return true
+		}
+
+		// If the type doesn't match we fail immediately
+		if v.Type != addr.Type {
+			return false
+		}
+
+		return true
+	default:
+		// If we don't know about it, let's just say no
+		return false
+	}
+}
+
+// StateFilterResult is a single result from a filter operation. Filter
+// can match multiple things within a state (module, resource, instance, etc.)
+// and this unifies that.
+type StateFilterResult struct {
+	// Module path of the result
+	Path []string
+
+	// Address is the address that can be used to reference this exact result.
+	Address string
+
+	// Parent, if non-nil, is a parent of this result. For instances, the
+	// parent would be a resource. For resources, the parent would be
+	// a module. For modules, this is currently nil.
+	Parent *StateFilterResult
+
+	// Value is the actual value. This must be type switched on. It can be
+	// any of the data structures that `State` can hold: `ModuleState`,
+	// `ResourceState`, `InstanceState`.
+	Value interface{}
+}
+
+func (r *StateFilterResult) String() string {
+	return fmt.Sprintf("%T: %s", r.Value, r.Address)
+}
+
+func (r *StateFilterResult) sortedType() int {
+	switch r.Value.(type) {
+	case *ModuleState:
+		return 0
+	case *ResourceState:
+		return 1
+	case *InstanceState:
+		return 2
+	default:
+		return 50
+	}
+}
+
+// StateFilterResultSlice is a slice of results that implements
+// sort.Interface. The sorting goal is the ordering that reads best
+// in human output.
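+// Results sort by address (counted resources by index), and when two
+// results share an address a module sorts before a resource, which in
+// turn sorts before an instance.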
+type StateFilterResultSlice []*StateFilterResult
+
+func (s StateFilterResultSlice) Len() int      { return len(s) }
+func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s StateFilterResultSlice) Less(i, j int) bool {
+	a, b := s[i], s[j]
+
+	// If these addresses contain an index, we want to sort by index rather than name
+	addrA, errA := ParseResourceAddress(a.Address)
+	addrB, errB := ParseResourceAddress(b.Address)
+	if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
+		return addrA.Index < addrB.Index
+	}
+
+	// If the addresses are different it is just lexicographic sorting
+	if a.Address != b.Address {
+		return a.Address < b.Address
+	}
+
+	// Addresses are the same, so ordering falls back to the type
+	return a.sortedType() < b.sortedType()
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
new file mode 100644
index 0000000000..aa13cce803
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
@@ -0,0 +1,189 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/mitchellh/copystructure"
+)
+
+// upgradeStateV1ToV2 is used to upgrade a V1 state representation
+// into a V2 state representation
+func upgradeStateV1ToV2(old *stateV1) (*State, error) {
+	if old == nil {
+		return nil, nil
+	}
+
+	remote, err := old.Remote.upgradeToV2()
+	if err != nil {
+		return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+	}
+
+	modules := make([]*ModuleState, len(old.Modules))
+	for i, module := range old.Modules {
+		upgraded, err := module.upgradeToV2()
+		if err != nil {
+			return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+		}
+		modules[i] = upgraded
+	}
+	if len(modules) == 0 {
+		modules = nil
+	}
+
+	newState := &State{
+		Version: 2,
+		Serial:  old.Serial,
+		Remote:  remote,
+		Modules: modules,
+	}
+
+	newState.sort()
+	newState.init()
+
+	return newState, nil
+}
+
+func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) {
+	if old == nil {
+		return nil, nil
+	}
+
+	config, err := copystructure.Copy(old.Config)
+	if err != nil {
+		return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
+	}
+
+	return &RemoteState{
+		Type:   old.Type,
+		Config: config.(map[string]string),
+	}, nil
+}
+
+func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) {
+	if old == nil {
+		return nil, nil
+	}
+
+	pathRaw, err := copystructure.Copy(old.Path)
+	if err != nil {
+		return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+	}
+	path, ok := pathRaw.([]string)
+	if !ok {
+		return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
+	}
+	if len(path) == 0 {
+		// We found some V1 states with a nil path. Assume root and catch
+		// duplicate path errors later (as part of Validate).
+ path = rootModulePath + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*OutputState) + for key, output := range old.Outputs { + outputs[key] = &OutputState{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*ResourceState) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + + return &ModuleState{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + deposed := make([]*InstanceState, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &ResourceState{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + ephemeral, err := old.Ephemeral.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + meta, err := copystructure.Copy(old.Meta) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &InstanceState{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Ephemeral: *ephemeral, + Meta: newMeta, + }, nil +} + +func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { + connInfo, err := copystructure.Copy(old.ConnInfo) + if err != nil { + return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err) + } + return &EphemeralState{ + ConnInfo: connInfo.(map[string]string), + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go new file mode 100644 index 0000000000..e52d35fcd1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go @@ -0,0 +1,142 @@ +package terraform + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" +) + +// The upgrade process from V2 to V3 state does not affect the structure, +// so we do not need to redeclare all of the structs involved - we just +// take a deep copy of the old structure and assert the version number is +// as we expect. 
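+//
+// The attribute-level change is to map count keys: a V2 entry such as
+// "tags.#" = "2" (alongside "tags.env" and "tags.name" element keys) is
+// rewritten to use the V3 "tags.%" syntax. (Hypothetical attribute
+// names.)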
+func upgradeStateV2ToV3(old *State) (*State, error) { + new := old.DeepCopy() + + // Ensure the copied version is v2 before attempting to upgrade + if new.Version != 2 { + return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + + "a state which is not version 2.") + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. + for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + if resource.Deposed != nil { + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *InstanceState) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. + for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. There is room for error here, so we log in each case. If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. + + oldCountKey := fmt.Sprintf("%s#", prefix) + + // First, detect "obvious" maps - which have non-numeric keys (mostly). + hasNonNumericKeys := false + for _, key := range actualKeysMatching { + if _, err := strconv.Atoi(key); err != nil { + hasNonNumericKeys = true + } + } + if hasNonNumericKeys { + newCountKey := fmt.Sprintf("%s%%", prefix) + + instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", + strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) + } + + // Now detect empty collections and remove them from state. 
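+		// (An empty collection is one that left only its count key
+		// behind: e.g. a lone "foo.#" with no "foo.*" element keys.)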
+		if len(actualKeysMatching) == 0 {
+			delete(instanceState.Attributes, oldCountKey)
+			log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
+				strings.TrimSuffix(prefix, "."))
+		}
+	}
+
+	return nil
+}
+
+// uniqueSortedStrings removes duplicates from a slice of strings and returns
+// a sorted slice of the unique strings.
+func uniqueSortedStrings(input []string) []string {
+	uniquemap := make(map[string]struct{})
+	for _, str := range input {
+		uniquemap[str] = struct{}{}
+	}
+
+	output := make([]string, len(uniquemap))
+
+	i := 0
+	for key := range uniquemap {
+		output[i] = key
+		i++
+	}
+
+	sort.Strings(output)
+	return output
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
new file mode 100644
index 0000000000..68cffb41b5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
@@ -0,0 +1,145 @@
+package terraform
+
+// stateV1 keeps track of a snapshot state-of-the-world that Terraform
+// can use to keep track of what real world resources it is actually
+// managing.
+//
+// stateV1 is only used for the purposes of backwards compatibility
+// and is no longer used in Terraform.
+//
+// For the upgrade process, see state_upgrade_v1_to_v2.go
+type stateV1 struct {
+	// Version is the protocol version. "1" for a StateV1.
+	Version int `json:"version"`
+
+	// Serial is incremented on any operation that modifies
+	// the State file. It is used to detect potentially conflicting
+	// updates.
+	Serial int64 `json:"serial"`
+
+	// Remote is used to track the metadata required to
+	// pull and push state files from a remote storage endpoint.
+	Remote *remoteStateV1 `json:"remote,omitempty"`
+
+	// Modules contains all the modules in a breadth-first order
+	Modules []*moduleStateV1 `json:"modules"`
+}
+
+type remoteStateV1 struct {
+	// Type controls the client we use for the remote state
+	Type string `json:"type"`
+
+	// Config is used to store arbitrary configuration that
+	// is type specific
+	Config map[string]string `json:"config"`
+}
+
+type moduleStateV1 struct {
+	// Path is the import path from the root module. Module imports are
+	// always disjoint, so the path represents a module tree
+	Path []string `json:"path"`
+
+	// Outputs declared by the module and maintained for each module
+	// even though only the root module technically needs to be kept.
+	// This allows operators to inspect values at the boundaries.
+	Outputs map[string]string `json:"outputs"`
+
+	// Resources is a mapping of the logically named resource to
+	// the state of the resource. Each resource may actually have
+	// N instances underneath, although a user only needs to think
+	// about the 1:1 case.
+	Resources map[string]*resourceStateV1 `json:"resources"`
+
+	// Dependencies are a list of things that this module relies on
+	// existing to remain intact. For example: a module may depend
+	// on a VPC ID given by an aws_vpc resource.
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a module that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
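+	//
+	// For example, a single hypothetical entry: ["aws_vpc.main"].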
+	Dependencies []string `json:"depends_on,omitempty"`
+}
+
+type resourceStateV1 struct {
+	// This is filled in and managed by Terraform, and is the resource
+	// type itself such as "mycloud_instance". If a resource provider sets
+	// this value, it won't be persisted.
+	Type string `json:"type"`
+
+	// Dependencies are a list of things that this resource relies on
+	// existing to remain intact. For example: an AWS instance might
+	// depend on a subnet (which itself might depend on a VPC, and so
+	// on).
+	//
+	// Terraform uses this information to build valid destruction
+	// orders and to warn the user if they're destroying a resource that
+	// another resource depends on.
+	//
+	// Things can be put into this list that may not be managed by
+	// Terraform. If Terraform doesn't find a matching ID in the
+	// overall state, then it assumes it isn't managed and doesn't
+	// worry about it.
+	Dependencies []string `json:"depends_on,omitempty"`
+
+	// Primary is the current active instance for this resource.
+	// It can be replaced but only after a successful creation.
+	// This is the instance on which providers will act.
+	Primary *instanceStateV1 `json:"primary"`
+
+	// Tainted is used to track any underlying instances that
+	// have been created but are in a bad or unknown state and
+	// need to be cleaned up subsequently. In the
+	// standard case, there is at most a single instance.
+	// However, in pathological cases, it is possible for the number
+	// of instances to accumulate.
+	Tainted []*instanceStateV1 `json:"tainted,omitempty"`
+
+	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+	// Primary is Deposed to get it out of the way for the replacement Primary to
+	// be created by Apply. If the replacement Primary creates successfully, the
+	// Deposed instance is cleaned up. If there were problems creating the
+	// replacement, the instance remains in the Deposed list so it can be
+	// destroyed in a future run. Functionally, Deposed instances are very
+	// similar to Tainted instances in that Terraform is only tracking them in
+	// order to remember to destroy them.
+	Deposed []*instanceStateV1 `json:"deposed,omitempty"`
+
+	// Provider is used when a resource is connected to a provider with an alias.
+	// If this string is empty, the resource is connected to the default provider,
+	// e.g. "aws_instance" goes with the "aws" provider.
+	// If the resource block contained a "provider" key, that value will be set here.
+	Provider string `json:"provider,omitempty"`
+}
+
+type instanceStateV1 struct {
+	// A unique ID for this resource. This is opaque to Terraform
+	// and is only meant as a lookup mechanism for the providers.
+	ID string `json:"id"`
+
+	// Attributes are basic information about the resource. Any keys here
+	// are accessible in variable format within Terraform configurations:
+	// ${resourcetype.name.attribute}.
+	Attributes map[string]string `json:"attributes,omitempty"`
+
+	// Ephemeral is used to store any state associated with this instance
+	// that is necessary for the Terraform run to complete, but is not
+	// persisted to a state file.
+	Ephemeral ephemeralStateV1 `json:"-"`
+
+	// Meta is a simple K/V map that is persisted to the State but otherwise
+	// ignored by Terraform core. It's meant to be used for accounting by
+	// external client code.
+	Meta map[string]string `json:"meta,omitempty"`
+}
+
+type ephemeralStateV1 struct {
+	// ConnInfo is used for the providers to export information which is
+	// used to connect to the resource for provisioning. For example,
+	// this could contain SSH or WinRM credentials.
+	ConnInfo map[string]string `json:"-"`
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go
new file mode 100644
index 0000000000..3f0418d927
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/testing.go
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+	"os"
+	"testing"
+)
+
+// TestStateFile writes the given state to the path.
+func TestStateFile(t *testing.T, path string, state *State) {
+	f, err := os.Create(path)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	defer f.Close()
+
+	if err := WriteState(state, f); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
new file mode 100644
index 0000000000..f4a431a674
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -0,0 +1,52 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphTransformer is the interface that transformers implement. This
+// interface is only for transforms that need entire graph visibility.
+type GraphTransformer interface {
+	Transform(*Graph) error
+}
+
+// GraphVertexTransformer is an interface that transforms a single
+// Vertex within the graph. This is a specialization of GraphTransformer
+// that makes it easy to do vertex replacement.
+//
+// The GraphTransformer that runs through the GraphVertexTransformers is
+// VertexTransformer.
+type GraphVertexTransformer interface {
+	Transform(dag.Vertex) (dag.Vertex, error)
+}
+
+// GraphTransformIf is a helper function that conditionally returns the
+// given GraphTransformer. This is useful for calling a sequence of
+// transforms inline without having to split it up into multiple append() calls.
+func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer {
+	if f() {
+		return then
+	}
+
+	return nil
+}
+
+type graphTransformerMulti struct {
+	Transforms []GraphTransformer
+}
+
+func (t *graphTransformerMulti) Transform(g *Graph) error {
+	for _, t := range t.Transforms {
+		if err := t.Transform(g); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// GraphTransformMulti combines multiple graph transformers into a single
+// GraphTransformer that runs all the individual graph transformers.
+func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer {
+	return &graphTransformerMulti{Transforms: ts}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
new file mode 100644
index 0000000000..10506ea060
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -0,0 +1,80 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/config/module"
+)
+
+// GraphNodeAttachProvider is an interface that must be implemented by nodes
+// that want provider configurations attached.
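+//
+// Matching is by name plus optional alias, so a node reporting the name
+// "aws.west" would receive a block like (hypothetical config):
+//
+//	provider "aws" {
+//	  alias = "west"
+//	}
+//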
+type GraphNodeAttachProvider interface { + // Must be implemented to determine the path for the configuration + GraphNodeSubPath + + // ProviderName with no module prefix. Example: "aws". + ProviderName() string + + // Sets the configuration + AttachProvider(*config.ProviderConfig) +} + +// AttachProviderConfigTransformer goes through the graph and attaches +// provider configuration structures to nodes that implement the interfaces +// above. +// +// The attached configuration structures are directly from the configuration. +// If they're going to be modified, a copy should be made. +type AttachProviderConfigTransformer struct { + Module *module.Tree // Module is the root module for the config +} + +func (t *AttachProviderConfigTransformer) Transform(g *Graph) error { + if err := t.attachProviders(g); err != nil { + return err + } + + return nil +} + +func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error { + // Go through and find GraphNodeAttachProvider + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachProvider implementations + apn, ok := v.(GraphNodeAttachProvider) + if !ok { + continue + } + + // Determine what we're looking for + path := normalizeModulePath(apn.Path()) + path = path[1:] + name := apn.ProviderName() + log.Printf("[TRACE] Attach provider request: %#v %s", path, name) + + // Get the configuration. + tree := t.Module.Child(path) + if tree == nil { + continue + } + + // Go through the provider configs to find the matching config + for _, p := range tree.Config().ProviderConfigs { + // Build the name, which is "name.alias" if an alias exists + current := p.Name + if p.Alias != "" { + current += "." + p.Alias + } + + // If the configs match then attach! + if current == name { + log.Printf("[TRACE] Attaching provider config: %#v", p) + apn.AttachProvider(p) + break + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go new file mode 100644 index 0000000000..f2ee37e56b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go @@ -0,0 +1,78 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" +) + +// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes +// that want resource configurations attached. +type GraphNodeAttachResourceConfig interface { + // ResourceAddr is the address to the resource + ResourceAddr() *ResourceAddress + + // Sets the configuration + AttachResourceConfig(*config.Resource) +} + +// AttachResourceConfigTransformer goes through the graph and attaches +// resource configuration structures to nodes that implement the interfaces +// above. +// +// The attached configuration structures are directly from the configuration. +// If they're going to be modified, a copy should be made. 
+type AttachResourceConfigTransformer struct { + Module *module.Tree // Module is the root module for the config +} + +func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { + log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...") + + // Go through and find GraphNodeAttachResource + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachResource implementations + arn, ok := v.(GraphNodeAttachResourceConfig) + if !ok { + continue + } + + // Determine what we're looking for + addr := arn.ResourceAddr() + log.Printf( + "[TRACE] AttachResourceConfigTransformer: Attach resource "+ + "config request: %s", addr) + + // Get the configuration. + path := normalizeModulePath(addr.Path) + path = path[1:] + tree := t.Module.Child(path) + if tree == nil { + continue + } + + // Go through the resource configs to find the matching config + for _, r := range tree.Config().Resources { + // Get a resource address so we can compare + a, err := parseResourceAddressConfig(r) + if err != nil { + panic(fmt.Sprintf( + "Error parsing config address, this is a bug: %#v", r)) + } + a.Path = addr.Path + + // If this is not the same resource, then continue + if !a.Equals(addr) { + continue + } + + log.Printf("[TRACE] Attaching resource config: %#v", r) + arn.AttachResourceConfig(r) + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go new file mode 100644 index 0000000000..564ff08f1f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go @@ -0,0 +1,68 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/dag" +) + +// GraphNodeAttachResourceState is an interface that can be implemented +// to request that a ResourceState is attached to the node. +type GraphNodeAttachResourceState interface { + // The address to the resource for the state + ResourceAddr() *ResourceAddress + + // Sets the state + AttachResourceState(*ResourceState) +} + +// AttachStateTransformer goes through the graph and attaches +// state to nodes that implement the interfaces above. 
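+// Lookup goes through StateFilter: each node's resource address is
+// filtered against the state and the first *ResourceState result, if
+// any, is attached.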
+type AttachStateTransformer struct { + State *State // State is the root state +} + +func (t *AttachStateTransformer) Transform(g *Graph) error { + // If no state, then nothing to do + if t.State == nil { + log.Printf("[DEBUG] Not attaching any state: state is nil") + return nil + } + + filter := &StateFilter{State: t.State} + for _, v := range g.Vertices() { + // Only care about nodes requesting we're adding state + an, ok := v.(GraphNodeAttachResourceState) + if !ok { + continue + } + addr := an.ResourceAddr() + + // Get the module state + results, err := filter.Filter(addr.String()) + if err != nil { + return err + } + + // Attach the first resource state we get + found := false + for _, result := range results { + if rs, ok := result.Value.(*ResourceState); ok { + log.Printf( + "[DEBUG] Attaching resource state to %q: %#v", + dag.VertexName(v), rs) + an.AttachResourceState(rs) + found = true + break + } + } + + if !found { + log.Printf( + "[DEBUG] Resource state not found for %q: %s", + dag.VertexName(v), addr) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go new file mode 100644 index 0000000000..61bce8532a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go @@ -0,0 +1,135 @@ +package terraform + +import ( + "errors" + "fmt" + "log" + "sync" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// ConfigTransformer is a GraphTransformer that adds all the resources +// from the configuration to the graph. +// +// The module used to configure this transformer must be the root module. +// +// Only resources are added to the graph. Variables, outputs, and +// providers must be added via other transforms. +// +// Unlike ConfigTransformerOld, this transformer creates a graph with +// all resources including module resources, rather than creating module +// nodes that are then "flattened". +type ConfigTransformer struct { + Concrete ConcreteResourceNodeFunc + + // Module is the module to add resources from. + Module *module.Tree + + // Unique will only add resources that aren't already present in the graph. + Unique bool + + // Mode will only add resources that match the given mode + ModeFilter bool + Mode config.ResourceMode + + l sync.Mutex + uniqueMap map[string]struct{} +} + +func (t *ConfigTransformer) Transform(g *Graph) error { + // Lock since we use some internal state + t.l.Lock() + defer t.l.Unlock() + + // If no module is given, we don't do anything + if t.Module == nil { + return nil + } + + // If the module isn't loaded, that is simply an error + if !t.Module.Loaded() { + return errors.New("module must be loaded for ConfigTransformer") + } + + // Reset the uniqueness map. If we're tracking uniques, then populate + // it with addresses. + t.uniqueMap = make(map[string]struct{}) + defer func() { t.uniqueMap = nil }() + if t.Unique { + for _, v := range g.Vertices() { + if rn, ok := v.(GraphNodeResource); ok { + t.uniqueMap[rn.ResourceAddr().String()] = struct{}{} + } + } + } + + // Start the transformation process + return t.transform(g, t.Module) +} + +func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error { + // If no config, do nothing + if m == nil { + return nil + } + + // Add our resources + if err := t.transformSingle(g, m); err != nil { + return err + } + + // Transform all the children. 
+ for _, c := range m.Children() { + if err := t.transform(g, c); err != nil { + return err + } + } + + return nil +} + +func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { + log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path()) + + // Get the configuration for this module + conf := m.Config() + + // Build the path we're at + path := m.Path() + + // Write all the resources out + for _, r := range conf.Resources { + // Build the resource address + addr, err := parseResourceAddressConfig(r) + if err != nil { + panic(fmt.Sprintf( + "Error parsing config address, this is a bug: %#v", r)) + } + addr.Path = path + + // If this is already in our uniqueness map, don't add it again + if _, ok := t.uniqueMap[addr.String()]; ok { + continue + } + + // Remove non-matching modes + if t.ModeFilter && addr.Mode != t.Mode { + continue + } + + // Build the abstract node and the concrete one + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + // Add it to the graph + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go new file mode 100644 index 0000000000..92f9888d6d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go @@ -0,0 +1,80 @@ +package terraform + +import ( + "errors" + + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// FlatConfigTransformer is a GraphTransformer that adds the configuration +// to the graph. The module used to configure this transformer must be +// the root module. +// +// This transform adds the nodes but doesn't connect any of the references. +// The ReferenceTransformer should be used for that. +// +// NOTE: In relation to ConfigTransformer: this is a newer generation config +// transformer. It puts the _entire_ config into the graph (there is no +// "flattening" step as before). +type FlatConfigTransformer struct { + Concrete ConcreteResourceNodeFunc // What to turn resources into + + Module *module.Tree +} + +func (t *FlatConfigTransformer) Transform(g *Graph) error { + // If no module, we do nothing + if t.Module == nil { + return nil + } + + // If the module is not loaded, that is an error + if !t.Module.Loaded() { + return errors.New("module must be loaded") + } + + return t.transform(g, t.Module) +} + +func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error { + // If no module, no problem + if m == nil { + return nil + } + + // Transform all the children. + for _, c := range m.Children() { + if err := t.transform(g, c); err != nil { + return err + } + } + + // Get the configuration for this module + config := m.Config() + + // Write all the resources out + for _, r := range config.Resources { + // Grab the address for this resource + addr, err := parseResourceAddressConfig(r) + if err != nil { + return err + } + addr.Path = m.Path() + + // Build the abstract resource. We have the config already so + // we'll just pre-populate that. 
+ abstract := &NodeAbstractResource{ + Addr: addr, + Config: r, + } + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go new file mode 100644 index 0000000000..ec4125822e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go @@ -0,0 +1,23 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// varNameForVar returns the VarName value for an interpolated variable. +// This value is compared to the VarName() value for the nodes within the +// graph to build the graph edges. +func varNameForVar(raw config.InterpolatedVariable) string { + switch v := raw.(type) { + case *config.ModuleVariable: + return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field) + case *config.ResourceVariable: + return v.ResourceId() + case *config.UserVariable: + return fmt.Sprintf("var.%s", v.Name) + default: + return "" + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go new file mode 100644 index 0000000000..83415f3525 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go @@ -0,0 +1,28 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/dag" +) + +// CountBoundaryTransformer adds a node that depends on everything else +// so that it runs last in order to clean up the state for nodes that +// are on the "count boundary": "foo.0" when only one exists becomes "foo" +type CountBoundaryTransformer struct{} + +func (t *CountBoundaryTransformer) Transform(g *Graph) error { + node := &NodeCountBoundary{} + g.Add(node) + + // Depends on everything + for _, v := range g.Vertices() { + // Don't connect to ourselves + if v == node { + continue + } + + // Connect! + g.Connect(dag.BasicEdge(node, v)) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go new file mode 100644 index 0000000000..2148cef479 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go @@ -0,0 +1,168 @@ +package terraform + +import "fmt" + +// DeposedTransformer is a GraphTransformer that adds deposed resources +// to the graph. +type DeposedTransformer struct { + // State is the global state. We'll automatically find the correct + // ModuleState based on the Graph.Path that is being transformed. + State *State + + // View, if non-empty, is the ModuleState.View used around the state + // to find deposed resources. + View string +} + +func (t *DeposedTransformer) Transform(g *Graph) error { + state := t.State.ModuleByPath(g.Path) + if state == nil { + // If there is no state for our module there can't be any deposed + // resources, since they live in the state. 
+ return nil + } + + // If we have a view, apply it now + if t.View != "" { + state = state.View(t.View) + } + + // Go through all the resources in our state to look for deposed resources + for k, rs := range state.Resources { + // If we have no deposed resources, then move on + if len(rs.Deposed) == 0 { + continue + } + deposed := rs.Deposed + + for i, _ := range deposed { + g.Add(&graphNodeDeposedResource{ + Index: i, + ResourceName: k, + ResourceType: rs.Type, + Provider: rs.Provider, + }) + } + } + + return nil +} + +// graphNodeDeposedResource is the graph vertex representing a deposed resource. +type graphNodeDeposedResource struct { + Index int + ResourceName string + ResourceType string + Provider string +} + +func (n *graphNodeDeposedResource) Name() string { + return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index) +} + +func (n *graphNodeDeposedResource) ProvidedBy() []string { + return []string{resourceProvider(n.ResourceName, n.Provider)} +} + +// GraphNodeEvalable impl. +func (n *graphNodeDeposedResource) EvalTree() EvalNode { + var provider ResourceProvider + var state *InstanceState + + seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} + + // Build instance info + info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType} + seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info}) + + // Refresh the resource + seq.Nodes = append(seq.Nodes, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + &EvalReadStateDeposed{ + Name: n.ResourceName, + Output: &state, + Index: n.Index, + }, + &EvalRefresh{ + Info: info, + Provider: &provider, + State: &state, + Output: &state, + }, + &EvalWriteStateDeposed{ + Name: n.ResourceName, + ResourceType: n.ResourceType, + Provider: n.Provider, + State: &state, + Index: n.Index, + }, + }, + }, + }) + + // Apply + var diff *InstanceDiff + var err error + seq.Nodes = append(seq.Nodes, &EvalOpFilter{ + Ops: []walkOperation{walkApply, walkDestroy}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + &EvalReadStateDeposed{ + Name: n.ResourceName, + Output: &state, + Index: n.Index, + }, + &EvalDiffDestroy{ + Info: info, + State: &state, + Output: &diff, + }, + // Call pre-apply hook + &EvalApplyPre{ + Info: info, + State: &state, + Diff: &diff, + }, + &EvalApply{ + Info: info, + State: &state, + Diff: &diff, + Provider: &provider, + Output: &state, + Error: &err, + }, + // Always write the resource back to the state deposed... if it + // was successfully destroyed it will be pruned. If it was not, it will + // be caught on the next run. 
+				&EvalWriteStateDeposed{
+					Name:         n.ResourceName,
+					ResourceType: n.ResourceType,
+					Provider:     n.Provider,
+					State:        &state,
+					Index:        n.Index,
+				},
+				&EvalApplyPost{
+					Info:  info,
+					State: &state,
+					Error: &err,
+				},
+				&EvalReturnError{
+					Error: &err,
+				},
+				&EvalUpdateStateHook{},
+			},
+		},
+	})
+
+	return seq
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
new file mode 100644
index 0000000000..edfb460bfd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -0,0 +1,257 @@
+package terraform
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyerCBD must be implemented by nodes that might be
+// create-before-destroy destroyers.
+type GraphNodeDestroyerCBD interface {
+	GraphNodeDestroyer
+
+	// CreateBeforeDestroy returns true if this node represents a node
+	// that is doing a CBD.
+	CreateBeforeDestroy() bool
+
+	// ModifyCreateBeforeDestroy is called when the CBD state of a node
+	// is changed dynamically. This can return an error if this isn't
+	// allowed.
+	ModifyCreateBeforeDestroy(bool) error
+}
+
+// CBDEdgeTransformer modifies the edges of CBD nodes that went through
+// the DestroyEdgeTransformer to have the right dependencies. There are
+// two real tasks here:
+//
+// 1. With CBD, the destroy edge is inverted: the destroy depends on
+//    the creation.
+//
+// 2. A_d must depend on resources that depend on A. This is to enable
+//    the destroy to only happen once nodes that depend on A successfully
+//    update to A. Example: adding a web server updates the load balancer
+//    before deleting the old web server.
+//
+type CBDEdgeTransformer struct {
+	// Module and State are only needed to look up dependencies in
+	// any way possible. Either can be nil if not available.
+	Module *module.Tree
+	State  *State
+}
+
+func (t *CBDEdgeTransformer) Transform(g *Graph) error {
+	log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
+
+	// Go through and reverse any destroy edges
+	destroyMap := make(map[string][]dag.Vertex)
+	for _, v := range g.Vertices() {
+		dn, ok := v.(GraphNodeDestroyerCBD)
+		if !ok {
+			continue
+		}
+
+		if !dn.CreateBeforeDestroy() {
+			// If there are no CBD ancestors (dependent nodes), then we
+			// do nothing here.
+			if !t.hasCBDAncestor(g, v) {
+				continue
+			}
+
+			// If this isn't naturally a CBD node, this means that an ancestor is
+			// and we need to auto-upgrade this node to CBD. We do this because
+			// a CBD node depending on non-CBD will result in cycles. To avoid this,
+			// we always attempt to upgrade it.
+			if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
+				return fmt.Errorf(
+					"%s: must have create before destroy enabled because "+
+						"a dependent resource has CBD enabled. However, when "+
+						"attempting to automatically do this, an error occurred: %s",
+					dag.VertexName(v), err)
+			}
+		}
+
+		// Find the destroy edge. There should only be one.
+		for _, e := range g.EdgesTo(v) {
+			// Not a destroy edge, ignore it
+			de, ok := e.(*DestroyEdge)
+			if !ok {
+				continue
+			}
+
+			log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
+				dag.VertexName(de.Source()), dag.VertexName(de.Target()))
+
+			// Found it! Invert.
+			g.RemoveEdge(de)
+			g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()})
+		}
+
+		// If the address has an index, we strip that. Our depMap creation
+		// graph doesn't expand counts so we don't currently get _exact_
+		// dependencies. One day when we limit dependencies more exactly
+		// this will have to change. We have a test case covering this
+		// (depNonCBDCountBoth) so it'll be caught.
+		addr := dn.DestroyAddr()
+		if addr.Index >= 0 {
+			addr = addr.Copy() // Copy so that we don't modify any pointers
+			addr.Index = -1
+		}
+
+		// Add this to the list of nodes that we need to fix up
+		// the edges for (step 2 above in the docs).
+		key := addr.String()
+		destroyMap[key] = append(destroyMap[key], v)
+	}
+
+	// If we have no CBD nodes, then our work here is done
+	if len(destroyMap) == 0 {
+		return nil
+	}
+
+	// We have CBD nodes. We now have to move on to the much more difficult
+	// task of connecting dependencies of the creation side of the destroy
+	// to the destruction node. The easiest way to explain this is an example:
+	//
+	// Given a pre-destroy dependence of: A => B
+	// And A has CBD set.
+	//
+	// The resulting graph should be: A => B => A_d
+	//
+	// The key here is that B happens before A is destroyed. This is to
+	// facilitate the primary purpose for CBD: making sure that downstreams
+	// are properly updated to avoid downtime before the resource is destroyed.
+	//
+	// We can't trust that the resource being destroyed or anything that
+	// depends on it is actually in our current graph so we make a new
+	// graph in order to determine those dependencies and add them in.
+	log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...")
+	depMap, err := t.depMap(destroyMap)
+	if err != nil {
+		return err
+	}
+
+	// We now have the mapping of resource addresses to the destroy
+	// nodes they need to depend on. We now go through our own vertices to
+	// find any matching these addresses and make the connection.
+	for _, v := range g.Vertices() {
+		// We're looking for creators
+		rn, ok := v.(GraphNodeCreator)
+		if !ok {
+			continue
+		}
+
+		// Get the address
+		addr := rn.CreateAddr()
+
+		// If the address has an index, we strip that. Our depMap creation
+		// graph doesn't expand counts so we don't currently get _exact_
+		// dependencies. One day when we limit dependencies more exactly
+		// this will have to change. We have a test case covering this
+		// (depNonCBDCount) so it'll be caught.
+		if addr.Index >= 0 {
+			addr = addr.Copy() // Copy so that we don't modify any pointers
+			addr.Index = -1
+		}
+
+		// If there is nothing this resource should depend on, ignore it
+		key := addr.String()
+		dns, ok := depMap[key]
+		if !ok {
+			continue
+		}
+
+		// We have nodes! Make the connection
+		for _, dn := range dns {
+			log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s",
+				dag.VertexName(dn), dag.VertexName(v))
+			g.Connect(dag.BasicEdge(dn, v))
+		}
+	}
+
+	return nil
+}
+
+func (t *CBDEdgeTransformer) depMap(
+	destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
+	// Build the graph of our config, this ensures that all resources
+	// are present in the graph.
+	g, err := (&BasicGraphBuilder{
+		Steps: []GraphTransformer{
+			&FlatConfigTransformer{Module: t.Module},
+			&AttachResourceConfigTransformer{Module: t.Module},
+			&AttachStateTransformer{State: t.State},
+			&ReferenceTransformer{},
+		},
+		Name: "CBDEdgeTransformer",
+	}).Build(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Using this graph, build the list of destroy nodes that each resource
+	// address should depend on. For example, when we find B, we map the
+	// address of B to A_d in the "depMap" variable below.
+	depMap := make(map[string][]dag.Vertex)
+	for _, v := range g.Vertices() {
+		// We're looking for resources.
+		rn, ok := v.(GraphNodeResource)
+		if !ok {
+			continue
+		}
+
+		// Get the address
+		addr := rn.ResourceAddr()
+		key := addr.String()
+
+		// Get the destroy nodes that are destroying this resource.
+		// If there aren't any, then we don't need to worry about
+		// any connections.
+		dns, ok := destroyMap[key]
+		if !ok {
+			continue
+		}
+
+		// Get the nodes that depend on this one. In the example above:
+		// finding B in A => B.
+		for _, v := range g.UpEdges(v).List() {
+			// We're looking for resources.
+			rn, ok := v.(GraphNodeResource)
+			if !ok {
+				continue
+			}
+
+			// Keep track of the destroy nodes that this address
+			// needs to depend on.
+			key := rn.ResourceAddr().String()
+			depMap[key] = append(depMap[key], dns...)
+		}
+	}
+
+	return depMap, nil
+}
+
+// hasCBDAncestor returns true if any ancestor (node that depends on this)
+// has CBD set.
+func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
+	s, _ := g.Ancestors(v)
+	if s == nil {
+		return true
+	}
+
+	for _, v := range s.List() {
+		dn, ok := v.(GraphNodeDestroyerCBD)
+		if !ok {
+			continue
+		}
+
+		if dn.CreateBeforeDestroy() {
+			// Some ancestor is CreateBeforeDestroy, so we need to follow suit.
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
new file mode 100644
index 0000000000..22be1ab62a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -0,0 +1,269 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyer must be implemented by nodes that destroy resources.
+type GraphNodeDestroyer interface {
+	dag.Vertex
+
+	// DestroyAddr is the address of the resource that is being
+	// destroyed by this node. If this returns nil, then this node
+	// is not destroying anything.
+	DestroyAddr() *ResourceAddress
+}
+
+// GraphNodeCreator must be implemented by nodes that create OR update resources.
+type GraphNodeCreator interface {
+	// CreateAddr is the address of the resource being created or updated
+	CreateAddr() *ResourceAddress
+}
+
+// DestroyEdgeTransformer is a GraphTransformer that creates the proper
+// references for destroy resources. Destroy resources are more complex
+// in that they must depend on the destruction of resources that
+// in turn depend on the CREATION of the node being destroyed.
+//
+// That is complicated. Visually:
+//
+//   B_d -> A_d -> A -> B
+//
+// Notice that A destroy depends on B destroy, while B create depends on
+// A create. They're inverted. This must be done for example because often
+// dependent resources will block parent resources from deleting. Concrete
+// example: VPC with subnets, the VPC can't be deleted while there are
+// still subnets.
+type DestroyEdgeTransformer struct {
+	// These are needed to properly build the graph of dependencies
+	// to determine what a destroy node depends on. Any of these can be nil.
+	Module *module.Tree
+	State  *State
+}
+
+func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
+	log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
+
+	// Build a map of what is being destroyed (by address string) to
+	// the list of destroyers. In general there will only be one destroyer
+	// but to make it more robust we support multiple.
+	destroyers := make(map[string][]GraphNodeDestroyer)
+	for _, v := range g.Vertices() {
+		dn, ok := v.(GraphNodeDestroyer)
+		if !ok {
+			continue
+		}
+
+		addr := dn.DestroyAddr()
+		if addr == nil {
+			continue
+		}
+
+		key := addr.String()
+		log.Printf(
+			"[TRACE] DestroyEdgeTransformer: %s destroying %q",
+			dag.VertexName(dn), key)
+		destroyers[key] = append(destroyers[key], dn)
+	}
+
+	// If we aren't destroying anything, there will be no edges to make
+	// so just exit early and avoid future work.
+	if len(destroyers) == 0 {
+		return nil
+	}
+
+	// Go through and connect creators to destroyers. Going along with
+	// our example, this makes: A_d => A
+	for _, v := range g.Vertices() {
+		cn, ok := v.(GraphNodeCreator)
+		if !ok {
+			continue
+		}
+
+		addr := cn.CreateAddr()
+		if addr == nil {
+			continue
+		}
+
+		key := addr.String()
+		ds := destroyers[key]
+		if len(ds) == 0 {
+			continue
+		}
+
+		for _, d := range ds {
+			// For illustrating our example
+			a_d := d.(dag.Vertex)
+			a := v
+
+			log.Printf(
+				"[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s",
+				dag.VertexName(a), dag.VertexName(a_d))
+
+			g.Connect(&DestroyEdge{S: a, T: a_d})
+		}
+	}
+
+	// This is strange but is the easiest way to get the dependencies
+	// of a node that is being destroyed. We use another graph to make sure
+	// the resource is in the graph and ask for references. We have to do this
+	// because the node that is being destroyed may NOT be in the graph.
+	//
+	// Example: resource A is force new, then destroy A AND create A are
+	// in the graph. BUT if resource A is just pure destroy, then only
+	// destroy A is in the graph, and create A is not.
+	providerFn := func(a *NodeAbstractProvider) dag.Vertex {
+		return &NodeApplyableProvider{NodeAbstractProvider: a}
+	}
+	steps := []GraphTransformer{
+		// Add outputs and metadata
+		&OutputTransformer{Module: t.Module},
+		&AttachResourceConfigTransformer{Module: t.Module},
+		&AttachStateTransformer{State: t.State},
+
+		// Add providers since they can affect destroy order as well
+		&MissingProviderTransformer{AllowAny: true, Concrete: providerFn},
+		&ProviderTransformer{},
+		&DisableProviderTransformer{},
+		&ParentProviderTransformer{},
+		&AttachProviderConfigTransformer{Module: t.Module},
+
+		// Add all the variables. We can depend on resources through
+		// variables due to module parameters, and we need to properly
+		// determine that.
+		&RootVariableTransformer{Module: t.Module},
+		&ModuleVariableTransformer{Module: t.Module},
+
+		&ReferenceTransformer{},
+	}
+
+	// Go through all the nodes being destroyed and create a graph.
+	// The resulting graph is only of things being CREATED. For example,
+	// following our example, the resulting graph would be:
+	//
+	//   A, B (with no edges)
+	//
+	var tempG Graph
+	var tempDestroyed []dag.Vertex
+	for d := range destroyers {
+		// d is what is being destroyed. We parse the resource address
+		// it came from; it is a panic if this fails.
+ addr, err := ParseResourceAddress(d) + if err != nil { + panic(err) + } + + // This part is a little bit weird but is the best way to + // find the dependencies we need to: build a graph and use the + // attach config and state transformers then ask for references. + abstract := &NodeAbstractResource{Addr: addr} + tempG.Add(abstract) + tempDestroyed = append(tempDestroyed, abstract) + + // We also add the destroy version here since the destroy can + // depend on things that the creation doesn't (destroy provisioners). + destroy := &NodeDestroyResource{NodeAbstractResource: abstract} + tempG.Add(destroy) + tempDestroyed = append(tempDestroyed, destroy) + } + + // Run the graph transforms so we have the information we need to + // build references. + for _, s := range steps { + if err := s.Transform(&tempG); err != nil { + return err + } + } + + log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String()) + + // Go through all the nodes in the graph and determine what they + // depend on. + for _, v := range tempDestroyed { + // Find all ancestors of this to determine the edges we'll depend on + vs, err := tempG.Ancestors(v) + if err != nil { + return err + } + + refs := make([]dag.Vertex, 0, vs.Len()) + for _, raw := range vs.List() { + refs = append(refs, raw.(dag.Vertex)) + } + + refNames := make([]string, len(refs)) + for i, ref := range refs { + refNames[i] = dag.VertexName(ref) + } + log.Printf( + "[TRACE] DestroyEdgeTransformer: creation node %q references %s", + dag.VertexName(v), refNames) + + // If we have no references, then we won't need to do anything + if len(refs) == 0 { + continue + } + + // Get the destroy node for this. In the example of our struct, + // we are currently at B and we're looking for B_d. + rn, ok := v.(GraphNodeResource) + if !ok { + continue + } + + addr := rn.ResourceAddr() + if addr == nil { + continue + } + + dns := destroyers[addr.String()] + + // We have dependencies, check if any are being destroyed + // to build the list of things that we must depend on! + // + // In the example of the struct, if we have: + // + // B_d => A_d => A => B + // + // Then at this point in the algorithm we started with B_d, + // we built B (to get dependencies), and we found A. We're now looking + // to see if A_d exists. + var depDestroyers []dag.Vertex + for _, v := range refs { + rn, ok := v.(GraphNodeResource) + if !ok { + continue + } + + addr := rn.ResourceAddr() + if addr == nil { + continue + } + + key := addr.String() + if ds, ok := destroyers[key]; ok { + for _, d := range ds { + depDestroyers = append(depDestroyers, d.(dag.Vertex)) + log.Printf( + "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s", + key, dag.VertexName(d)) + } + } + } + + // Go through and make the connections. Use the variable + // names "a_d" and "b_d" to reference our example. + for _, a_d := range dns { + for _, b_d := range depDestroyers { + if b_d != a_d { + g.Connect(dag.BasicEdge(b_d, a_d)) + } + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go new file mode 100644 index 0000000000..ad46d3c612 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go @@ -0,0 +1,86 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// DiffTransformer is a GraphTransformer that adds the elements of +// the diff to the graph. 
+//
+// This transform is used for example by the ApplyGraphBuilder to ensure
+// that only resources that are being modified are represented in the graph.
+//
+// Module and State are still required for the DiffTransformer for annotations
+// since the Diff doesn't contain all the information required to build the
+// complete graph (such as create-before-destroy information). The graph
+// is built based on the diff first, though, ensuring that only resources
+// that are being modified are present in the graph.
+type DiffTransformer struct {
+	Concrete ConcreteResourceNodeFunc
+
+	Diff   *Diff
+	Module *module.Tree
+	State  *State
+}
+
+func (t *DiffTransformer) Transform(g *Graph) error {
+	// If the diff is nil or empty (nil is empty) then do nothing
+	if t.Diff.Empty() {
+		return nil
+	}
+
+	// Go through all the modules in the diff.
+	log.Printf("[TRACE] DiffTransformer: starting")
+	var nodes []dag.Vertex
+	for _, m := range t.Diff.Modules {
+		log.Printf("[TRACE] DiffTransformer: Module: %s", m)
+		// TODO: If this is a destroy diff then add a module destroy node
+
+		// Go through all the resources in this module.
+		for name, inst := range m.Resources {
+			log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst)
+
+			// We have changes! First grab the address so we have a
+			// unique way to reference this resource.
+			addr, err := parseResourceAddressInternal(name)
+			if err != nil {
+				panic(fmt.Sprintf(
+					"Error parsing internal name, this is a bug: %q", name))
+			}
+
+			// Very important: add the module path for this resource to
+			// the address. Remove "root" from it.
+			addr.Path = m.Path[1:]
+
+			// If we're destroying, add the destroy node
+			if inst.Destroy || inst.GetDestroyDeposed() {
+				abstract := &NodeAbstractResource{Addr: addr}
+				g.Add(&NodeDestroyResource{NodeAbstractResource: abstract})
+			}
+
+			// If we have attribute changes, then add the applyable version
+			if len(inst.Attributes) > 0 {
+				// Add the resource to the graph
+				abstract := &NodeAbstractResource{Addr: addr}
+				var node dag.Vertex = abstract
+				if f := t.Concrete; f != nil {
+					node = f(abstract)
+				}
+
+				nodes = append(nodes, node)
+			}
+		}
+	}
+
+	// Add all the nodes to the graph
+	for _, n := range nodes {
+		g.Add(n)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
new file mode 100644
index 0000000000..982c098b81
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
@@ -0,0 +1,48 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeExpandable is an interface that nodes can implement to
+// signal that they can be expanded. Expanded nodes turn into
+// GraphNodeSubgraph nodes within the graph.
+type GraphNodeExpandable interface {
+	Expand(GraphBuilder) (GraphNodeSubgraph, error)
+}
+
+// GraphNodeDynamicExpandable is an interface that nodes can implement
+// to signal that they can be expanded at eval-time (hence dynamic).
+// These nodes are given the eval context and are expected to return
+// a new subgraph.
+type GraphNodeDynamicExpandable interface {
+	DynamicExpand(EvalContext) (*Graph, error)
+}
+
+// GraphNodeSubgraph is an interface a node can implement if it has
+// a larger subgraph that should be walked.
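+//
+// A hypothetical implementer might look roughly like:
+//
+//	type moduleNode struct{ g *Graph }
+//
+//	func (n *moduleNode) Subgraph() dag.Grapher { return n.g }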
+type GraphNodeSubgraph interface { + Subgraph() dag.Grapher +} + +// ExpandTransform is a transformer that does a subgraph expansion +// at graph transform time (vs. at eval time). The benefit of earlier +// subgraph expansion is that errors with the graph build can be detected +// at an earlier stage. +type ExpandTransform struct { + Builder GraphBuilder +} + +func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) { + ev, ok := v.(GraphNodeExpandable) + if !ok { + // This isn't an expandable vertex, so just ignore it. + return v, nil + } + + // Expand the subgraph! + log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev)) + return ev.Expand(t.Builder) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go new file mode 100644 index 0000000000..3673771ca2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go @@ -0,0 +1,38 @@ +package terraform + +import ( + "fmt" + "strings" +) + +// ImportProviderValidateTransformer is a GraphTransformer that goes through +// the providers in the graph and validates that they only depend on variables. +type ImportProviderValidateTransformer struct{} + +func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + // We only care about providers + pv, ok := v.(GraphNodeProvider) + if !ok { + continue + } + + // We only care about providers that reference things + rn, ok := pv.(GraphNodeReferencer) + if !ok { + continue + } + + for _, ref := range rn.References() { + if !strings.HasPrefix(ref, "var.") { + return fmt.Errorf( + "Provider %q depends on non-var %q. Providers for import can currently\n"+ + "only depend on variables or must be hardcoded. You can stop import\n"+ + "from loading configurations by specifying `-config=\"\"`.", + pv.ProviderName(), ref) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go new file mode 100644 index 0000000000..081df2f84d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go @@ -0,0 +1,241 @@ +package terraform + +import ( + "fmt" +) + +// ImportStateTransformer is a GraphTransformer that adds nodes to the +// graph to represent the imports we want to do for resources. 
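+//
+// For example, `terraform import aws_instance.web i-abcd1234` (hypothetical
+// address and ID) arrives here as a single ImportTarget and becomes one
+// graphNodeImportState vertex below.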
+type ImportStateTransformer struct { + Targets []*ImportTarget +} + +func (t *ImportStateTransformer) Transform(g *Graph) error { + nodes := make([]*graphNodeImportState, 0, len(t.Targets)) + for _, target := range t.Targets { + addr, err := ParseResourceAddress(target.Addr) + if err != nil { + return fmt.Errorf( + "failed to parse resource address '%s': %s", + target.Addr, err) + } + + nodes = append(nodes, &graphNodeImportState{ + Addr: addr, + ID: target.ID, + Provider: target.Provider, + }) + } + + // Build the graph vertices + for _, n := range nodes { + g.Add(n) + } + + return nil +} + +type graphNodeImportState struct { + Addr *ResourceAddress // Addr is the resource address to import to + ID string // ID is the ID to import as + Provider string // Provider string + + states []*InstanceState +} + +func (n *graphNodeImportState) Name() string { + return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID) +} + +func (n *graphNodeImportState) ProvidedBy() []string { + return []string{resourceProvider(n.Addr.Type, n.Provider)} +} + +// GraphNodeSubPath +func (n *graphNodeImportState) Path() []string { + return normalizeModulePath(n.Addr.Path) +} + +// GraphNodeEvalable impl. +func (n *graphNodeImportState) EvalTree() EvalNode { + var provider ResourceProvider + info := &InstanceInfo{ + Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name), + ModulePath: n.Path(), + Type: n.Addr.Type, + } + + // Reset our states + n.states = nil + + // Return our sequence + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n.ProvidedBy()[0], + Output: &provider, + }, + &EvalImportState{ + Provider: &provider, + Info: info, + Id: n.ID, + Output: &n.states, + }, + }, + } +} + +// GraphNodeDynamicExpandable impl. +// +// We use DynamicExpand as a way to generate the subgraph of refreshes +// and state inserts we need to do for our import state. Since they're new +// resources they don't depend on anything else and refreshes are isolated +// so this is nearly a perfect use case for dynamic expand. +func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { + g := &Graph{Path: ctx.Path()} + + // nameCounter is used to de-dup names in the state. + nameCounter := make(map[string]int) + + // Compile the list of addresses that we'll be inserting into the state. + // We do this ahead of time so we can verify that we aren't importing + // something that already exists. + addrs := make([]*ResourceAddress, len(n.states)) + for i, state := range n.states { + addr := *n.Addr + if t := state.Ephemeral.Type; t != "" { + addr.Type = t + } + + // Determine if we need to suffix the name to de-dup + key := addr.String() + count, ok := nameCounter[key] + if ok { + count++ + addr.Name += fmt.Sprintf("-%d", count) + } + nameCounter[key] = count + + // Add it to our list + addrs[i] = &addr + } + + // Verify that all the addresses are clear + state, lock := ctx.State() + lock.RLock() + defer lock.RUnlock() + filter := &StateFilter{State: state} + for _, addr := range addrs { + result, err := filter.Filter(addr.String()) + if err != nil { + return nil, fmt.Errorf("Error verifying address %s: %s", addr, err) + } + + // Go through the filter results and it is an error if we find + // a matching InstanceState, meaning that we would have a collision. 
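+		// (e.g. importing to "aws_instance.web" must fail if the state
+		// already tracks an instance at that exact address.)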
+ for _, r := range result { + if _, ok := r.Value.(*InstanceState); ok { + return nil, fmt.Errorf( + "Can't import %s, would collide with an existing resource.\n\n"+ + "Please remove or rename this resource before continuing.", + addr) + } + } + } + + // For each of the states, we add a node to handle the refresh/add to state. + // "n.states" is populated by our own EvalTree with the result of + // ImportState. Since DynamicExpand is always called after EvalTree, this + // is safe. + for i, state := range n.states { + g.Add(&graphNodeImportStateSub{ + Target: addrs[i], + Path_: n.Path(), + State: state, + Provider: n.Provider, + }) + } + + // Root transform for a single root + t := &RootTransformer{} + if err := t.Transform(g); err != nil { + return nil, err + } + + // Done! + return g, nil +} + +// graphNodeImportStateSub is the sub-node of graphNodeImportState +// and is part of the subgraph. This node is responsible for refreshing +// and adding a resource to the state once it is imported. +type graphNodeImportStateSub struct { + Target *ResourceAddress + State *InstanceState + Path_ []string + Provider string +} + +func (n *graphNodeImportStateSub) Name() string { + return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID) +} + +func (n *graphNodeImportStateSub) Path() []string { + return n.Path_ +} + +// GraphNodeEvalable impl. +func (n *graphNodeImportStateSub) EvalTree() EvalNode { + // If the Ephemeral type isn't set, then it is an error + if n.State.Ephemeral.Type == "" { + err := fmt.Errorf( + "import of %s didn't set type for %s", + n.Target.String(), n.State.ID) + return &EvalReturnError{Error: &err} + } + + // DeepCopy so we're only modifying our local copy + state := n.State.DeepCopy() + + // Build the resource info + info := &InstanceInfo{ + Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name), + ModulePath: n.Path_, + Type: n.State.Ephemeral.Type, + } + + // Key is the resource key + key := &ResourceStateKey{ + Name: n.Target.Name, + Type: info.Type, + Index: n.Target.Index, + } + + // The eval sequence + var provider ResourceProvider + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: resourceProvider(info.Type, n.Provider), + Output: &provider, + }, + &EvalRefresh{ + Provider: &provider, + State: &state, + Info: info, + Output: &state, + }, + &EvalImportStateVerify{ + Info: info, + Id: n.State.ID, + State: &state, + }, + &EvalWriteState{ + Name: key.String(), + ResourceType: info.Type, + Provider: resourceProvider(info.Type, n.Provider), + State: &state, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go new file mode 100644 index 0000000000..467950bdc6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go @@ -0,0 +1,120 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// ModuleVariableTransformer is a GraphTransformer that adds all the variables +// in the configuration to the graph. +// +// This only adds variables that are referenced by other things in the graph. +// If a module variable is not referenced, it won't be added to the graph. 
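+//
+// For example (hypothetical config), with
+//
+//	module "child" {
+//	  source  = "./child"
+//	  network = "${aws_vpc.main.id}"
+//	}
+//
+// the child module's "network" variable is only added to the graph if some
+// node inside the child actually references it.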
+type ModuleVariableTransformer struct { + Module *module.Tree + + DisablePrune bool // True if pruning unreferenced should be disabled +} + +func (t *ModuleVariableTransformer) Transform(g *Graph) error { + return t.transform(g, nil, t.Module) +} + +func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error { + // If no config, no variables + if m == nil { + return nil + } + + // Transform all the children. This must be done BEFORE the transform + // above since child module variables can reference parent module variables. + for _, c := range m.Children() { + if err := t.transform(g, m, c); err != nil { + return err + } + } + + // If we have a parent, we can determine if a module variable is being + // used, so we transform this. + if parent != nil { + if err := t.transformSingle(g, parent, m); err != nil { + return err + } + } + + return nil +} + +func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error { + // If we have no vars, we're done! + vars := m.Config().Variables + if len(vars) == 0 { + log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path()) + return nil + } + + // Look for usage of this module + var mod *config.Module + for _, modUse := range parent.Config().Modules { + if modUse.Name == m.Name() { + mod = modUse + break + } + } + if mod == nil { + log.Printf("[INFO] Module %#v not used, not adding variables", m.Path()) + return nil + } + + // Build the reference map so we can determine if we're referencing things. + refMap := NewReferenceMap(g.Vertices()) + + // Add all variables here + for _, v := range vars { + // Determine the value of the variable. If it isn't in the + // configuration then it was never set and that's not a problem. + var value *config.RawConfig + if raw, ok := mod.RawConfig.Raw[v.Name]; ok { + var err error + value, err = config.NewRawConfig(map[string]interface{}{ + v.Name: raw, + }) + if err != nil { + // This shouldn't happen because it is already in + // a RawConfig above meaning it worked once before. + panic(err) + } + } + + // Build the node. + // + // NOTE: For now this is just an "applyable" variable. As we build + // new graph builders for the other operations I suspect we'll + // find a way to parameterize this, require new transforms, etc. + node := &NodeApplyableModuleVariable{ + PathValue: normalizeModulePath(m.Path()), + Config: v, + Value: value, + Module: t.Module, + } + + if !t.DisablePrune { + // If the node is not referenced by anything, then we don't need + // to include it since it won't be used. + if matches := refMap.ReferencedBy(node); len(matches) == 0 { + log.Printf( + "[INFO] Not including %q in graph, nothing depends on it", + dag.VertexName(node)) + continue + } + } + + // Add it! + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go new file mode 100644 index 0000000000..b256a25b7b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go @@ -0,0 +1,110 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/dag" +) + +// OrphanResourceCountTransformer is a GraphTransformer that adds orphans +// for an expanded count to the graph. The determination of this depends +// on the count argument given. +// +// Orphans are found by comparing the count to what is found in the state. 
+// This transform assumes that if an element in the state is within the count +// bounds given, that it is not an orphan. +type OrphanResourceCountTransformer struct { + Concrete ConcreteResourceNodeFunc + + Count int // Actual count of the resource + Addr *ResourceAddress // Addr of the resource to look for orphans + State *State // Full global state +} + +func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { + log.Printf("[TRACE] OrphanResourceCount: Starting...") + + // Grab the module in the state just for this resource address + ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path)) + if ms == nil { + // If no state, there can't be orphans + return nil + } + + orphanIndex := -1 + if t.Count == 1 { + orphanIndex = 0 + } + + // Go through the orphans and add them all to the state + for key, _ := range ms.Resources { + // Build the address + addr, err := parseResourceAddressInternal(key) + if err != nil { + return err + } + addr.Path = ms.Path[1:] + + // Copy the address for comparison. If we aren't looking at + // the same resource, then just ignore it. + addrCopy := addr.Copy() + addrCopy.Index = -1 + if !addrCopy.Equals(t.Addr) { + continue + } + + log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr) + + idx := addr.Index + + // If we have zero and the index here is 0 or 1, then we + // change the index to a high number so that we treat it as + // an orphan. + if t.Count <= 0 && idx <= 0 { + idx = t.Count + 1 + } + + // If we have a count greater than 0 and we're at the zero index, + // we do a special case check to see if our state also has a + // -1 index value. If so, this is an orphan because our rules are + // that if both a -1 and 0 are in the state, the 0 is destroyed. + if t.Count > 0 && idx == orphanIndex { + // This is a piece of cleverness (beware), but its simple: + // if orphanIndex is 0, then check -1, else check 0. + checkIndex := (orphanIndex + 1) * -1 + + key := &ResourceStateKey{ + Name: addr.Name, + Type: addr.Type, + Mode: addr.Mode, + Index: checkIndex, + } + + if _, ok := ms.Resources[key.String()]; ok { + // We have a -1 index, too. Make an arbitrarily high + // index so that we always mark this as an orphan. + log.Printf( + "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d", + addr, orphanIndex) + idx = t.Count + 1 + } + } + + // If the index is within the count bounds, it is not an orphan + if idx < t.Count { + continue + } + + // Build the abstract node and the concrete one + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + // Add it to the graph + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go new file mode 100644 index 0000000000..49568d5bcd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go @@ -0,0 +1,64 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" +) + +// OrphanOutputTransformer finds the outputs that aren't present +// in the given config that are in the state and adds them to the graph +// for deletion. 
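+//
+// For example (illustrative): if the state still records an output named
+// "old_ip" but the configuration no longer declares it, a NodeOutputOrphan
+// is added below so the stale value is removed from the state.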
+type OrphanOutputTransformer struct { + Module *module.Tree // Root module + State *State // State is the root state +} + +func (t *OrphanOutputTransformer) Transform(g *Graph) error { + if t.State == nil { + log.Printf("[DEBUG] No state, no orphan outputs") + return nil + } + + return t.transform(g, t.Module) +} + +func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error { + // Get our configuration, and recurse into children + var c *config.Config + if m != nil { + c = m.Config() + for _, child := range m.Children() { + if err := t.transform(g, child); err != nil { + return err + } + } + } + + // Get the state. If there is no state, then we have no orphans! + path := normalizeModulePath(m.Path()) + state := t.State.ModuleByPath(path) + if state == nil { + return nil + } + + // Make a map of the valid outputs + valid := make(map[string]struct{}) + for _, o := range c.Outputs { + valid[o.Name] = struct{}{} + } + + // Go through the outputs and find the ones that aren't in our config. + for n, _ := range state.Outputs { + // If it is in the valid map, then ignore + if _, ok := valid[n]; ok { + continue + } + + // Orphan! + g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path}) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go new file mode 100644 index 0000000000..e42d3c8495 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go @@ -0,0 +1,78 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// OrphanResourceTransformer is a GraphTransformer that adds resource +// orphans to the graph. A resource orphan is a resource that is +// represented in the state but not in the configuration. +// +// This only adds orphans that have no representation at all in the +// configuration. +type OrphanResourceTransformer struct { + Concrete ConcreteResourceNodeFunc + + // State is the global state. We require the global state to + // properly find module orphans at our path. + State *State + + // Module is the root module. We'll look up the proper configuration + // using the graph path. + Module *module.Tree +} + +func (t *OrphanResourceTransformer) Transform(g *Graph) error { + if t.State == nil { + // If the entire state is nil, there can't be any orphans + return nil + } + + // Go through the modules and for each module transform in order + // to add the orphan. + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err + } + } + + return nil +} + +func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error { + if ms == nil { + return nil + } + + // Get the configuration for this path. The configuration might be + // nil if the module was removed from the configuration. This is okay, + // this just means that every resource is an orphan. 
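+	// (e.g. the `module "child"` block was deleted from the parent
+	// configuration while its state entries remain.)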
+ var c *config.Config + if m := t.Module.Child(ms.Path[1:]); m != nil { + c = m.Config() + } + + // Go through the orphans and add them all to the state + for _, key := range ms.Orphans(c) { + // Build the abstract resource + addr, err := parseResourceAddressInternal(key) + if err != nil { + return err + } + addr.Path = ms.Path[1:] + + // Build the abstract node and the concrete one + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + // Add it to the graph + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go new file mode 100644 index 0000000000..b260f4caa1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go @@ -0,0 +1,59 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" +) + +// OutputTransformer is a GraphTransformer that adds all the outputs +// in the configuration to the graph. +// +// This is done for the apply graph builder even if dependent nodes +// aren't changing since there is no downside: the state will be available +// even if the dependent items aren't changing. +type OutputTransformer struct { + Module *module.Tree +} + +func (t *OutputTransformer) Transform(g *Graph) error { + return t.transform(g, t.Module) +} + +func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error { + // If no config, no outputs + if m == nil { + return nil + } + + // Transform all the children. We must do this first because + // we can reference module outputs and they must show up in the + // reference map. + for _, c := range m.Children() { + if err := t.transform(g, c); err != nil { + return err + } + } + + // If we have no outputs, we're done! + os := m.Config().Outputs + if len(os) == 0 { + return nil + } + + // Add all outputs here + for _, o := range os { + // Build the node. + // + // NOTE: For now this is just an "applyable" output. As we build + // new graph builders for the other operations I suspect we'll + // find a way to parameterize this, require new transforms, etc. + node := &NodeApplyableOutput{ + PathValue: normalizeModulePath(m.Path()), + Config: o, + } + + // Add it! + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go new file mode 100644 index 0000000000..b9695d5242 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go @@ -0,0 +1,380 @@ +package terraform + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/dag" +) + +// GraphNodeProvider is an interface that nodes that can be a provider +// must implement. The ProviderName returned is the name of the provider +// they satisfy. +type GraphNodeProvider interface { + ProviderName() string +} + +// GraphNodeCloseProvider is an interface that nodes that can be a close +// provider must implement. The CloseProviderName returned is the name of +// the provider they satisfy. +type GraphNodeCloseProvider interface { + CloseProviderName() string +} + +// GraphNodeProviderConsumer is an interface that nodes that require +// a provider must implement. ProvidedBy must return the name of the provider +// to use. 
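+//
+// For example (illustrative), a resource configured with an aliased
+// provider would return []string{"aws.west"} from ProvidedBy.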
+type GraphNodeProviderConsumer interface { + ProvidedBy() []string +} + +// ProviderTransformer is a GraphTransformer that maps resources to +// providers within the graph. This will error if there are any resources +// that don't map to proper resources. +type ProviderTransformer struct{} + +func (t *ProviderTransformer) Transform(g *Graph) error { + // Go through the other nodes and match them to providers they need + var err error + m := providerVertexMap(g) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProviderConsumer); ok { + for _, p := range pv.ProvidedBy() { + target := m[providerMapKey(p, pv)] + if target == nil { + println(fmt.Sprintf("%#v\n\n%#v", m, providerMapKey(p, pv))) + err = multierror.Append(err, fmt.Errorf( + "%s: provider %s couldn't be found", + dag.VertexName(v), p)) + continue + } + + g.Connect(dag.BasicEdge(v, target)) + } + } + } + + return err +} + +// CloseProviderTransformer is a GraphTransformer that adds nodes to the +// graph that will close open provider connections that aren't needed anymore. +// A provider connection is not needed anymore once all depended resources +// in the graph are evaluated. +type CloseProviderTransformer struct{} + +func (t *CloseProviderTransformer) Transform(g *Graph) error { + pm := providerVertexMap(g) + cpm := closeProviderVertexMap(g) + var err error + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProviderConsumer); ok { + for _, p := range pv.ProvidedBy() { + key := p + source := cpm[key] + + if source == nil { + // Create a new graphNodeCloseProvider and add it to the graph + source = &graphNodeCloseProvider{ProviderNameValue: p} + g.Add(source) + + // Close node needs to depend on provider + provider, ok := pm[key] + if !ok { + err = multierror.Append(err, fmt.Errorf( + "%s: provider %s couldn't be found for closing", + dag.VertexName(v), p)) + continue + } + g.Connect(dag.BasicEdge(source, provider)) + + // Make sure we also add the new graphNodeCloseProvider to the map + // so we don't create and add any duplicate graphNodeCloseProviders. + cpm[key] = source + } + + // Close node depends on all nodes provided by the provider + g.Connect(dag.BasicEdge(source, v)) + } + } + } + + return err +} + +// MissingProviderTransformer is a GraphTransformer that adds nodes +// for missing providers into the graph. Specifically, it creates provider +// configuration nodes for all the providers that we support. These are +// pruned later during an optimization pass. +type MissingProviderTransformer struct { + // Providers is the list of providers we support. + Providers []string + + // AllowAny will not check that a provider is supported before adding + // it to the graph. + AllowAny bool + + // Concrete, if set, overrides how the providers are made. + Concrete ConcreteProviderNodeFunc +} + +func (t *MissingProviderTransformer) Transform(g *Graph) error { + // Initialize factory + if t.Concrete == nil { + t.Concrete = func(a *NodeAbstractProvider) dag.Vertex { + return a + } + } + + // Create a set of our supported providers + supported := make(map[string]struct{}, len(t.Providers)) + for _, v := range t.Providers { + supported[v] = struct{}{} + } + + // Get the map of providers we already have in our graph + m := providerVertexMap(g) + + // Go through all the provider consumers and make sure we add + // that provider if it is missing. We use a for loop here instead + // of "range" since we'll modify check as we go to add more to check. 
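+	// (A range loop would capture the slice length up front and miss the
+	// dummy consumer nodes appended while iterating below.)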
+ check := g.Vertices() + for i := 0; i < len(check); i++ { + v := check[i] + + pv, ok := v.(GraphNodeProviderConsumer) + if !ok { + continue + } + + // If this node has a subpath, then we use that as a prefix + // into our map to check for an existing provider. + var path []string + if sp, ok := pv.(GraphNodeSubPath); ok { + raw := normalizeModulePath(sp.Path()) + if len(raw) > len(rootModulePath) { + path = raw + } + } + + for _, p := range pv.ProvidedBy() { + key := providerMapKey(p, pv) + if _, ok := m[key]; ok { + // This provider already exists as a configure node + continue + } + + // If the provider has an alias in it, we just want the type + ptype := p + if idx := strings.IndexRune(p, '.'); idx != -1 { + ptype = p[:idx] + } + + if !t.AllowAny { + if _, ok := supported[ptype]; !ok { + // If we don't support the provider type, skip it. + // Validation later will catch this as an error. + continue + } + } + + // Add the missing provider node to the graph + v := t.Concrete(&NodeAbstractProvider{ + NameValue: p, + PathValue: path, + }).(dag.Vertex) + if len(path) > 0 { + // We'll need the parent provider as well, so let's + // add a dummy node to check to make sure that we add + // that parent provider. + check = append(check, &graphNodeProviderConsumerDummy{ + ProviderValue: p, + PathValue: path[:len(path)-1], + }) + } + + m[key] = g.Add(v) + } + } + + return nil +} + +// ParentProviderTransformer connects provider nodes to their parents. +// +// This works by finding nodes that are both GraphNodeProviders and +// GraphNodeSubPath. It then connects the providers to their parent +// path. +type ParentProviderTransformer struct{} + +func (t *ParentProviderTransformer) Transform(g *Graph) error { + // Make a mapping of path to dag.Vertex, where path is: "path.name" + m := make(map[string]dag.Vertex) + + // Also create a map that maps a provider to its parent + parentMap := make(map[dag.Vertex]string) + for _, raw := range g.Vertices() { + // If it is the flat version, then make it the non-flat version. + // We eventually want to get rid of the flat version entirely so + // this is a stop-gap while it still exists. + var v dag.Vertex = raw + + // Only care about providers + pn, ok := v.(GraphNodeProvider) + if !ok || pn.ProviderName() == "" { + continue + } + + // Also require a subpath, if there is no subpath then we + // just totally ignore it. The expectation of this transform is + // that it is used with a graph builder that is already flattened. + var path []string + if pn, ok := raw.(GraphNodeSubPath); ok { + path = pn.Path() + } + path = normalizeModulePath(path) + + // Build the key with path.name i.e. "child.subchild.aws" + key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName()) + m[key] = raw + + // Determine the parent if we're non-root. This is length 1 since + // the 0 index should be "root" since we normalize above. + if len(path) > 1 { + path = path[:len(path)-1] + key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName()) + parentMap[raw] = key + } + } + + // Connect! + for v, key := range parentMap { + if parent, ok := m[key]; ok { + g.Connect(dag.BasicEdge(v, parent)) + } + } + + return nil +} + +// PruneProviderTransformer is a GraphTransformer that prunes all the +// providers that aren't needed from the graph. A provider is unneeded if +// no resource or module is using that provider. 
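+//
+// For example (illustrative): a `provider "aws"` block that no resource in
+// the configuration ends up using leaves a vertex with no inbound edges,
+// which is removed here.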
+type PruneProviderTransformer struct{} + +func (t *PruneProviderTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + // We only care about the providers + if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" { + continue + } + // Does anything depend on this? If not, then prune it. + if s := g.UpEdges(v); s.Len() == 0 { + if nv, ok := v.(dag.NamedVertex); ok { + log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name()) + } + g.Remove(v) + } + } + + return nil +} + +// providerMapKey is a helper that gives us the key to use for the +// maps returned by things such as providerVertexMap. +func providerMapKey(k string, v dag.Vertex) string { + pathPrefix := "" + if sp, ok := v.(GraphNodeSubPath); ok { + raw := normalizeModulePath(sp.Path()) + if len(raw) > len(rootModulePath) { + pathPrefix = modulePrefixStr(raw) + "." + } + } + + return pathPrefix + k +} + +func providerVertexMap(g *Graph) map[string]dag.Vertex { + m := make(map[string]dag.Vertex) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvider); ok { + key := providerMapKey(pv.ProviderName(), v) + m[key] = v + } + } + + return m +} + +func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { + m := make(map[string]dag.Vertex) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeCloseProvider); ok { + m[pv.CloseProviderName()] = v + } + } + + return m +} + +type graphNodeCloseProvider struct { + ProviderNameValue string +} + +func (n *graphNodeCloseProvider) Name() string { + return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue) +} + +// GraphNodeEvalable impl. +func (n *graphNodeCloseProvider) EvalTree() EvalNode { + return CloseProviderEvalTree(n.ProviderNameValue) +} + +// GraphNodeDependable impl. +func (n *graphNodeCloseProvider) DependableName() []string { + return []string{n.Name()} +} + +func (n *graphNodeCloseProvider) CloseProviderName() string { + return n.ProviderNameValue +} + +// GraphNodeDotter impl. +func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + if !opts.Verbose { + return nil + } + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "diamond", + }, + } +} + +// RemovableIfNotTargeted +func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// graphNodeProviderConsumerDummy is a struct that never enters the real +// graph (though it could to no ill effect). It implements +// GraphNodeProviderConsumer and GraphNodeSubpath as a way to force +// certain transformations. +type graphNodeProviderConsumerDummy struct { + ProviderValue string + PathValue []string +} + +func (n *graphNodeProviderConsumerDummy) Path() []string { + return n.PathValue +} + +func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string { + return []string{n.ProviderValue} +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go new file mode 100644 index 0000000000..d9919f3a77 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go @@ -0,0 +1,50 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/dag" +) + +// DisableProviderTransformer "disables" any providers that are not actually +// used by anything. 
This avoids the provider being initialized and configured. +// This both saves resources but also avoids errors since configuration +// may imply initialization which may require auth. +type DisableProviderTransformer struct{} + +func (t *DisableProviderTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + // We only care about providers + pn, ok := v.(GraphNodeProvider) + if !ok || pn.ProviderName() == "" { + continue + } + + // If we have dependencies, then don't disable + if g.UpEdges(v).Len() > 0 { + continue + } + + // Get the path + var path []string + if pn, ok := v.(GraphNodeSubPath); ok { + path = pn.Path() + } + + // Disable the provider by replacing it with a "disabled" provider + disabled := &NodeDisabledProvider{ + NodeAbstractProvider: &NodeAbstractProvider{ + NameValue: pn.ProviderName(), + PathValue: path, + }, + } + + if !g.Replace(v, disabled) { + panic(fmt.Sprintf( + "vertex disappeared from under us: %s", + dag.VertexName(v))) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go new file mode 100644 index 0000000000..f49d824107 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go @@ -0,0 +1,206 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/dag" +) + +// GraphNodeProvisioner is an interface that nodes that can be a provisioner +// must implement. The ProvisionerName returned is the name of the provisioner +// they satisfy. +type GraphNodeProvisioner interface { + ProvisionerName() string +} + +// GraphNodeCloseProvisioner is an interface that nodes that can be a close +// provisioner must implement. The CloseProvisionerName returned is the name +// of the provisioner they satisfy. +type GraphNodeCloseProvisioner interface { + CloseProvisionerName() string +} + +// GraphNodeProvisionerConsumer is an interface that nodes that require +// a provisioner must implement. ProvisionedBy must return the name of the +// provisioner to use. +type GraphNodeProvisionerConsumer interface { + ProvisionedBy() []string +} + +// ProvisionerTransformer is a GraphTransformer that maps resources to +// provisioners within the graph. This will error if there are any resources +// that don't map to proper resources. +type ProvisionerTransformer struct{} + +func (t *ProvisionerTransformer) Transform(g *Graph) error { + // Go through the other nodes and match them to provisioners they need + var err error + m := provisionerVertexMap(g) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisionerConsumer); ok { + for _, p := range pv.ProvisionedBy() { + key := provisionerMapKey(p, pv) + if m[key] == nil { + err = multierror.Append(err, fmt.Errorf( + "%s: provisioner %s couldn't be found", + dag.VertexName(v), p)) + continue + } + + g.Connect(dag.BasicEdge(v, m[key])) + } + } + } + + return err +} + +// MissingProvisionerTransformer is a GraphTransformer that adds nodes +// for missing provisioners into the graph. +type MissingProvisionerTransformer struct { + // Provisioners is the list of provisioners we support. 
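+	// (e.g. "local-exec", "remote-exec", or "file".)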
+ Provisioners []string +} + +func (t *MissingProvisionerTransformer) Transform(g *Graph) error { + // Create a set of our supported provisioners + supported := make(map[string]struct{}, len(t.Provisioners)) + for _, v := range t.Provisioners { + supported[v] = struct{}{} + } + + // Get the map of provisioners we already have in our graph + m := provisionerVertexMap(g) + + // Go through all the provisioner consumers and make sure we add + // that provisioner if it is missing. + for _, v := range g.Vertices() { + pv, ok := v.(GraphNodeProvisionerConsumer) + if !ok { + continue + } + + // If this node has a subpath, then we use that as a prefix + // into our map to check for an existing provider. + var path []string + if sp, ok := pv.(GraphNodeSubPath); ok { + raw := normalizeModulePath(sp.Path()) + if len(raw) > len(rootModulePath) { + path = raw + } + } + + for _, p := range pv.ProvisionedBy() { + // Build the key for storing in the map + key := provisionerMapKey(p, pv) + + if _, ok := m[key]; ok { + // This provisioner already exists as a configure node + continue + } + + if _, ok := supported[p]; !ok { + // If we don't support the provisioner type, skip it. + // Validation later will catch this as an error. + continue + } + + // Build the vertex + var newV dag.Vertex = &NodeProvisioner{ + NameValue: p, + PathValue: path, + } + + // Add the missing provisioner node to the graph + m[key] = g.Add(newV) + } + } + + return nil +} + +// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the +// graph that will close open provisioner connections that aren't needed +// anymore. A provisioner connection is not needed anymore once all depended +// resources in the graph are evaluated. +type CloseProvisionerTransformer struct{} + +func (t *CloseProvisionerTransformer) Transform(g *Graph) error { + m := closeProvisionerVertexMap(g) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisionerConsumer); ok { + for _, p := range pv.ProvisionedBy() { + source := m[p] + + if source == nil { + // Create a new graphNodeCloseProvisioner and add it to the graph + source = &graphNodeCloseProvisioner{ProvisionerNameValue: p} + g.Add(source) + + // Make sure we also add the new graphNodeCloseProvisioner to the map + // so we don't create and add any duplicate graphNodeCloseProvisioners. + m[p] = source + } + + g.Connect(dag.BasicEdge(source, v)) + } + } + } + + return nil +} + +// provisionerMapKey is a helper that gives us the key to use for the +// maps returned by things such as provisionerVertexMap. +func provisionerMapKey(k string, v dag.Vertex) string { + pathPrefix := "" + if sp, ok := v.(GraphNodeSubPath); ok { + raw := normalizeModulePath(sp.Path()) + if len(raw) > len(rootModulePath) { + pathPrefix = modulePrefixStr(raw) + "." 
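+			// e.g. a provisioner consumed inside module "child" is keyed
+			// "module.child.local-exec" rather than just "local-exec".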
+ } + } + + return pathPrefix + k +} + +func provisionerVertexMap(g *Graph) map[string]dag.Vertex { + m := make(map[string]dag.Vertex) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeProvisioner); ok { + key := provisionerMapKey(pv.ProvisionerName(), v) + m[key] = v + } + } + + return m +} + +func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex { + m := make(map[string]dag.Vertex) + for _, v := range g.Vertices() { + if pv, ok := v.(GraphNodeCloseProvisioner); ok { + m[pv.CloseProvisionerName()] = v + } + } + + return m +} + +type graphNodeCloseProvisioner struct { + ProvisionerNameValue string +} + +func (n *graphNodeCloseProvisioner) Name() string { + return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue) +} + +// GraphNodeEvalable impl. +func (n *graphNodeCloseProvisioner) EvalTree() EvalNode { + return &EvalCloseProvisioner{Name: n.ProvisionerNameValue} +} + +func (n *graphNodeCloseProvisioner) CloseProvisionerName() string { + return n.ProvisionerNameValue +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go new file mode 100644 index 0000000000..c5452354d4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go @@ -0,0 +1,321 @@ +package terraform + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/dag" +) + +// GraphNodeReferenceable must be implemented by any node that represents +// a Terraform thing that can be referenced (resource, module, etc.). +// +// Even if the thing has no name, this should return an empty list. By +// implementing this and returning a non-nil result, you say that this CAN +// be referenced and other methods of referencing may still be possible (such +// as by path!) +type GraphNodeReferenceable interface { + // ReferenceableName is the name by which this can be referenced. + // This can be either just the type, or include the field. Example: + // "aws_instance.bar" or "aws_instance.bar.id". + ReferenceableName() []string +} + +// GraphNodeReferencer must be implemented by nodes that reference other +// Terraform items and therefore depend on them. +type GraphNodeReferencer interface { + // References are the list of things that this node references. This + // can include fields or just the type, just like GraphNodeReferenceable + // above. + References() []string +} + +// GraphNodeReferenceGlobal is an interface that can optionally be +// implemented. If ReferenceGlobal returns true, then the References() +// and ReferenceableName() must be _fully qualified_ with "module.foo.bar" +// etc. +// +// This allows a node to reference and be referenced by a specific name +// that may cross module boundaries. This can be very dangerous so use +// this wisely. +// +// The primary use case for this is module boundaries (variables coming in). +type GraphNodeReferenceGlobal interface { + // Set to true to signal that references and name are fully + // qualified. See the above docs for more information. + ReferenceGlobal() bool +} + +// ReferenceTransformer is a GraphTransformer that connects all the +// nodes that reference each other in order to form the proper ordering. 
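+//
+// For example (hypothetical config): given
+//
+//	resource "aws_eip" "ip" {
+//	  instance = "${aws_instance.web.id}"
+//	}
+//
+// the aws_eip node reports a reference to "aws_instance.web", so an edge is
+// added and the instance is evaluated first.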
+type ReferenceTransformer struct{} + +func (t *ReferenceTransformer) Transform(g *Graph) error { + // Build a reference map so we can efficiently look up the references + vs := g.Vertices() + m := NewReferenceMap(vs) + + // Find the things that reference things and connect them + for _, v := range vs { + parents, _ := m.References(v) + parentsDbg := make([]string, len(parents)) + for i, v := range parents { + parentsDbg[i] = dag.VertexName(v) + } + log.Printf( + "[DEBUG] ReferenceTransformer: %q references: %v", + dag.VertexName(v), parentsDbg) + + for _, parent := range parents { + g.Connect(dag.BasicEdge(v, parent)) + } + } + + return nil +} + +// ReferenceMap is a structure that can be used to efficiently check +// for references on a graph. +type ReferenceMap struct { + // m is the mapping of referenceable name to list of verticies that + // implement that name. This is built on initialization. + references map[string][]dag.Vertex + referencedBy map[string][]dag.Vertex +} + +// References returns the list of vertices that this vertex +// references along with any missing references. +func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) { + rn, ok := v.(GraphNodeReferencer) + if !ok { + return nil, nil + } + + var matches []dag.Vertex + var missing []string + prefix := m.prefix(v) + for _, ns := range rn.References() { + found := false + for _, n := range strings.Split(ns, "/") { + n = prefix + n + parents, ok := m.references[n] + if !ok { + continue + } + + // Mark that we found a match + found = true + + // Make sure this isn't a self reference, which isn't included + selfRef := false + for _, p := range parents { + if p == v { + selfRef = true + break + } + } + if selfRef { + continue + } + + matches = append(matches, parents...) + break + } + + if !found { + missing = append(missing, ns) + } + } + + return matches, missing +} + +// ReferencedBy returns the list of vertices that reference the +// vertex passed in. +func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex { + rn, ok := v.(GraphNodeReferenceable) + if !ok { + return nil + } + + var matches []dag.Vertex + prefix := m.prefix(v) + for _, n := range rn.ReferenceableName() { + n = prefix + n + children, ok := m.referencedBy[n] + if !ok { + continue + } + + // Make sure this isn't a self reference, which isn't included + selfRef := false + for _, p := range children { + if p == v { + selfRef = true + break + } + } + if selfRef { + continue + } + + matches = append(matches, children...) + } + + return matches +} + +func (m *ReferenceMap) prefix(v dag.Vertex) string { + // If the node is stating it is already fully qualified then + // we don't have to create the prefix! + if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() { + return "" + } + + // Create the prefix based on the path + var prefix string + if pn, ok := v.(GraphNodeSubPath); ok { + if path := normalizeModulePath(pn.Path()); len(path) > 1 { + prefix = modulePrefixStr(path) + "." + } + } + + return prefix +} + +// NewReferenceMap is used to create a new reference map for the +// given set of vertices. 
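+//
+// Keys are module-prefixed referenceable names, e.g. "aws_instance.web" at
+// the root or "module.child.aws_instance.web" inside a module; vertices with
+// a sub-path are also registered under their module path ("module.child").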
+func NewReferenceMap(vs []dag.Vertex) *ReferenceMap { + var m ReferenceMap + + // Build the lookup table + refMap := make(map[string][]dag.Vertex) + for _, v := range vs { + // We're only looking for referenceable nodes + rn, ok := v.(GraphNodeReferenceable) + if !ok { + continue + } + + // Go through and cache them + prefix := m.prefix(v) + for _, n := range rn.ReferenceableName() { + n = prefix + n + refMap[n] = append(refMap[n], v) + } + + // If there is a path, it is always referenceable by that. For + // example, if this is a referenceable thing at path []string{"foo"}, + // then it can be referenced at "module.foo" + if pn, ok := v.(GraphNodeSubPath); ok { + for _, p := range ReferenceModulePath(pn.Path()) { + refMap[p] = append(refMap[p], v) + } + } + } + + // Build the lookup table for referenced by + refByMap := make(map[string][]dag.Vertex) + for _, v := range vs { + // We're only looking for referenceable nodes + rn, ok := v.(GraphNodeReferencer) + if !ok { + continue + } + + // Go through and cache them + prefix := m.prefix(v) + for _, n := range rn.References() { + n = prefix + n + refByMap[n] = append(refByMap[n], v) + } + } + + m.references = refMap + m.referencedBy = refByMap + return &m +} + +// Returns the reference name for a module path. The path "foo" would return +// "module.foo". If this is a deeply nested module, it will be every parent +// as well. For example: ["foo", "bar"] would return both "module.foo" and +// "module.foo.module.bar" +func ReferenceModulePath(p []string) []string { + p = normalizeModulePath(p) + if len(p) == 1 { + // Root, no name + return nil + } + + result := make([]string, 0, len(p)-1) + for i := len(p); i > 1; i-- { + result = append(result, modulePrefixStr(p[:i])) + } + + return result +} + +// ReferencesFromConfig returns the references that a configuration has +// based on the interpolated variables in a configuration. +func ReferencesFromConfig(c *config.RawConfig) []string { + var result []string + for _, v := range c.Variables { + if r := ReferenceFromInterpolatedVar(v); len(r) > 0 { + result = append(result, r...) + } + } + + return result +} + +// ReferenceFromInterpolatedVar returns the reference from this variable, +// or an empty string if there is no reference. +func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string { + switch v := v.(type) { + case *config.ModuleVariable: + return []string{fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)} + case *config.ResourceVariable: + id := v.ResourceId() + + // If we have a multi-reference (splat), then we depend on ALL + // resources with this type/name. + if v.Multi && v.Index == -1 { + return []string{fmt.Sprintf("%s.*", id)} + } + + // Otherwise, we depend on a specific index. + idx := v.Index + if !v.Multi || v.Index == -1 { + idx = 0 + } + + // Depend on the index, as well as "N" which represents the + // un-expanded set of resources. 
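+		// e.g. aws_instance.web[2] yields
+		// "aws_instance.web.2/aws_instance.web.N"; References treats the
+		// "/"-separated entries as fallback alternatives.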
+ return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)} + case *config.UserVariable: + return []string{fmt.Sprintf("var.%s", v.Name)} + default: + return nil + } +} + +func modulePrefixStr(p []string) string { + parts := make([]string, 0, len(p)*2) + for _, p := range p[1:] { + parts = append(parts, "module", p) + } + + return strings.Join(parts, ".") +} + +func modulePrefixList(result []string, prefix string) []string { + if prefix != "" { + for i, v := range result { + result[i] = fmt.Sprintf("%s.%s", prefix, v) + } + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go new file mode 100644 index 0000000000..cda35cb7bd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go @@ -0,0 +1,51 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/dag" +) + +// ResourceCountTransformer is a GraphTransformer that expands the count +// out for a specific resource. +// +// This assumes that the count is already interpolated. +type ResourceCountTransformer struct { + Concrete ConcreteResourceNodeFunc + + Count int + Addr *ResourceAddress +} + +func (t *ResourceCountTransformer) Transform(g *Graph) error { + // Don't allow the count to be negative + if t.Count < 0 { + return fmt.Errorf("negative count: %d", t.Count) + } + + // For each count, build and add the node + for i := 0; i < t.Count; i++ { + // Set the index. If our count is 1 we special case it so that + // we handle the "resource.0" and "resource" boundary properly. + index := i + if t.Count == 1 { + index = -1 + } + + // Build the resource address + addr := t.Addr.Copy() + addr.Index = index + + // Build the abstract node and the concrete one + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + // Add it to the graph + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_refresh_plannable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_refresh_plannable.go new file mode 100644 index 0000000000..35358a3180 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_refresh_plannable.go @@ -0,0 +1,55 @@ +package terraform + +import ( + "fmt" + "log" +) + +// ResourceRefreshPlannableTransformer is a GraphTransformer that replaces any +// nodes that don't have state yet exist in config with +// NodePlannableResourceInstance. +// +// This transformer is used when expanding count on managed resource nodes +// during the refresh phase to ensure that data sources that have +// interpolations that depend on resources existing in the graph can be walked +// properly. +type ResourceRefreshPlannableTransformer struct { + // The full global state. + State *State +} + +// Transform implements GraphTransformer for +// ResourceRefreshPlannableTransformer. +func (t *ResourceRefreshPlannableTransformer) Transform(g *Graph) error { +nextVertex: + for _, v := range g.Vertices() { + addr := v.(*NodeRefreshableManagedResourceInstance).Addr + + // Find the state for this address, if there is one + filter := &StateFilter{State: t.State} + results, err := filter.Filter(addr.String()) + if err != nil { + return err + } + + // Check to see if we have a state for this resource. If we do, skip this + // node. 
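+		// (A filter match alone isn't enough; only a concrete
+		// *ResourceState value means real state exists for the address.)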
+ for _, result := range results { + if _, ok := result.Value.(*ResourceState); ok { + continue nextVertex + } + } + // If we don't, convert this resource to a NodePlannableResourceInstance node + // with all of the data we need to make it happen. + log.Printf("[TRACE] No state for %s, converting to NodePlannableResourceInstance", addr.String()) + new := &NodePlannableResourceInstance{ + NodeAbstractResource: v.(*NodeRefreshableManagedResourceInstance).NodeAbstractResource, + } + // Replace the node in the graph + if !g.Replace(v, new) { + return fmt.Errorf("ResourceRefreshPlannableTransformer: Could not replace node %#v with %#v", v, new) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go new file mode 100644 index 0000000000..aee053d175 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go @@ -0,0 +1,38 @@ +package terraform + +import "github.com/hashicorp/terraform/dag" + +const rootNodeName = "root" + +// RootTransformer is a GraphTransformer that adds a root to the graph. +type RootTransformer struct{} + +func (t *RootTransformer) Transform(g *Graph) error { + // If we already have a good root, we're done + if _, err := g.Root(); err == nil { + return nil + } + + // Add a root + var root graphNodeRoot + g.Add(root) + + // Connect the root to all the edges that need it + for _, v := range g.Vertices() { + if v == root { + continue + } + + if g.UpEdges(v).Len() == 0 { + g.Connect(dag.BasicEdge(root, v)) + } + } + + return nil +} + +type graphNodeRoot struct{} + +func (n graphNodeRoot) Name() string { + return rootNodeName +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go new file mode 100644 index 0000000000..471cd74657 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go @@ -0,0 +1,65 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/dag" +) + +// StateTransformer is a GraphTransformer that adds the elements of +// the state to the graph. +// +// This transform is used for example by the DestroyPlanGraphBuilder to ensure +// that only resources that are in the state are represented in the graph. +type StateTransformer struct { + Concrete ConcreteResourceNodeFunc + + State *State +} + +func (t *StateTransformer) Transform(g *Graph) error { + // If the state is nil or empty (nil is empty) then do nothing + if t.State.Empty() { + return nil + } + + // Go through all the modules in the diff. + log.Printf("[TRACE] StateTransformer: starting") + var nodes []dag.Vertex + for _, ms := range t.State.Modules { + log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path) + + // Go through all the resources in this module. + for name, rs := range ms.Resources { + log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs) + + // Add the resource to the graph + addr, err := parseResourceAddressInternal(name) + if err != nil { + panic(fmt.Sprintf( + "Error parsing internal name, this is a bug: %q", name)) + } + + // Very important: add the module path for this resource to + // the address. Remove "root" from it. 
+			addr.Path = ms.Path[1:]
+
+			// Add the resource to the graph
+			abstract := &NodeAbstractResource{Addr: addr}
+			var node dag.Vertex = abstract
+			if f := t.Concrete; f != nil {
+				node = f(abstract)
+			}
+
+			nodes = append(nodes, node)
+		}
+	}
+
+	// Add all the nodes to the graph
+	for _, n := range nodes {
+		g.Add(n)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
new file mode 100644
index 0000000000..125f9e3021
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -0,0 +1,219 @@
+package terraform
+
+import (
+	"log"
+
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeTargetable is an interface for graph nodes to implement when they
+// need to be told about incoming targets. This is useful for nodes that need
+// to respect targets as they dynamically expand. Note that the list provided
+// will contain every target given, and each implementing graph node must
+// filter this list to the targets it considers relevant.
+type GraphNodeTargetable interface {
+	SetTargets([]ResourceAddress)
+}
+
+// GraphNodeTargetDownstream is an interface for graph nodes that need to
+// remain present under targeting if any of their dependencies are targeted.
+// TargetDownstream is called with the set of vertices that are direct
+// dependencies for the node, and it should return true if the node must remain
+// in the graph in support of those dependencies.
+//
+// This is used in situations where the dependency edges are representing an
+// ordering relationship but the dependency must still be visited if its
+// dependencies are visited. This is true for outputs, for example, since
+// they must get updated if any of their dependent resources get updated,
+// which would not normally be true if one of their dependencies were targeted.
+type GraphNodeTargetDownstream interface {
+	TargetDownstream(targeted, untargeted *dag.Set) bool
+}
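To make the TargetDownstream contract concrete, here is a minimal sketch of an implementer. The node type is hypothetical and exists only for illustration; it keeps roughly the behavior an output node wants:

```go
// exampleOutputNode is a hypothetical graph node that must survive
// targeting whenever at least one of its direct dependencies is targeted.
type exampleOutputNode struct {
	name string
}

func (n *exampleOutputNode) Name() string { return n.name }

// TargetDownstream receives the targeted/untargeted split of the node's
// direct dependencies and reports whether the node should stay in the graph.
func (n *exampleOutputNode) TargetDownstream(targeted, untargeted *dag.Set) bool {
	// Stay as long as anything we depend on is targeted.
	return targeted.Len() > 0
}
```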
+// TargetsTransformer is a GraphTransformer that, when the user specifies a
+// list of resources to target, limits the graph to only those resources and
+// their dependencies.
+type TargetsTransformer struct {
+	// List of targeted resource names specified by the user
+	Targets []string
+
+	// List of parsed targets, provided by callers like ResourceCountTransformer
+	// that already have the targets parsed
+	ParsedTargets []ResourceAddress
+
+	// Set to true when we're in a `terraform destroy` or a
+	// `terraform plan -destroy`
+	Destroy bool
+}
+
+func (t *TargetsTransformer) Transform(g *Graph) error {
+	if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
+		addrs, err := t.parseTargetAddresses()
+		if err != nil {
+			return err
+		}
+
+		t.ParsedTargets = addrs
+	}
+
+	if len(t.ParsedTargets) > 0 {
+		targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
+		if err != nil {
+			return err
+		}
+
+		for _, v := range g.Vertices() {
+			removable := false
+			if _, ok := v.(GraphNodeResource); ok {
+				removable = true
+			}
+			if vr, ok := v.(RemovableIfNotTargeted); ok {
+				removable = vr.RemoveIfNotTargeted()
+			}
+			if removable && !targetedNodes.Include(v) {
+				log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
+				g.Remove(v)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
+	addrs := make([]ResourceAddress, len(t.Targets))
+	for i, target := range t.Targets {
+		ta, err := ParseResourceAddress(target)
+		if err != nil {
+			return nil, err
+		}
+		addrs[i] = *ta
+	}
+
+	return addrs, nil
+}
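For reference, a sketch of the kinds of `-target` strings the parser above accepts; the helper and the example addresses are illustrative only:

```go
// parseExampleTargets is an illustrative helper, not part of the package.
func parseExampleTargets() ([]ResourceAddress, error) {
	raw := []string{
		"aws_instance.web",            // all instances of a counted resource
		"aws_instance.web[2]",         // a single indexed instance
		"module.net.aws_subnet.front", // a resource inside a child module
	}
	out := make([]ResourceAddress, 0, len(raw))
	for _, r := range raw {
		ta, err := ParseResourceAddress(r)
		if err != nil {
			return nil, err // a malformed -target surfaces here
		}
		out = append(out, *ta)
	}
	return out, nil
}
```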
+// selectTargetedNodes returns the list of targeted nodes. A targeted node is
+// either addressed directly, or is an Ancestor of a targeted node. Destroy
+// mode keeps Descendents instead of Ancestors.
+func (t *TargetsTransformer) selectTargetedNodes(
+	g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
+	targetedNodes := new(dag.Set)
+
+	vertices := g.Vertices()
+
+	for _, v := range vertices {
+		if t.nodeIsTarget(v, addrs) {
+			targetedNodes.Add(v)
+
+			// We inform nodes that ask about the list of targets - helps for nodes
+			// that need to dynamically expand. Note that this only occurs for nodes
+			// that are already directly targeted.
+			if tn, ok := v.(GraphNodeTargetable); ok {
+				tn.SetTargets(addrs)
+			}
+
+			var deps *dag.Set
+			var err error
+			if t.Destroy {
+				deps, err = g.Descendents(v)
+			} else {
+				deps, err = g.Ancestors(v)
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			for _, d := range deps.List() {
+				targetedNodes.Add(d)
+			}
+		}
+	}
+
+	// Handle nodes that need to be included if their dependencies are included.
+	// This requires multiple passes since we need to catch transitive
+	// dependencies if and only if they are via other nodes that also
+	// support TargetDownstream. For example:
+	// output -> output -> targeted-resource: both outputs need to be targeted
+	// output -> non-targeted-resource -> targeted-resource: output not targeted
+	//
+	// We'll keep looping until we stop targeting more nodes.
+	queue := targetedNodes.List()
+	for len(queue) > 0 {
+		vertices := queue
+		queue = nil // ready to append for next iteration if necessary
+		for _, v := range vertices {
+			dependers := g.UpEdges(v)
+			if dependers == nil {
+				// There are no up edges for this node, so we have
+				// nothing to do here.
+				continue
+			}
+
+			dependers = dependers.Filter(func(dv interface{}) bool {
+				// Can ignore nodes that are already targeted
+				/*if targetedNodes.Include(dv) {
+					return false
+				}*/
+
+				_, ok := dv.(GraphNodeTargetDownstream)
+				return ok
+			})
+
+			if dependers.Len() == 0 {
+				continue
+			}
+
+			for _, dv := range dependers.List() {
+				if targetedNodes.Include(dv) {
+					// Already present, so nothing to do
+					continue
+				}
+
+				// We'll give the node some information about what it's
+				// depending on in case that informs its decision about whether
+				// it is safe to be targeted.
+				deps := g.DownEdges(v)
+				depsTargeted := deps.Intersection(targetedNodes)
+				depsUntargeted := deps.Difference(depsTargeted)
+
+				if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) {
+					targetedNodes.Add(dv)
+					// Need to visit this node on the next pass to see if it
+					// has any transitive dependers.
+					queue = append(queue, dv)
+				}
+			}
+		}
+	}
+
+	return targetedNodes, nil
+}
+
+func (t *TargetsTransformer) nodeIsTarget(
+	v dag.Vertex, addrs []ResourceAddress) bool {
+	r, ok := v.(GraphNodeResource)
+	if !ok {
+		return false
+	}
+
+	addr := r.ResourceAddr()
+	for _, targetAddr := range addrs {
+		if targetAddr.Equals(addr) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// RemovableIfNotTargeted is a special interface for graph nodes that
+// aren't directly addressable, but need to be removed from the graph when they
+// are not targeted. (Nodes that are not directly targeted end up in the set of
+// targeted nodes because something that _is_ targeted depends on them.) The
+// initial use case for this interface is GraphNodeConfigVariable, which was
+// having trouble interpolating for module variables in targeted scenarios that
+// filtered out the resource node being referenced.
+type RemovableIfNotTargeted interface {
+	RemoveIfNotTargeted() bool
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
new file mode 100644
index 0000000000..21842789cf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
@@ -0,0 +1,20 @@
+package terraform
+
+// TransitiveReductionTransformer is a GraphTransformer that finds the
+// transitive reduction of the graph. For a definition of transitive
+// reduction, see Wikipedia.
+type TransitiveReductionTransformer struct{}
+
+func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
+	// If the graph isn't valid, skip the transitive reduction.
+	// We don't error here because graph validation is handled
+	// elsewhere in Terraform, with better error messages.
+	if err := g.Validate(); err != nil {
+		return nil
+	}
+
+	// Do it
+	g.TransitiveReduction()
+
+	return nil
+}
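For intuition, the reduction drops any edge that is implied by a longer path. A small self-contained sketch using the same dag package (vertex names are arbitrary; this assumes dag.AcyclicGraph and its String method, which Graph builds on):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.AcyclicGraph
	g.Add("a")
	g.Add("b")
	g.Add("c")
	g.Connect(dag.BasicEdge("a", "b"))
	g.Connect(dag.BasicEdge("b", "c"))
	g.Connect(dag.BasicEdge("a", "c")) // redundant: implied by a -> b -> c

	// The reduction removes the redundant a -> c edge and nothing else.
	g.TransitiveReduction()
	fmt.Println(g.String())
}
```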
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
new file mode 100644
index 0000000000..b31e2c765f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
@@ -0,0 +1,40 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform/config/module"
+)
+
+// RootVariableTransformer is a GraphTransformer that adds all the root
+// variables to the graph.
+//
+// Root variables are currently no-ops but they must be added to the
+// graph since downstream things that depend on them must be able to
+// reach them.
+type RootVariableTransformer struct {
+	Module *module.Tree
+}
+
+func (t *RootVariableTransformer) Transform(g *Graph) error {
+	// If no config, no variables
+	if t.Module == nil {
+		return nil
+	}
+
+	// If we have no vars, we're done!
+	vars := t.Module.Config().Variables
+	if len(vars) == 0 {
+		return nil
+	}
+
+	// Add all variables here
+	for _, v := range vars {
+		node := &NodeRootVariable{
+			Config: v,
+		}
+
+		// Add it!
+		g.Add(node)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
new file mode 100644
index 0000000000..6b1293fc29
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/dag"
+)
+
+// VertexTransformer is a GraphTransformer that transforms vertices
+// using the GraphVertexTransformers. The Transforms are run in sequential
+// order. If a transform replaces a vertex then the next transform will see
+// the new vertex.
+type VertexTransformer struct {
+	Transforms []GraphVertexTransformer
+}
+
+func (t *VertexTransformer) Transform(g *Graph) error {
+	for _, v := range g.Vertices() {
+		for _, vt := range t.Transforms {
+			newV, err := vt.Transform(v)
+			if err != nil {
+				return err
+			}
+
+			// If the vertex didn't change, then don't do anything more
+			if newV == v {
+				continue
+			}
+
+			// Vertex changed, replace it within the graph
+			if ok := g.Replace(v, newV); !ok {
+				// This should never happen, big problem
+				return fmt.Errorf(
+					"Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
+					dag.VertexName(v), dag.VertexName(newV), v, newV)
+			}
+
+			// Replace v so that future transforms use the proper vertex
+			v = newV
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
new file mode 100644
index 0000000000..7c87459220
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
@@ -0,0 +1,26 @@
+package terraform
+
+// UIInput is the interface that must be implemented to ask for input
+// from the user. Implementations should forward the request to wherever
+// the user actually provides values.
+type UIInput interface {
+	Input(*InputOpts) (string, error)
+}
+
+// InputOpts are options for asking for input.
+type InputOpts struct {
+	// Id is a unique ID for the question being asked that might be
+	// used for logging or to look up a prior answered question.
+	Id string
+
+	// Query is a human-friendly question for inputting this value.
+	Query string
+
+	// Description describes what this option is. Be aware that this
+	// will probably be rendered in a terminal, so split lines as you
+	// see necessary.
+	Description string
+
+	// Default will be the value returned if no data is entered.
+	Default string
+}
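As a hypothetical implementation sketch (not part of the package), a UIInput that answers from a fixed table and falls back to the option's default:

```go
// staticUIInput is an illustrative UIInput that answers from a fixed map
// keyed by InputOpts.Id, falling back to the option's Default.
type staticUIInput struct {
	answers map[string]string
}

func (s *staticUIInput) Input(opts *InputOpts) (string, error) {
	if v, ok := s.answers[opts.Id]; ok {
		return v, nil
	}
	return opts.Default, nil
}
```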
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
new file mode 100644
index 0000000000..e3a07efa33
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
@@ -0,0 +1,23 @@
+package terraform
+
+// MockUIInput is an implementation of UIInput that can be used for tests.
+type MockUIInput struct {
+	InputCalled       bool
+	InputOpts         *InputOpts
+	InputReturnMap    map[string]string
+	InputReturnString string
+	InputReturnError  error
+	InputFn           func(*InputOpts) (string, error)
+}
+
+func (i *MockUIInput) Input(opts *InputOpts) (string, error) {
+	i.InputCalled = true
+	i.InputOpts = opts
+	if i.InputFn != nil {
+		return i.InputFn(opts)
+	}
+	if i.InputReturnMap != nil {
+		return i.InputReturnMap[opts.Id], i.InputReturnError
+	}
+	return i.InputReturnString, i.InputReturnError
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
new file mode 100644
index 0000000000..2207d1d0fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+	"fmt"
+)
+
+// PrefixUIInput is an implementation of UIInput that prefixes the ID
+// with a string, allowing queries to be namespaced.
+type PrefixUIInput struct {
+	IdPrefix    string
+	QueryPrefix string
+	UIInput     UIInput
+}
+
+func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) {
+	opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id)
+	opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query)
+	return i.UIInput.Input(opts)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
new file mode 100644
index 0000000000..84427c63de
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
@@ -0,0 +1,7 @@
+package terraform
+
+// UIOutput is the interface that must be implemented to output
+// data to the end user.
+type UIOutput interface {
+	Output(string)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
new file mode 100644
index 0000000000..135a91c5f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
@@ -0,0 +1,9 @@
+package terraform
+
+type CallbackUIOutput struct {
+	OutputFn func(string)
+}
+
+func (o *CallbackUIOutput) Output(v string) {
+	o.OutputFn(v)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
new file mode 100644
index 0000000000..7852bc4237
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
@@ -0,0 +1,16 @@
+package terraform
+
+// MockUIOutput is an implementation of UIOutput that can be used for tests.
+type MockUIOutput struct {
+	OutputCalled  bool
+	OutputMessage string
+	OutputFn      func(string)
+}
+
+func (o *MockUIOutput) Output(v string) {
+	o.OutputCalled = true
+	o.OutputMessage = v
+	if o.OutputFn != nil {
+		o.OutputFn(v)
+	}
+}
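A quick test-style sketch (hypothetical wiring, not from the source) showing how PrefixUIInput namespaces the Id before the wrapped MockUIInput sees it:

```go
// examplePrefixInput demonstrates the Id namespacing: the mock is queried
// with "provider.aws.access_key", not the bare "access_key".
func examplePrefixInput() (string, error) {
	mock := &MockUIInput{
		InputReturnMap: map[string]string{
			"provider.aws.access_key": "example-key", // hypothetical value
		},
	}
	input := &PrefixUIInput{IdPrefix: "provider.aws", UIInput: mock}

	return input.Input(&InputOpts{Id: "access_key", Query: "AWS access key"})
}
```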
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
new file mode 100644
index 0000000000..878a03122f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
@@ -0,0 +1,15 @@
+package terraform
+
+// ProvisionerUIOutput is an implementation of UIOutput that forwards each
+// line of output to the registered hooks so that they can handle it.
+type ProvisionerUIOutput struct {
+	Info  *InstanceInfo
+	Type  string
+	Hooks []Hook
+}
+
+func (o *ProvisionerUIOutput) Output(msg string) {
+	for _, h := range o.Hooks {
+		h.ProvisionOutput(o.Info, o.Type, msg)
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go
new file mode 100644
index 0000000000..700be2ae20
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go
@@ -0,0 +1,14 @@
+package terraform
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// The standard Terraform User-Agent format
+const UserAgent = "Terraform %s (%s)"
+
+// UserAgentString generates a User-Agent string in the standard format.
+func UserAgentString() string {
+	return fmt.Sprintf(UserAgent, VersionString(), runtime.Version())
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go
new file mode 100644
index 0000000000..f41f0d7d63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/util.go
@@ -0,0 +1,93 @@
+package terraform
+
+import (
+	"sort"
+	"strings"
+)
+
+// Semaphore is a wrapper around a channel to provide
+// utility methods to clarify that we are treating the
+// channel as a semaphore
+type Semaphore chan struct{}
+
+// NewSemaphore creates a semaphore that allows up
+// to a given limit of simultaneous acquisitions
+func NewSemaphore(n int) Semaphore {
+	if n == 0 {
+		panic("semaphore with limit 0")
+	}
+	ch := make(chan struct{}, n)
+	return Semaphore(ch)
+}
+
+// Acquire is used to acquire an available slot.
+// Blocks until available.
+func (s Semaphore) Acquire() {
+	s <- struct{}{}
+}
+
+// TryAcquire is used to do a non-blocking acquire.
+// Returns a bool indicating success
+func (s Semaphore) TryAcquire() bool {
+	select {
+	case s <- struct{}{}:
+		return true
+	default:
+		return false
+	}
+}
+
+// Release is used to return a slot. Acquire must
+// be called as a pre-condition.
+func (s Semaphore) Release() {
+	select {
+	case <-s:
+	default:
+		panic("release without an acquire")
+	}
+}
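A short usage sketch of Semaphore (illustrative, written as if in this package; assumes a sync import), bounding a batch of goroutines to three concurrent slots:

```go
func exampleBoundedWork() {
	sem := NewSemaphore(3) // at most 3 workers run at once

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			sem.Acquire()       // blocks until a slot is free
			defer sem.Release() // every Acquire must be paired with a Release
			// ... bounded work happens here ...
		}()
	}
	wg.Wait()
}
```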
+// resourceProvider returns the provider name for the given type.
+func resourceProvider(t, alias string) string {
+	if alias != "" {
+		return alias
+	}
+
+	idx := strings.IndexRune(t, '_')
+	if idx == -1 {
+		// If no underscores, the resource name is assumed to be
+		// also the provider name, e.g. if the provider exposes
+		// only a single resource of each type.
+		return t
+	}
+
+	return t[:idx]
+}
+
+// strSliceContains checks if a given string is contained in a slice.
+// When anybody asks why Go needs generics, here you go.
+func strSliceContains(haystack []string, needle string) bool {
+	for _, s := range haystack {
+		if s == needle {
+			return true
+		}
+	}
+	return false
+}
+
+// uniqueStrings deduplicates a slice of strings. Note that it sorts the
+// given slice in place as a side effect.
+func uniqueStrings(s []string) []string {
+	if len(s) < 2 {
+		return s
+	}
+
+	sort.Strings(s)
+	result := make([]string, 1, len(s))
+	result[0] = s[0]
+	for i := 1; i < len(s); i++ {
+		if s[i] != result[len(result)-1] {
+			result = append(result, s[i])
+		}
+	}
+	return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
new file mode 100644
index 0000000000..300f2adb1a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go
@@ -0,0 +1,166 @@
+package terraform
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/helper/hilmapstructure"
+)
+
+// Variables returns the fully loaded set of variables to use with
+// ContextOpts and NewContext, loading any additional variables from
+// the environment or any other sources.
+//
+// The given module tree doesn't need to be loaded.
+func Variables(
+	m *module.Tree,
+	override map[string]interface{}) (map[string]interface{}, error) {
+	result := make(map[string]interface{})
+
+	// Variables are loaded in the following sequence. Each additional step
+	// will override conflicting variable keys from prior steps:
+	//
+	//   * Take default values from config
+	//   * Take values from TF_VAR_x env vars
+	//   * Take values specified in the "override" param which is usually
+	//     from -var, -var-file, etc.
+	//
+
+	// First load from the config
+	for _, v := range m.Config().Variables {
+		// If the var has no default, ignore
+		if v.Default == nil {
+			continue
+		}
+
+		// If the type isn't a string, we use it as-is since it is a rich type
+		if v.Type() != config.VariableTypeString {
+			result[v.Name] = v.Default
+			continue
+		}
+
+		// v.Default has already been parsed as HCL but it may be an int type
+		switch typedDefault := v.Default.(type) {
+		case string:
+			if typedDefault == "" {
+				continue
+			}
+			result[v.Name] = typedDefault
+		case int, int64:
+			result[v.Name] = fmt.Sprintf("%d", typedDefault)
+		case float32, float64:
+			result[v.Name] = fmt.Sprintf("%f", typedDefault)
+		case bool:
+			result[v.Name] = fmt.Sprintf("%t", typedDefault)
+		default:
+			panic(fmt.Sprintf(
+				"Unknown default var type: %T\n\n"+
+					"THIS IS A BUG. Please report it.",
+				v.Default))
+		}
+	}
+
+	// Load from env vars
+	for _, v := range os.Environ() {
+		if !strings.HasPrefix(v, VarEnvPrefix) {
+			continue
+		}
+
+		// Strip off the prefix and get the value after the first "="
+		idx := strings.Index(v, "=")
+		k := v[len(VarEnvPrefix):idx]
+		v = v[idx+1:]
+
+		// Override the configuration-default values. Note that *not* finding the variable
+		// in configuration is OK, as we don't want to preclude people from having multiple
+		// sets of TF_VAR_whatever in their environment even if it is a little weird.
+ for _, schema := range m.Config().Variables { + if schema.Name != k { + continue + } + + varType := schema.Type() + varVal, err := parseVariableAsHCL(k, v, varType) + if err != nil { + return nil, err + } + + switch varType { + case config.VariableTypeMap: + if err := varSetMap(result, k, varVal); err != nil { + return nil, err + } + default: + result[k] = varVal + } + } + } + + // Load from overrides + for k, v := range override { + for _, schema := range m.Config().Variables { + if schema.Name != k { + continue + } + + switch schema.Type() { + case config.VariableTypeList: + result[k] = v + case config.VariableTypeMap: + if err := varSetMap(result, k, v); err != nil { + return nil, err + } + case config.VariableTypeString: + // Convert to a string and set. We don't catch any errors + // here because the validation step later should catch + // any type errors. + var strVal string + if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { + result[k] = strVal + } else { + result[k] = v + } + default: + panic(fmt.Sprintf( + "Unhandled var type: %T\n\n"+ + "THIS IS A BUG. Please report it.", + schema.Type())) + } + } + } + + return result, nil +} + +// varSetMap sets or merges the map in "v" with the key "k" in the +// "current" set of variables. This is just a private function to remove +// duplicate logic in Variables +func varSetMap(current map[string]interface{}, k string, v interface{}) error { + existing, ok := current[k] + if !ok { + current[k] = v + return nil + } + + existingMap, ok := existing.(map[string]interface{}) + if !ok { + panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k)) + } + + switch typedV := v.(type) { + case []map[string]interface{}: + for newKey, newVal := range typedV[0] { + existingMap[newKey] = newVal + } + case map[string]interface{}: + for newKey, newVal := range typedV { + existingMap[newKey] = newVal + } + default: + return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v)) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go new file mode 100644 index 0000000000..cdfb8fb665 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/version.go @@ -0,0 +1,31 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/go-version" +) + +// The main version number that is being run at the moment. +const Version = "0.9.8" + +// A pre-release marker for the version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +var VersionPrerelease = "" + +// SemVersion is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. +var SemVersion = version.Must(version.NewVersion(Version)) + +// VersionHeader is the header name used to send the current terraform version +// in http requests. 
+const VersionHeader = "Terraform-Version" + +func VersionString() string { + if VersionPrerelease != "" { + return fmt.Sprintf("%s-%s", Version, VersionPrerelease) + } + return Version +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go new file mode 100644 index 0000000000..3cbbf56085 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go @@ -0,0 +1,69 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" +) + +// checkRequiredVersion verifies that any version requirements specified by +// the configuration are met. +// +// This checks the root module as well as any additional version requirements +// from child modules. +// +// This is tested in context_test.go. +func checkRequiredVersion(m *module.Tree) error { + // Check any children + for _, c := range m.Children() { + if err := checkRequiredVersion(c); err != nil { + return err + } + } + + var tf *config.Terraform + if c := m.Config(); c != nil { + tf = c.Terraform + } + + // If there is no Terraform config or the required version isn't set, + // we move on. + if tf == nil || tf.RequiredVersion == "" { + return nil + } + + // Path for errors + module := "root" + if path := normalizeModulePath(m.Path()); len(path) > 1 { + module = modulePrefixStr(path) + } + + // Check this version requirement of this module + cs, err := version.NewConstraint(tf.RequiredVersion) + if err != nil { + return fmt.Errorf( + "%s: terraform.required_version %q syntax error: %s", + module, + tf.RequiredVersion, err) + } + + if !cs.Check(SemVersion) { + return fmt.Errorf( + "The currently running version of Terraform doesn't meet the\n"+ + "version requirements explicitly specified by the configuration.\n"+ + "Please use the required version or update the configuration.\n"+ + "Note that version requirements are usually set for a reason, so\n"+ + "we recommend verifying with whoever set the version requirements\n"+ + "prior to making any manual changes.\n\n"+ + " Module: %s\n"+ + " Required version: %s\n"+ + " Current version: %s", + module, + tf.RequiredVersion, + SemVersion) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go new file mode 100644 index 0000000000..cbd78dd93f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT. + +package terraform + +import "fmt" + +const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" + +var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96} + +func (i walkOperation) String() string { + if i >= walkOperation(len(_walkOperation_index)-1) { + return fmt.Sprintf("walkOperation(%d)", i) + } + return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE new file mode 100644 index 0000000000..f0e5c79e18 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. 
"Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md new file mode 100644 index 0000000000..d4db7fc99b --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/README.md @@ -0,0 +1,86 @@ +# Yamux + +Yamux (Yet another Multiplexer) is a multiplexing library for Golang. +It relies on an underlying connection to provide reliability +and ordering, such as TCP or Unix domain sockets, and provides +stream-oriented multiplexing. It is inspired by SPDY but is not +interoperable with it. + +Yamux features include: + +* Bi-directional streams + * Streams can be opened by either client or server + * Useful for NAT traversal + * Server-side push support +* Flow control + * Avoid starvation + * Back-pressure to prevent overwhelming a receiver +* Keep Alives + * Enables persistent connections over a load balancer +* Efficient + * Enables thousands of logical streams with low overhead + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). + +## Specification + +The full specification for Yamux is provided in the `spec.md` file. +It can be used as a guide to implementors of interoperable libraries. + +## Usage + +Using Yamux is remarkably simple: + +```go + +func client() { + // Get a TCP connection + conn, err := net.Dial(...) 
+	if err != nil {
+		panic(err)
+	}
+
+	// Setup client side of yamux
+	session, err := yamux.Client(conn, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// Open a new stream
+	stream, err := session.Open()
+	if err != nil {
+		panic(err)
+	}
+
+	// Stream implements net.Conn
+	stream.Write([]byte("ping"))
+}
+
+func server() {
+	// Accept a TCP connection
+	conn, err := listener.Accept()
+	if err != nil {
+		panic(err)
+	}
+
+	// Setup server side of yamux
+	session, err := yamux.Server(conn, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// Accept a stream
+	stream, err := session.Accept()
+	if err != nil {
+		panic(err)
+	}
+
+	// Listen for a message
+	buf := make([]byte, 4)
+	stream.Read(buf)
+}
+
+```
+
diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go
new file mode 100644
index 0000000000..be6ebca9c7
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/addr.go
@@ -0,0 +1,60 @@
+package yamux
+
+import (
+	"fmt"
+	"net"
+)
+
+// hasAddr is used to get the address from the underlying connection
+type hasAddr interface {
+	LocalAddr() net.Addr
+	RemoteAddr() net.Addr
+}
+
+// yamuxAddr is used when we cannot get the underlying address
+type yamuxAddr struct {
+	Addr string
+}
+
+func (*yamuxAddr) Network() string {
+	return "yamux"
+}
+
+func (y *yamuxAddr) String() string {
+	return fmt.Sprintf("yamux:%s", y.Addr)
+}
+
+// Addr is used to get the address of the listener.
+func (s *Session) Addr() net.Addr {
+	return s.LocalAddr()
+}
+
+// LocalAddr is used to get the local address of the
+// underlying connection.
+func (s *Session) LocalAddr() net.Addr {
+	addr, ok := s.conn.(hasAddr)
+	if !ok {
+		return &yamuxAddr{"local"}
+	}
+	return addr.LocalAddr()
+}
+
+// RemoteAddr is used to get the address of the remote end
+// of the underlying connection
+func (s *Session) RemoteAddr() net.Addr {
+	addr, ok := s.conn.(hasAddr)
+	if !ok {
+		return &yamuxAddr{"remote"}
+	}
+	return addr.RemoteAddr()
+}
+
+// LocalAddr returns the local address
+func (s *Stream) LocalAddr() net.Addr {
+	return s.session.LocalAddr()
+}
+
+// RemoteAddr returns the remote address
+func (s *Stream) RemoteAddr() net.Addr {
+	return s.session.RemoteAddr()
+}
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644
index 0000000000..4f52938287
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/const.go
@@ -0,0 +1,157 @@
+package yamux
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+var (
+	// ErrInvalidVersion means we received a frame with an
+	// invalid version
+	ErrInvalidVersion = fmt.Errorf("invalid protocol version")
+
+	// ErrInvalidMsgType means we received a frame with an
+	// invalid message type
+	ErrInvalidMsgType = fmt.Errorf("invalid msg type")
+
+	// ErrSessionShutdown is used if there is a shutdown during
+	// an operation
+	ErrSessionShutdown = fmt.Errorf("session shutdown")
+
+	// ErrStreamsExhausted is returned if we have no more
+	// stream ids to issue
+	ErrStreamsExhausted = fmt.Errorf("streams exhausted")
+
+	// ErrDuplicateStream is used if a duplicate stream is
+	// opened inbound
+	ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
+
+	// ErrRecvWindowExceeded indicates the receive window was exceeded
+	ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
+
+	// ErrTimeout is used when we reach an IO deadline
+	ErrTimeout = fmt.Errorf("i/o deadline reached")
+
+	// ErrStreamClosed is returned when using a closed stream
+	ErrStreamClosed = fmt.Errorf("stream closed")
+
+	// ErrUnexpectedFlag is set when we get an unexpected flag
+	ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
+
+	// ErrRemoteGoAway is used when we get a go away from the other side
+	ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
+
+	// ErrConnectionReset is sent if a stream is reset. This can happen
+	// if the backlog is exceeded, or if there was a remote GoAway.
+	ErrConnectionReset = fmt.Errorf("connection reset")
+
+	// ErrConnectionWriteTimeout indicates that we hit the "safety valve"
+	// timeout writing to the underlying stream connection.
+	ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
+
+	// ErrKeepAliveTimeout is sent if a missed keepalive caused the stream
+	// to close
+	ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
+)
+
+const (
+	// protoVersion is the only version we support
+	protoVersion uint8 = 0
+)
+
+const (
+	// Data is used for data frames. They are followed
+	// by length bytes worth of payload.
+	typeData uint8 = iota
+
+	// WindowUpdate is used to change the window of
+	// a given stream. The length indicates the delta
+	// update to the window.
+	typeWindowUpdate
+
+	// Ping is sent as a keep-alive or to measure
+	// the RTT. The StreamID and Length value are echoed
+	// back in the response.
+	typePing
+
+	// GoAway is sent to terminate a session. The StreamID
+	// should be 0 and the length is an error code.
+	typeGoAway
+)
+
+const (
+	// SYN is sent to signal a new stream. May
+	// be sent with a data payload
+	flagSYN uint16 = 1 << iota
+
+	// ACK is sent to acknowledge a new stream. May
+	// be sent with a data payload
+	flagACK
+
+	// FIN is sent to half-close the given stream.
+	// May be sent with a data payload.
+	flagFIN
+
+	// RST is used to hard close a given stream.
+	flagRST
+)
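To see the frame layout in action, an in-package sketch (illustrative) that encodes a ping header with the helpers defined below and prints it:

```go
// exampleHeader round-trips the 12-byte yamux frame header using the
// unexported header helpers defined later in const.go.
func exampleHeader() {
	hdr := header(make([]byte, headerSize))
	hdr.encode(typePing, flagSYN, 0, 42)
	fmt.Println(hdr.String())
	// Prints: Vsn:0 Type:2 Flags:1 StreamID:0 Length:42
}
```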
+
+const (
+	// initialStreamWindow is the initial stream window size
+	initialStreamWindow uint32 = 256 * 1024
+)
+
+const (
+	// goAwayNormal is sent on a normal termination
+	goAwayNormal uint32 = iota
+
+	// goAwayProtoErr sent on a protocol error
+	goAwayProtoErr
+
+	// goAwayInternalErr sent on an internal error
+	goAwayInternalErr
+)
+
+const (
+	sizeOfVersion  = 1
+	sizeOfType     = 1
+	sizeOfFlags    = 2
+	sizeOfStreamID = 4
+	sizeOfLength   = 4
+	headerSize     = sizeOfVersion + sizeOfType + sizeOfFlags +
+		sizeOfStreamID + sizeOfLength
+)
+
+type header []byte
+
+func (h header) Version() uint8 {
+	return h[0]
+}
+
+func (h header) MsgType() uint8 {
+	return h[1]
+}
+
+func (h header) Flags() uint16 {
+	return binary.BigEndian.Uint16(h[2:4])
+}
+
+func (h header) StreamID() uint32 {
+	return binary.BigEndian.Uint32(h[4:8])
+}
+
+func (h header) Length() uint32 {
+	return binary.BigEndian.Uint32(h[8:12])
+}
+
+func (h header) String() string {
+	return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
+		h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
+}
+
+func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
+	h[0] = protoVersion
+	h[1] = msgType
+	binary.BigEndian.PutUint16(h[2:4], flags)
+	binary.BigEndian.PutUint32(h[4:8], streamID)
+	binary.BigEndian.PutUint32(h[8:12], length)
+}
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644
index 0000000000..7abc7c744c
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/mux.go
@@ -0,0 +1,87 @@
+package yamux
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"time"
+)
+
+// Config is used to tune the Yamux session
+type Config struct {
+	// AcceptBacklog is used to limit how many streams may be
+	// waiting for an accept.
+	AcceptBacklog int
+
+	// EnableKeepAlive is used to enable periodic keep alive
+	// messages using a ping.
+	EnableKeepAlive bool
+
+	// KeepAliveInterval is how often to perform the keep alive
+	KeepAliveInterval time.Duration
+
+	// ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+	// which we will suspect a problem with the underlying connection and
+	// close it. This is only applied to writes, where there's generally
+	// an expectation that things will move along quickly.
+	ConnectionWriteTimeout time.Duration
+
+	// MaxStreamWindowSize is used to control the maximum
+	// window size that we allow for a stream.
+	MaxStreamWindowSize uint32
+
+	// LogOutput is used to control the log destination
+	LogOutput io.Writer
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+	return &Config{
+		AcceptBacklog:          256,
+		EnableKeepAlive:        true,
+		KeepAliveInterval:      30 * time.Second,
+		ConnectionWriteTimeout: 10 * time.Second,
+		MaxStreamWindowSize:    initialStreamWindow,
+		LogOutput:              os.Stderr,
+	}
+}
+
+// VerifyConfig is used to verify the sanity of configuration
+func VerifyConfig(config *Config) error {
+	if config.AcceptBacklog <= 0 {
+		return fmt.Errorf("backlog must be positive")
+	}
+	if config.KeepAliveInterval == 0 {
+		return fmt.Errorf("keep-alive interval must be positive")
+	}
+	if config.MaxStreamWindowSize < initialStreamWindow {
+		return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+	}
+	return nil
+}
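A brief sketch of tuning a Config before handing it to Client or Server; the helper, the connection argument, and the specific values are illustrative only:

```go
package main

import (
	"net"
	"time"

	"github.com/hashicorp/yamux"
)

// newTunedSession is a hypothetical helper: start from the defaults and
// tighten them. VerifyConfig (also called inside Client) rejects
// out-of-range values.
func newTunedSession(conn net.Conn) (*yamux.Session, error) {
	cfg := yamux.DefaultConfig()
	cfg.KeepAliveInterval = 10 * time.Second     // ping more aggressively
	cfg.ConnectionWriteTimeout = 5 * time.Second // give up on stalled writes sooner
	cfg.MaxStreamWindowSize = 512 * 1024         // must be at least the 256 KiB initial window

	if err := yamux.VerifyConfig(cfg); err != nil {
		return nil, err
	}
	return yamux.Client(conn, cfg)
}
```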
+// Server is used to initialize a new server-side connection.
+// There must be at most one server-side connection. If a nil config is
+// provided, the DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+	if config == nil {
+		config = DefaultConfig()
+	}
+	if err := VerifyConfig(config); err != nil {
+		return nil, err
+	}
+	return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+	if config == nil {
+		config = DefaultConfig()
+	}
+
+	if err := VerifyConfig(config); err != nil {
+		return nil, err
+	}
+	return newSession(config, conn, true), nil
+}
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644
index 0000000000..e17981839f
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -0,0 +1,623 @@
+package yamux
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"math"
+	"net"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+	// remoteGoAway indicates the remote side does
+	// not want further connections. Must be first for alignment.
+	remoteGoAway int32
+
+	// localGoAway indicates that we should stop
+	// accepting further connections. Must be first for alignment.
+	localGoAway int32
+
+	// nextStreamID is the next stream we should send. This depends
+	// on whether we are a client or a server.
+	nextStreamID uint32
+
+	// config holds our configuration
+	config *Config
+
+	// logger is used for our logs
+	logger *log.Logger
+
+	// conn is the underlying connection
+	conn io.ReadWriteCloser
+
+	// bufRead is a buffered reader
+	bufRead *bufio.Reader
+
+	// pings is used to track inflight pings
+	pings    map[uint32]chan struct{}
+	pingID   uint32
+	pingLock sync.Mutex
+
+	// streams maps a stream id to a stream, and inflight has an entry
+	// for any outgoing stream that has not yet been established. Both are
+	// protected by streamLock.
+	streams    map[uint32]*Stream
+	inflight   map[uint32]struct{}
+	streamLock sync.Mutex
+
+	// synCh acts like a semaphore. It is sized to the AcceptBacklog which
+	// is assumed to be symmetric between the client and server. This allows
+	// the client to avoid exceeding the backlog and instead blocks the open.
+	synCh chan struct{}
+
+	// acceptCh is used to pass ready streams to the client
+	acceptCh chan *Stream
+
+	// sendCh is used to mark a stream as ready to send,
+	// or to send a header out directly.
+ sendCh chan sendReady + + // recvDoneCh is closed when recv() exits to avoid a race + // between stream registration and stream shutdown + recvDoneCh chan struct{} + + // shutdown is used to safely close a session + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// sendReady is used to either mark a stream as ready +// or to directly send a header +type sendReady struct { + Hdr []byte + Body io.Reader + Err chan error +} + +// newSession is used to construct a new session +func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + s := &Session{ + config: config, + logger: log.New(config.LogOutput, "", log.LstdFlags), + conn: conn, + bufRead: bufio.NewReader(conn), + pings: make(map[uint32]chan struct{}), + streams: make(map[uint32]*Stream), + inflight: make(map[uint32]struct{}), + synCh: make(chan struct{}, config.AcceptBacklog), + acceptCh: make(chan *Stream, config.AcceptBacklog), + sendCh: make(chan sendReady, 64), + recvDoneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + } + if client { + s.nextStreamID = 1 + } else { + s.nextStreamID = 2 + } + go s.recv() + go s.send() + if config.EnableKeepAlive { + go s.keepalive() + } + return s +} + +// IsClosed does a safe check to see if we have shutdown +func (s *Session) IsClosed() bool { + select { + case <-s.shutdownCh: + return true + default: + return false + } +} + +// NumStreams returns the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") + } + return nil, err + } + return stream, nil +} + +// Accept is used to block until the next available stream +// is ready to be accepted. +func (s *Session) Accept() (net.Conn, error) { + conn, err := s.AcceptStream() + if err != nil { + return nil, err + } + return conn, err +} + +// AcceptStream is used to block until the next available stream +// is ready to be accepted. +func (s *Session) AcceptStream() (*Stream, error) { + select { + case stream := <-s.acceptCh: + if err := stream.sendWindowUpdate(); err != nil { + return nil, err + } + return stream, nil + case <-s.shutdownCh: + return nil, s.shutdownErr + } +} + +// Close is used to close the session and all streams. 
+// Attempts to send a GoAway before closing the connection.
+func (s *Session) Close() error {
+	s.shutdownLock.Lock()
+	defer s.shutdownLock.Unlock()
+
+	if s.shutdown {
+		return nil
+	}
+	s.shutdown = true
+	if s.shutdownErr == nil {
+		s.shutdownErr = ErrSessionShutdown
+	}
+	close(s.shutdownCh)
+	s.conn.Close()
+	<-s.recvDoneCh
+
+	s.streamLock.Lock()
+	defer s.streamLock.Unlock()
+	for _, stream := range s.streams {
+		stream.forceClose()
+	}
+	return nil
+}
+
+// exitErr is used to handle an error that is causing the
+// session to terminate.
+func (s *Session) exitErr(err error) {
+	s.shutdownLock.Lock()
+	if s.shutdownErr == nil {
+		s.shutdownErr = err
+	}
+	s.shutdownLock.Unlock()
+	s.Close()
+}
+
+// GoAway can be used to prevent accepting further
+// connections. It does not close the underlying conn.
+func (s *Session) GoAway() error {
+	return s.waitForSend(s.goAway(goAwayNormal), nil)
+}
+
+// goAway is used to send a goAway message
+func (s *Session) goAway(reason uint32) header {
+	atomic.SwapInt32(&s.localGoAway, 1)
+	hdr := header(make([]byte, headerSize))
+	hdr.encode(typeGoAway, 0, 0, reason)
+	return hdr
+}
+
+// Ping is used to measure the round-trip time
+func (s *Session) Ping() (time.Duration, error) {
+	// Get a channel for the ping
+	ch := make(chan struct{})
+
+	// Get a new ping id, mark as pending
+	s.pingLock.Lock()
+	id := s.pingID
+	s.pingID++
+	s.pings[id] = ch
+	s.pingLock.Unlock()
+
+	// Send the ping request
+	hdr := header(make([]byte, headerSize))
+	hdr.encode(typePing, flagSYN, 0, id)
+	if err := s.waitForSend(hdr, nil); err != nil {
+		return 0, err
+	}
+
+	// Wait for a response
+	start := time.Now()
+	select {
+	case <-ch:
+	case <-time.After(s.config.ConnectionWriteTimeout):
+		s.pingLock.Lock()
+		delete(s.pings, id) // Ignore it if a response comes later.
+		s.pingLock.Unlock()
+		return 0, ErrTimeout
+	case <-s.shutdownCh:
+		return 0, ErrSessionShutdown
+	}
+
+	// Compute the RTT
+	return time.Now().Sub(start), nil
+}
+
+// keepalive is a long running goroutine that periodically does
+// a ping to keep the connection alive.
+func (s *Session) keepalive() {
+	for {
+		select {
+		case <-time.After(s.config.KeepAliveInterval):
+			_, err := s.Ping()
+			if err != nil {
+				s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
+				s.exitErr(ErrKeepAliveTimeout)
+				return
+			}
+		case <-s.shutdownCh:
+			return
+		}
+	}
+}
+
+// waitForSend waits to send a header, checking for a potential shutdown
+func (s *Session) waitForSend(hdr header, body io.Reader) error {
+	errCh := make(chan error, 1)
+	return s.waitForSendErr(hdr, body, errCh)
+}
+
+// waitForSendErr waits to send a header with optional data, checking for a
+// potential shutdown. Since there's the expectation that sends can happen
+// in a timely manner, we enforce the connection write timeout here.
+func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
+	timer := time.NewTimer(s.config.ConnectionWriteTimeout)
+	defer timer.Stop()
+
+	ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
+	select {
+	case s.sendCh <- ready:
+	case <-s.shutdownCh:
+		return ErrSessionShutdown
+	case <-timer.C:
+		return ErrConnectionWriteTimeout
+	}
+
+	select {
+	case err := <-errCh:
+		return err
+	case <-s.shutdownCh:
+		return ErrSessionShutdown
+	case <-timer.C:
+		return ErrConnectionWriteTimeout
+	}
+}
+
+// sendNoWait does a send without waiting.
Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. +func (s *Session) sendNoWait(hdr header) error { + timer := time.NewTimer(s.config.ConnectionWriteTimeout) + defer timer.Stop() + + select { + case s.sendCh <- sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + for { + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + sent := 0 + for sent < len(ready.Hdr) { + n, err := s.conn.Write(ready.Hdr[sent:]) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + sent += n + } + } + + // Send data from a body if given + if ready.Body != nil { + _, err := io.Copy(s.conn, ready.Body) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + var handler func(header) error + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + // Switch on the type + switch hdr.MsgType() { + case typeData: + handler = s.handleStreamMessage + case typeWindowUpdate: + handler = s.handleStreamMessage + case typeGoAway: + handler = s.handleGoAway + case typePing: + handler = s.handlePing + default: + return ErrInvalidMsgType + } + + // Invoke the handler + if err := handler(hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST + if stream == nil { + // Drain any data on the wire + if hdr.MsgType() == typeData && hdr.Length() > 0 { + s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) + if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { + s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) + return nil + } + } else { + s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) + } + return nil + } + + // Check if this is a window update + if hdr.MsgType() == typeWindowUpdate { + if err := stream.incrSendWindow(hdr, flags); err != nil { + if sendErr := 
s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+				s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+			}
+			return err
+		}
+		return nil
+	}
+
+	// Read the new data
+	if err := stream.readData(hdr, flags, s.bufRead); err != nil {
+		if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+			s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+		}
+		return err
+	}
+	return nil
+}
+
+// handlePing is invoked for a typePing frame
+func (s *Session) handlePing(hdr header) error {
+	flags := hdr.Flags()
+	pingID := hdr.Length()
+
+	// Check if this is a query, respond back in a separate context so we
+	// don't interfere with the receiving thread blocking for the write.
+	if flags&flagSYN == flagSYN {
+		go func() {
+			hdr := header(make([]byte, headerSize))
+			hdr.encode(typePing, flagACK, 0, pingID)
+			if err := s.sendNoWait(hdr); err != nil {
+				s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
+			}
+		}()
+		return nil
+	}
+
+	// Handle a response
+	s.pingLock.Lock()
+	ch := s.pings[pingID]
+	if ch != nil {
+		delete(s.pings, pingID)
+		close(ch)
+	}
+	s.pingLock.Unlock()
+	return nil
+}
+
+// handleGoAway is invoked for a typeGoAway frame
+func (s *Session) handleGoAway(hdr header) error {
+	code := hdr.Length()
+	switch code {
+	case goAwayNormal:
+		atomic.SwapInt32(&s.remoteGoAway, 1)
+	case goAwayProtoErr:
+		s.logger.Printf("[ERR] yamux: received protocol error go away")
+		return fmt.Errorf("yamux protocol error")
+	case goAwayInternalErr:
+		s.logger.Printf("[ERR] yamux: received internal error go away")
+		return fmt.Errorf("remote yamux internal error")
+	default:
+		s.logger.Printf("[ERR] yamux: received unexpected go away")
+		return fmt.Errorf("unexpected go away received")
+	}
+	return nil
+}
+
+// incomingStream is used to create a new incoming stream
+func (s *Session) incomingStream(id uint32) error {
+	// Reject immediately if we are doing a go away
+	if atomic.LoadInt32(&s.localGoAway) == 1 {
+		hdr := header(make([]byte, headerSize))
+		hdr.encode(typeWindowUpdate, flagRST, id, 0)
+		return s.sendNoWait(hdr)
+	}
+
+	// Allocate a new stream
+	stream := newStream(s, id, streamSYNReceived)
+
+	s.streamLock.Lock()
+	defer s.streamLock.Unlock()
+
+	// Check if stream already exists
+	if _, ok := s.streams[id]; ok {
+		s.logger.Printf("[ERR] yamux: duplicate stream declared")
+		if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+			s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+		}
+		return ErrDuplicateStream
+	}
+
+	// Register the stream
+	s.streams[id] = stream
+
+	// Check if we've exceeded the backlog
+	select {
+	case s.acceptCh <- stream:
+		return nil
+	default:
+		// Backlog exceeded! RST the stream
+		s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
+		delete(s.streams, id)
+		stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
+		return s.sendNoWait(stream.sendHdr)
+	}
+}
+
+// closeStream is used to close a stream once both sides have
+// issued a close. If there was an in-flight SYN and the stream
+// was not yet established, then this will give the credit back.
+func (s *Session) closeStream(id uint32) {
+	s.streamLock.Lock()
+	if _, ok := s.inflight[id]; ok {
+		select {
+		case <-s.synCh:
+		default:
+			s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
+		}
+	}
+	delete(s.streams, id)
+	s.streamLock.Unlock()
+}
+
+// establishStream is used to mark a stream that was in the
+// SYN Sent state as established.
+func (s *Session) establishStream(id uint32) {
+	s.streamLock.Lock()
+	if _, ok := s.inflight[id]; ok {
+		delete(s.inflight, id)
+	} else {
+		s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
+	}
+	select {
+	case <-s.synCh:
+	default:
+		s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
+	}
+	s.streamLock.Unlock()
+}
diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md
new file mode 100644
index 0000000000..183d797bde
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/spec.md
@@ -0,0 +1,140 @@
+# Specification
+
+This document details the internal specification of Yamux. It serves
+both as a guide for implementing Yamux and as a reference for building
+alternative interoperable libraries.
+
+# Framing
+
+Yamux uses a streaming connection underneath, but imposes a message
+framing so that it can be shared between many logical streams. Each
+frame contains a header like:
+
+* Version (8 bits)
+* Type (8 bits)
+* Flags (16 bits)
+* StreamID (32 bits)
+* Length (32 bits)
+
+This means that each header has a 12 byte overhead.
+All fields are encoded in network order (big endian).
+Each field is described below:
+
+## Version Field
+
+The version field is used for future backward compatibility. At the
+current time, the field is always set to 0, to indicate the initial
+version.
+
+## Type Field
+
+The type field is used to switch the frame message type. The following
+message types are supported:
+
+* 0x0 Data - Used to transmit data. May transmit zero length payloads
+  depending on the flags.
+
+* 0x1 Window Update - Used to update the sender's receive window size.
+  This is used to implement per-stream flow control.
+
+* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat
+  and do keep-alives over TCP.
+
+* 0x3 Go Away - Used to close a session.
+
+## Flag Field
+
+The flags field is used to provide additional information related
+to the message type. The following flags are supported:
+
+* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
+  window update message. Also sent with a ping to indicate outbound.
+
+* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
+  or window update message. Also sent with a ping to indicate response.
+
+* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
+  message or window update.
+
+* 0x8 RST - Reset a stream immediately. May be sent with a data or
+  window update message.
+
+## StreamID Field
+
+The StreamID field is used to identify the logical stream the frame
+is addressing. The client side should use odd IDs, and the server even
+IDs. This prevents any collisions. Additionally, the 0 ID is reserved to
+represent the session.
+
+Both Ping and Go Away messages should always use the 0 StreamID.
+
+## Length Field
+
+The meaning of the length field depends on the message type:
+
+* Data - provides the length of bytes following the header
+* Window update - provides a delta update to the window size
+* Ping - Contains an opaque value, echoed back
+* Go Away - Contains an error code
+
+# Message Flow
+
+There is no explicit connection setup, as Yamux relies on an underlying
+transport to be provided. However, there is a distinction between the
+client and server side of the connection.
+
+## Opening a stream
+
+To open a stream, an initial data or window update frame is sent
+with a new StreamID. The SYN flag should be set to signal a new stream.
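+
+As an illustration (not part of the normative spec; this assumes both
+sides keep the default 256KB initial window, so the opening frame carries
+a zero window delta), the first frame a client might send to open
+stream 1 is the following 12-byte window update header:
+
+```
+00            Version  (0)
+01            Type     (0x1 Window Update)
+00 01         Flags    (0x1 SYN)
+00 00 00 01   StreamID (1, odd = client side)
+00 00 00 00   Length   (window delta of 0)
+```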
+
+The receiver must then reply with either a data or window update frame
+with the StreamID along with the ACK flag to accept the stream or with
+the RST flag to reject the stream.
+
+Because we are relying on the reliable stream underneath, the stream
+can begin sending data once the SYN flag is sent. The corresponding
+ACK does not need to be received. This is particularly well suited
+for an RPC system where a client wants to open a stream and immediately
+fire a request without waiting for the RTT of the ACK.
+
+This does introduce the possibility of a stream being rejected
+after data has been sent already. This is a slight semantic difference
+from TCP, where the connection cannot be refused after it is opened.
+Clients should be prepared to handle this by checking for an error
+that indicates a RST was received.
+
+## Closing a stream
+
+To close a stream, either side sends a data or window update frame
+along with the FIN flag. This does a half-close indicating the sender
+will send no further data.
+
+Once both sides have performed the half-close, the stream is fully
+closed.
+
+Alternatively, if an error occurs, the RST flag can be used to
+hard close a stream immediately.
+
+## Flow Control
+
+Yamux initially starts each stream with a 256KB window size.
+There is no window size for the session.
+
+To prevent the streams from stalling, window update frames should be
+sent regularly. Yamux can be configured to provide a larger limit for
+window sizes. Both sides assume the initial 256KB window, but can
+immediately send a window update as part of the SYN/ACK indicating a
+larger window.
+
+Both sides should track the number of bytes sent in Data frames
+only, as only those are counted against the window size.
+
+## Session termination
+
+When a session is being terminated, the Go Away message should
+be sent. The Length should be set to one of the following to
+provide an error code:
+
+* 0x0 Normal termination
+* 0x1 Protocol error
+* 0x2 Internal error
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
new file mode 100644
index 0000000000..d216e281ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/stream.go
@@ -0,0 +1,457 @@
+package yamux
+
+import (
+	"bytes"
+	"io"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type streamState int
+
+const (
+	streamInit streamState = iota
+	streamSYNSent
+	streamSYNReceived
+	streamEstablished
+	streamLocalClose
+	streamRemoteClose
+	streamClosed
+	streamReset
+)
+
+// Stream is used to represent a logical stream
+// within a session.
+type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline time.Time + writeDeadline time.Time +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + } + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update potentially + err = s.sendWindowUpdate() + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + if !s.readDeadline.IsZero() { + delay := s.readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. 
+func (s *Stream) write(b []byte) (n int, err error) { + var flags uint16 + var max uint32 + var body io.Reader +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamClosed: + s.stateLock.Unlock() + return 0, ErrStreamClosed + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + window := atomic.LoadUint32(&s.sendWindow) + if window == 0 { + goto WAIT + } + + // Determine the flags if any + flags = s.sendFlags() + + // Send up to our send window + max = min(window, uint32(len(b))) + body = bytes.NewReader(b[:max]) + + // Send the header + s.sendHdr.encode(typeData, flags, s.id, max) + if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + return 0, err + } + + // Reduce our send window + atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) + + // Unlock + return int(max), err + +WAIT: + var timeout <-chan time.Time + if !s.writeDeadline.IsZero() { + delay := s.writeDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.sendNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } + return 0, nil +} + +// sendFlags determines any flags that are appropriate +// based on the current stream state +func (s *Stream) sendFlags() uint16 { + s.stateLock.Lock() + defer s.stateLock.Unlock() + var flags uint16 + switch s.state { + case streamInit: + flags |= flagSYN + s.state = streamSYNSent + case streamSYNReceived: + flags |= flagACK + s.state = streamEstablished + } + return flags +} + +// sendWindowUpdate potentially sends a window update enabling +// further writes to take place. Must be invoked with the lock. +func (s *Stream) sendWindowUpdate() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + // Determine the delta update + max := s.session.config.MaxStreamWindowSize + delta := max - atomic.LoadUint32(&s.recvWindow) + + // Determine the flags if any + flags := s.sendFlags() + + // Check if we can omit the update + if delta < (max/2) && flags == 0 { + return nil + } + + // Update our window + atomic.AddUint32(&s.recvWindow, delta) + + // Send the header + s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// sendClose is used to send a FIN +func (s *Stream) sendClose() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + flags := s.sendFlags() + flags |= flagFIN + s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// Close is used to close the stream +func (s *Stream) Close() error { + closeStream := false + s.stateLock.Lock() + switch s.state { + // Opened means we need to signal a close + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamLocalClose + goto SEND_CLOSE + + case streamLocalClose: + case streamRemoteClose: + s.state = streamClosed + closeStream = true + goto SEND_CLOSE + + case streamClosed: + case streamReset: + default: + panic("unhandled state") + } + s.stateLock.Unlock() + return nil +SEND_CLOSE: + s.stateLock.Unlock() + s.sendClose() + s.notifyWaiting() + if closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + 
s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. Lock must be held +func (s *Stream) processFlags(flags uint16) error { + // Close the stream without holding the state lock + closeStream := false + defer func() { + if closeStream { + s.session.closeStream(s.id) + } + }() + + s.stateLock.Lock() + defer s.stateLock.Unlock() + if flags&flagACK == flagACK { + if s.state == streamSYNSent { + s.state = streamEstablished + } + s.session.establishStream(s.id) + } + if flags&flagFIN == flagFIN { + switch s.state { + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamRemoteClose + s.notifyWaiting() + case streamLocalClose: + s.state = streamClosed + closeStream = true + s.notifyWaiting() + default: + s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) + return ErrUnexpectedFlag + } + } + if flags&flagRST == flagRST { + s.state = streamReset + closeStream = true + s.notifyWaiting() + } + return nil +} + +// notifyWaiting notifies all the waiting channels +func (s *Stream) notifyWaiting() { + asyncNotify(s.recvNotifyCh) + asyncNotify(s.sendNotifyCh) +} + +// incrSendWindow updates the size of our send window +func (s *Stream) incrSendWindow(hdr header, flags uint16) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Increase window, unblock a sender + atomic.AddUint32(&s.sendWindow, hdr.Length()) + asyncNotify(s.sendNotifyCh) + return nil +} + +// readData is used to handle a data frame +func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Check that our recv window is not exceeded + length := hdr.Length() + if length == 0 { + return nil + } + if remain := atomic.LoadUint32(&s.recvWindow); length > remain { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length) + return ErrRecvWindowExceeded + } + + // Wrap in a limited reader + conn = &io.LimitedReader{R: conn, N: int64(length)} + + // Copy into buffer + s.recvLock.Lock() + if s.recvBuf == nil { + // Allocate the receive buffer just-in-time to fit the full data frame. + // This way we can read in the whole packet without further allocations. + s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) + } + if _, err := io.Copy(s.recvBuf, conn); err != nil { + s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) + s.recvLock.Unlock() + return err + } + + // Decrement the receive window + atomic.AddUint32(&s.recvWindow, ^uint32(length-1)) + s.recvLock.Unlock() + + // Unblock any readers + asyncNotify(s.recvNotifyCh) + return nil +} + +// SetDeadline sets the read and write deadlines +func (s *Stream) SetDeadline(t time.Time) error { + if err := s.SetReadDeadline(t); err != nil { + return err + } + if err := s.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +// SetReadDeadline sets the deadline for future Read calls. +func (s *Stream) SetReadDeadline(t time.Time) error { + s.readDeadline = t + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls +func (s *Stream) SetWriteDeadline(t time.Time) error { + s.writeDeadline = t + return nil +} + +// Shrink is used to compact the amount of buffers utilized +// This is useful when using Yamux in a connection pool to reduce +// the idle memory utilization. 
+func (s *Stream) Shrink() {
+	s.recvLock.Lock()
+	if s.recvBuf != nil && s.recvBuf.Len() == 0 {
+		s.recvBuf = nil
+	}
+	s.recvLock.Unlock()
+}
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
new file mode 100644
index 0000000000..5fe45afcdf
--- /dev/null
+++ b/vendor/github.com/hashicorp/yamux/util.go
@@ -0,0 +1,28 @@
+package yamux
+
+// asyncSendErr is used to try an async send of an error
+func asyncSendErr(ch chan error, err error) {
+	if ch == nil {
+		return
+	}
+	select {
+	case ch <- err:
+	default:
+	}
+}
+
+// asyncNotify is used to signal a waiting goroutine
+func asyncNotify(ch chan struct{}) {
+	select {
+	case ch <- struct{}{}:
+	default:
+	}
+}
+
+// min computes the minimum of two values
+func min(a, b uint32) uint32 {
+	if a < b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 0000000000..b03310a91f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 0000000000..a828d2848f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  test      to run all the tests"
+	@echo "  build     to build the library and jp executable"
+	@echo "  generate  to run codegen"
+
+
+generate:
+	go generate ./...
+
+build:
+	rm -f $(CMD)
+	go build ./...
+	rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+	mv cmd/$(CMD)/$(CMD) .
+
+test:
+	go test -v ./...
+
+check:
+	go vet ./...
+	@echo "golint ./..."
+	@lint=`golint ./...`; \
+	lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+	echo "$$lint"; \
+	if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+	go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+	go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+	go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+	go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+	go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 0000000000..187ef676dc
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
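+
+A minimal usage sketch (the `Search` helper is the one defined in
+api.go of this package; the input document and printed output here are
+illustrative):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+func main() {
+	// Decode an arbitrary JSON document into an interface{}.
+	var data interface{}
+	if err := json.Unmarshal([]byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`), &data); err != nil {
+		panic(err)
+	}
+	// Evaluate a JMESPath expression against the decoded document.
+	result, err := jmespath.Search("foo.bar.baz[2]", data)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(result) // prints: 2
+}
+```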
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 0000000000..9cfa988bc5
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+	ast  ASTNode
+	intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+	return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+	jmespath, err := Compile(expression)
+	if err != nil {
+		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+	}
+	return jmespath
+}
+
+// Search evaluates the compiled JMESPath expression against input data and
+// returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+	return jp.intr.Execute(jp.ast, data)
+}
+
+// Search compiles the given JMESPath expression, evaluates it against the
+// input data, and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) {
+	intr := newInterpreter()
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	return intr.Execute(ast, data)
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644
index 0000000000..1cd2d239c9
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+	if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+		return fmt.Sprintf("astNodeType(%d)", i)
+	}
+	return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go
new file mode 100644
index 0000000000..9b7cd89b4b
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/functions.go
@@ -0,0 +1,842 @@
+package jmespath
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+type jpFunction func(arguments []interface{}) (interface{}, error)
+
+type jpType string
+
+const (
+	jpUnknown     jpType = "unknown"
+	jpNumber      jpType = "number"
+	jpString      jpType = "string"
+	jpArray       jpType = "array"
+	jpObject      jpType = "object"
+	jpArrayNumber jpType = "array[number]"
+
jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: 
[]jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + 
return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item > best {
+			best = item
+		}
+	}
+	return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+	final := make(map[string]interface{})
+	for _, m := range arguments {
+		mapped := m.(map[string]interface{})
+		for key, value := range mapped {
+			final[key] = value
+		}
+	}
+	return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	switch t := start.(type) {
+	case float64:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	case string:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	default:
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+	items, _ := toArrayNum(arguments[0])
+	sum := 0.0
+	for _, item := range items {
+		sum += item
+	}
+	return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		if len(items) == 0 {
+			return nil, nil
+		}
+		if len(items) == 1 {
+			return items[0], nil
+		}
+		best := items[0]
+		for _, item := range items[1:] {
+			if item < best {
+				best = item
+			}
+		}
+		return best, nil
+	}
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item < best {
+			best = item
+		}
+	}
+	return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if t, ok := start.(float64); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else if t, ok := start.(string); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current <
bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if _, ok := arg.(float64); ok {
+		return "number", nil
+	}
+	if _, ok := arg.(string); ok {
+		return "string", nil
+	}
+	if _, ok := arg.([]interface{}); ok {
+		return "array", nil
+	}
+	if _, ok := arg.(map[string]interface{}); ok {
+		return "object", nil
+	}
+	if arg == nil {
+		return "null", nil
+	}
+	if arg == true || arg == false {
+		return "boolean", nil
+	}
+	return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for key := range arg {
+		collected = append(collected, key)
+	}
+	return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for _, value := range arg {
+		collected = append(collected, value)
+	}
+	return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		d := sort.Float64Slice(items)
+		sort.Stable(d)
+		final := make([]interface{}, len(d))
+		for i, val := range d {
+			final[i] = val
+		}
+		return final, nil
+	}
+	// Otherwise we're dealing with sort()'ing strings.
+	items, _ := toArrayStr(arguments[0])
+	d := sort.StringSlice(items)
+	sort.Stable(d)
+	final := make([]interface{}, len(d))
+	for i, val := range d {
+		final[i] = val
+	}
+	return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return arr, nil
+	} else if len(arr) == 1 {
+		return arr, nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := start.(float64); ok {
+		sortable := &byExprFloat{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else if _, ok := start.(string); ok {
+		sortable := &byExprString{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+	sep := arguments[0].(string)
+	// We can't just do arguments[1].([]string), we have to
+	// manually convert each item to a string.
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 0000000000..13c74604c2 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 0000000000..817900c8f5 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. 
+	currentPos int          // The current position in the string.
+	lastWidth  int          // The width of the current rune.
+	buf        bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+	msg        string // Error message displayed to user
+	Expression string // Expression that generated a SyntaxError
+	Offset     int    // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+	// In the future, it would be good to underline the specific
+	// location where the error occurred.
+	return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+	return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
+
+//go:generate stringer -type=tokType
+const (
+	tUnknown tokType = iota
+	tStar
+	tDot
+	tFilter
+	tFlatten
+	tLparen
+	tRparen
+	tLbracket
+	tRbracket
+	tLbrace
+	tRbrace
+	tOr
+	tPipe
+	tNumber
+	tUnquotedIdentifier
+	tQuotedIdentifier
+	tComma
+	tColon
+	tLT
+	tLTE
+	tGT
+	tGTE
+	tEQ
+	tNE
+	tJSONLiteral
+	tStringLiteral
+	tCurrent
+	tExpref
+	tAnd
+	tNot
+	tEOF
+)
+
+var basicTokens = map[rune]tokType{
+	'.': tDot,
+	'*': tStar,
+	',': tComma,
+	':': tColon,
+	'{': tLbrace,
+	'}': tRbrace,
+	']': tRbracket, // tLbracket not included because it could be "[]"
+	'(': tLparen,
+	')': tRparen,
+	'@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+	' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+	return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+		t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+	lexer := Lexer{}
+	return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+	if lexer.currentPos >= len(lexer.expression) {
+		lexer.lastWidth = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+	lexer.lastWidth = w
+	lexer.currentPos += w
+	return r
+}
+
+func (lexer *Lexer) back() {
+	lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+	t := lexer.next()
+	lexer.back()
+	return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+	var tokens []token
+	lexer.expression = expression
+	lexer.currentPos = 0
+	lexer.lastWidth = 0
+loop:
+	for {
+		r := lexer.next()
+		if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+			t := lexer.consumeUnquotedIdentifier()
+			tokens = append(tokens, t)
+		} else if val, ok := basicTokens[r]; ok {
+			// Basic single char token.
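+			// For example, '.' becomes tDot and '@' becomes tCurrent,
+			// per the basicTokens map above.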
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
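+		// (lastWidth is zero only when next() has returned eof.)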
+		return token{}, SyntaxError{
+			msg:        "Unclosed delimiter: '",
+			Expression: lexer.expression,
+			Offset:     len(lexer.expression),
+		}
+	}
+	if currentIndex < lexer.currentPos {
+		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+	}
+	value := lexer.buf.String()
+	// Reset the buffer so it can be reused again.
+	lexer.buf.Reset()
+	return token{
+		tokenType: tStringLiteral,
+		value:     value,
+		position:  start,
+		length:    len(value),
+	}, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: lexer.expression,
+		Offset:     lexer.currentPos - 1,
+	}
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tOr, "|" -> tPipe.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == second {
+		t = token{
+			tokenType: matchedType,
+			value:     string(first) + string(second),
+			position:  start,
+			length:    2,
+		}
+	} else {
+		lexer.back()
+		t = token{
+			tokenType: singleCharType,
+			value:     string(first),
+			position:  start,
+			length:    1,
+		}
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+	// There are three options here:
+	// 1. A filter expression "[?"
+	// 2. A flatten operator "[]"
+	// 3. A bare lbracket "["
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == '?' {
+		t = token{
+			tokenType: tFilter,
+			value:     "[?",
+			position:  start,
+			length:    2,
+		}
+	} else if nextRune == ']' {
+		t = token{
+			tokenType: tFlatten,
+			value:     "[]",
+			position:  start,
+			length:    2,
+		}
+	} else {
+		t = token{
+			tokenType: tLbracket,
+			value:     "[",
+			position:  start,
+			length:    1,
+		}
+		lexer.back()
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+	start := lexer.currentPos
+	value, err := lexer.consumeUntil('"')
+	if err != nil {
+		return token{}, err
+	}
+	var decoded string
+	asJSON := []byte("\"" + value + "\"")
+	if err := json.Unmarshal(asJSON, &decoded); err != nil {
+		return token{}, err
+	}
+	return token{
+		tokenType: tQuotedIdentifier,
+		value:     decoded,
+		position:  start - 1,
+		length:    len(decoded),
+	}, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+	// Consume runes until we reach the end of an unquoted
+	// identifier.
+	start := lexer.currentPos - lexer.lastWidth
+	for {
+		r := lexer.next()
+		if r < 0 || r >= 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+			lexer.back()
+			break
+		}
+	}
+	value := lexer.expression[start:lexer.currentPos]
+	return token{
+		tokenType: tUnquotedIdentifier,
+		value:     value,
+		position:  start,
+		length:    lexer.currentPos - start,
+	}
+}
+
+func (lexer *Lexer) consumeNumber() token {
+	// Consume runes until we reach something that's not a number.
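+	// The first rune (a digit or a leading '-') was already consumed by
+	// tokenize; start is computed from lastWidth so it stays in the token.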
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 0000000000..1240a17552 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
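+//
+// For example (an illustrative sketch only):
+//
+//	ast, err := NewParser().Parse("foo[0].bar")
+//	if err == nil {
+//		fmt.Println(ast.PrettyPrint(0))
+//	}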
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+	lexer := NewLexer()
+	p.expression = expression
+	p.index = 0
+	tokens, err := lexer.tokenize(expression)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	p.tokens = tokens
+	parsed, err := p.parseExpression(0)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	if p.current() != tEOF {
+		return ASTNode{}, p.syntaxError(fmt.Sprintf(
+			"Unexpected token at the end of the expression: %s", p.current()))
+	}
+	return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+	var err error
+	leftToken := p.lookaheadToken(0)
+	p.advance()
+	leftNode, err := p.nud(leftToken)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	currentToken := p.current()
+	for bindingPower < bindingPowers[currentToken] {
+		p.advance()
+		leftNode, err = p.led(currentToken, leftNode)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		currentToken = p.current()
+	}
+	return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+	if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+		return p.parseSliceExpression()
+	}
+	indexStr := p.lookaheadToken(0).value
+	parsedInt, err := strconv.Atoi(indexStr)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+	p.advance()
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+	parts := []*int{nil, nil, nil}
+	index := 0
+	current := p.current()
+	for current != tRbracket && index < 3 {
+		if current == tColon {
+			index++
+			p.advance()
+		} else if current == tNumber {
+			parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			parts[index] = &parsedInt
+			p.advance()
+		} else {
+			return ASTNode{}, p.syntaxError(
+				"Expected tColon or tNumber" + ", received: " + p.current().String())
+		}
+		current = p.current()
+	}
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	return ASTNode{
+		nodeType: ASTSlice,
+		value:    parts,
+	}, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+	if p.current() == tokenType {
+		p.advance()
+		return nil
+	}
+	return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+	switch tokenType {
+	case tDot:
+		if p.current() != tStar {
+			right, err := p.parseDotRHS(bindingPowers[tDot])
+			return ASTNode{
+				nodeType: ASTSubexpression,
+				children: []ASTNode{node, right},
+			}, err
+		}
+		p.advance()
+		right, err := p.parseProjectionRHS(bindingPowers[tDot])
+		return ASTNode{
+			nodeType: ASTValueProjection,
+			children: []ASTNode{node, right},
+		}, err
+	case tPipe:
+		right, err := p.parseExpression(bindingPowers[tPipe])
+		return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+	case tOr:
+		right, err := p.parseExpression(bindingPowers[tOr])
+		return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+	case tAnd:
+		right, err := p.parseExpression(bindingPowers[tAnd])
+		return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+	case tLparen:
+		name := node.value
+		var args []ASTNode
+		for p.current() != tRparen {
+			expression, err := p.parseExpression(0)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			if p.current() == tComma {
+				if err := p.match(tComma); err != nil {
+					return ASTNode{}, err
+				}
+			}
+			args = append(args, expression)
+		}
+
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 0000000000..dae79cbdf3 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 0000000000..ddc1b7d7d4 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
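+// For example, []interface{}{"a", "b"} converts to []string{"a", "b"}.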
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false. If the input data could be entirely
+// converted, then the converted data, along with a second value of true,
+// will be returned.
+func toArrayStr(data interface{}) ([]string, bool) {
+	// Is there a better way to do this with reflect?
+	if d, ok := data.([]interface{}); ok {
+		result := make([]string, len(d))
+		for i, el := range d {
+			item, ok := el.(string)
+			if !ok {
+				return nil, false
+			}
+			result[i] = item
+		}
+		return result, true
+	}
+	return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+	if v == nil {
+		return false
+	}
+	return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/vendor/github.com/minsikl/netscaler-nitro-go/client/client.go b/vendor/github.com/minsikl/netscaler-nitro-go/client/client.go
new file mode 100644
index 0000000000..37fe211ebe
--- /dev/null
+++ b/vendor/github.com/minsikl/netscaler-nitro-go/client/client.go
@@ -0,0 +1,242 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"github.com/minsikl/netscaler-nitro-go/datatypes"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"reflect"
+	"strings"
+)
+
+// NitroClient is a client for the NetScaler Nitro REST API.
+type NitroClient struct {
+	Protocol  string
+	IpAddress string
+	Mode      string
+	User      string
+	Password  string
+	Debug     bool
+}
+
+func (n *NitroClient) Add(req interface{}, options ...string) error {
+	resource, err := getResourceStringByObject(req)
+	if err != nil {
+		return err
+	}
+	reqJson, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+	requestQuery := resource + getOptions(options)
+	responseBody, _, err := HTTPRequest(n, requestQuery, "POST", reqJson)
+	if err != nil {
+		return fmt.Errorf("Error in POST '%s'", err.Error())
+	}
+	if len(responseBody) > 0 {
+		res := datatypes.BaseRes{}
+		err = json.Unmarshal(responseBody, &res)
+		if err != nil {
+			return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+		}
+		if res.Severity != nil && *res.Severity == "ERROR" {
+			return fmt.Errorf("Error in POST : Errorcode '%d' Message '%s' Severity '%s'\r\n", *res.Errorcode, *res.Message, *res.Severity)
+		}
+	}
+	return nil
+}
+
+func (n *NitroClient) Update(req interface{}, options ...string) error {
+	resource, err := getResourceStringByObject(req)
+	if err != nil {
+		return err
+	}
+	reqJson, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+	requestQuery := resource + getOptions(options)
+	responseBody, _, err := HTTPRequest(n, requestQuery, "PUT", reqJson)
+	if err != nil {
+		return fmt.Errorf("Error in PUT '%s'", err.Error())
+	}
+	if len(responseBody) > 0 {
+		res := datatypes.BaseRes{}
+		err = json.Unmarshal(responseBody, &res)
+		if err != nil {
+			return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+		}
+		if res.Severity != nil && *res.Severity == "ERROR" {
+			return fmt.Errorf("Error in PUT : Errorcode '%d' Message '%s' Severity '%s'\r\n", *res.Errorcode, *res.Message, *res.Severity)
+		}
+	}
+	return nil
+}
+
+func (n *NitroClient) Get(res interface{}, resourceName string, options ...string) error {
+	resource, err := getResourceStringByObject(res)
+	if err != nil {
+		return err
+	}
+
+	requestQuery := resource + "/" + resourceName + getOptions(options)
+	responseBody, _, err := HTTPRequest(n, requestQuery, "GET", nil)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(responseBody, res)
+	if err != nil {
+		return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+	}
+	resMessage := datatypes.BaseRes{}
+	err = json.Unmarshal(responseBody, &resMessage)
+	if err != nil {
+		return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+	}
+	if resMessage.Severity != nil && *resMessage.Severity == "ERROR" {
+		return fmt.Errorf("Error in GET : Errorcode '%d' Message '%s' Severity '%s'\r\n", *resMessage.Errorcode, *resMessage.Message, *resMessage.Severity)
+	}
+
+	return nil
+}
+
+func (n *NitroClient) Delete(req interface{}, resourceName string, options ...string) error {
+	resource, err := getResourceStringByObject(req)
+	if err != nil {
+		return err
+	}
+
+	requestQuery := resource + "/" + resourceName + getOptions(options)
+	responseBody, _, err := HTTPRequest(n, requestQuery, "DELETE", nil)
+	if err != nil {
+		return err
+	}
+	resMessage := datatypes.BaseRes{}
+	err = json.Unmarshal(responseBody, &resMessage)
+	if err != nil {
+		return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+	}
+	if resMessage.Severity != nil && *resMessage.Severity == "ERROR" {
+		return fmt.Errorf("Error in DELETE : Errorcode '%d' Message '%s' Severity '%s'\r\n", *resMessage.Errorcode, *resMessage.Message, *resMessage.Severity)
+	}
+
+	return nil
+}
+
+func (n *NitroClient) Enable(req interface{}, enable bool) error {
+	resource, err := getResourceStringByObject(req)
+	if err != nil {
+		return err
+	}
+	reqJson, err := json.Marshal(req)
+	if err != nil {
+		return err
+	}
+	log.Print(string(reqJson))
+	action := "/?action=enable"
+	if !enable {
+		action = "/?action=disable"
+	}
+	query := resource + action
+	log.Println("QUERY : " + query)
+	responseBody, _, err := HTTPRequest(n, query, "POST", reqJson)
+	if err != nil {
+		return fmt.Errorf("Error in POST '%s' for Enable", err.Error())
+	}
+	if len(responseBody) > 0 {
+		res := datatypes.BaseRes{}
+		err = json.Unmarshal(responseBody, &res)
+		if err != nil {
+			return fmt.Errorf("Error in Unmarshal '%s'", err.Error())
+		}
+		if res.Severity != nil && *res.Severity == "ERROR" {
+			return fmt.Errorf("Error in POST : Errorcode '%d' Message '%s' Severity '%s'\r\n", *res.Errorcode, *res.Message, *res.Severity)
+		}
+	}
+	return nil
+}
+
+func NewNitroClient(protocol string, ipAddress string, mode string, user string, password string, debug bool) *NitroClient {
+	nClient := NitroClient{
+		Protocol:  protocol,
+		IpAddress: ipAddress,
+		Mode:      mode,
+		User:      user,
+		Password:  password,
+		Debug:     debug,
+	}
+	return &nClient
+}
+
+func HTTPRequest(nClient *NitroClient, requestQuery string, requestType string, requestBody []byte) ([]byte, int, error) {
+
+	// Create a request
+	Url := nClient.Protocol + "://" + nClient.IpAddress + "/nitro/v1/" + nClient.Mode + "/" + requestQuery
+	requestBodyBuffer := bytes.NewBuffer(requestBody)
+	req, err := http.NewRequest(requestType, Url, requestBodyBuffer)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("X-NITRO-USER", nClient.User)
+	req.Header.Set("X-NITRO-PASS", nClient.Password)
+
+	if nClient.Debug {
+		log.Println("[DEBUG] Nitro Request Path: ", requestType, req.URL)
+		log.Println("[DEBUG] Nitro Request Parameters: ", requestBodyBuffer.String())
+	}
+
+	// Execute http request
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	defer resp.Body.Close()
+
+	responseBody, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, resp.StatusCode, err
+	}
+
+	if nClient.Debug {
+		log.Println("[DEBUG] Nitro Response: ", string(responseBody))
+	}
+	return responseBody, resp.StatusCode, nil
+}
+
+func getResourceStringByObject(obj interface{}) (string, error) {
+	resourceType := reflect.TypeOf(obj).Elem().Name()
+	if len(resourceType) < 4 || (!strings.Contains(resourceType, "Req") && !strings.Contains(resourceType, "Res")) {
+		return "", fmt.Errorf("Unable to
get resource name from '%s'", resourceType) + } + resourceName := resourceType[:len(resourceType)-3] + resourceBytes := make([]byte, 0) + for index, character := range []byte(resourceName) { + if index > 0 && character < 97 { + resourceBytes = append(resourceBytes, []byte("_"+string(character+32))...) + } else if character < 97 { + resourceBytes = append(resourceBytes, character + 32) + } else { + resourceBytes = append(resourceBytes, character) + } + } + return string(resourceBytes), nil +} + +func getOptions(options []string) string { + res := "" + if len(options) > 0 { + for index, option := range options { + if index == 0 { + res = "?" + option + } else { + res = res + "&" + option + } + } + } + return res +} \ No newline at end of file diff --git a/vendor/github.com/minsikl/netscaler-nitro-go/datatypes/datatypes.go b/vendor/github.com/minsikl/netscaler-nitro-go/datatypes/datatypes.go new file mode 100644 index 0000000000..2875f00ed2 --- /dev/null +++ b/vendor/github.com/minsikl/netscaler-nitro-go/datatypes/datatypes.go @@ -0,0 +1,205 @@ +package datatypes + +const ( + CONFIG = "config" + STAT = "stat" + +) + +// Base +type BaseRes struct { + Errorcode *int `json:"errorcode,omitempty"` + Message *string `json:"message,omitempty"` + Severity *string `json:"severity,omitempty"` +} + +// service +type Service struct { + Name *string `json:"name,omitempty"` + Ip *string `json:"ip,omitempty"` + Ipaddress *string `json:"ipaddress,omitempty"` + ServiceType *string `json:"servicetype,omitempty"` + Port *int `json:"port,omitempty"` + Weight *int `json:"weight,omitempty"` + Maxclient *string `json:"maxclient,omitempty"` +} + +type ServiceReq struct { + Service *Service `json:"service,omitempty"` +} + +type ServiceRes struct { + BaseRes + Service []Service `json:"service,omitempty"` +} + +// lbvserver +type Lbvserver struct { + Name *string `json:"name,omitempty"` + ServiceType *string `json:"servicetype,omitempty"` + Port *int `json:"port,omitempty"` + Lbmethod *string `json:"lbmethod,omitempty"` + Ipv46 *string `json:"ipv46,omitempty"` + Persistencetype *string `json:"persistencetype,omitempty"` +} + +type LbvserverReq struct { + Lbvserver *Lbvserver `json:"lbvserver,omitempty"` +} + +type LbvserverRes struct { + BaseRes + Lbvserver []Lbvserver `json:"lbvserver,omitempty"` +} + +//lbvserver_service_binding +type LbvserverServiceBinding struct { + Name *string `json:"name,omitempty"` + ServiceName *string `json:"serviceName,omitempty"` +} + +type LbvserverServiceBindingReq struct { + LbvserverServiceBinding *LbvserverServiceBinding `json:"lbvserver_service_binding,omitempty"` +} + +type LbvserverServiceBindingRes struct { + BaseRes + LbvserverServiceBinding []LbvserverServiceBinding `json:"lbvserver_service_binding,omitempty"` +} + +// systemfile +type Systemfile struct { + Filename *string `json:"filename,omitempty"` + Filelocation *string `json:"filelocation,omitempty"` + Filecontent *string `json:"filecontent,omitempty"` + Fileencoding *string `json:"fileencoding,omitempty"` +} + +type SystemfileReq struct { + Systemfile *Systemfile `json:"systemfile,omitempty"` +} + +type SystemfileRes struct { + BaseRes + Systemfile []Systemfile `json:"systemfile,omitempty"` +} + +// nsfeature +type Nsfeature struct { + Feature []string `json:"feature"` +} + +type NsfeatureReq struct { + Nsfeature *Nsfeature `json:"nsfeature,omitempty"` +} + +type NsfeatureRes struct { + BaseRes + Nsfeature []Nsfeature `json:"nsfeature,omitempty"` +} + +// sslcertkey +type Sslcertkey struct { + Certkey *string 
`json:"certkey,omitempty"` + Cert *string `json:"cert,omitempty"` + Key *string `json:"key,omitempty"` +} + +type SslcertkeyReq struct { + Sslcertkey *Sslcertkey `json:"sslcertkey,omitempty"` +} + +type SslcertkeyRes struct { + BaseRes + Sslcertkey []Sslcertkey `json:"sslcertkey,omitempty"` +} + +// sslvserver_sslcertkey_binding +type SslvserverSslcertkeyBinding struct { + Vservername *string `json:"vservername,omitempty"` + Certkeyname *string `json:"certkeyname,omitempty"` +} + +type SslvserverSslcertkeyBindingReq struct { + SslvserverSslcertkeyBinding *SslvserverSslcertkeyBinding `json:"sslvserver_sslcertkey_binding,omitempty"` +} + +type SslvserverSslcertkeyBindingRes struct { + BaseRes + SslvserverSslcertkeyBinding []SslvserverSslcertkeyBinding `json:"sslvserver_sslcertkey_binding,omitempty"` +} + +// systemuser +type Systemuser struct { + Username *string `json:"username,omitempty"` + Password *string `json:"password,omitempty"` +} + +type SystemuserReq struct { + Systemuser *Systemuser `json:"systemuser,omitempty"` +} + +type SystemuserRes struct { + BaseRes + Systemuser []Systemuser `json:"systemuser,omitempty"` +} + +// hanode +type Hanode struct { + Id *string `json:"id,omitempty"` + Ipaddress *string `json:"ipaddress,omitempty"` + Hastatus *string `json:"hastatus,omitempty"` +} + +type HanodeReq struct { + Hanode *Hanode `json:"hanode,omitempty"` +} + +type HanodeRes struct { + BaseRes + Hanode []Hanode `json:"hanode,omitempty"` +} + +// nsrpcnode +type Nsrpcnode struct { + Ipaddress *string `json:"ipaddress,omitempty"` + Password *string `json:"password,omitempty"` +} + +type NsrpcnodeReq struct { + Nsrpcnode *Nsrpcnode `json:"nsrpcnode,omitempty"` +} + +type NsrpcnodeRes struct { + BaseRes + Nsrpcnode []Nsrpcnode `json:"nsrpcnode,omitempty"` +} + +// hafiles +type Hafiles struct { + Mode []string `json:"mode,omitempty"` +} + +type HafilesReq struct { + Hafiles *Hafiles `json:"hafiles,omitempty"` +} + +type HafilesRes struct { + BaseRes + Hafiles []Hafiles `json:"hafiles,omitempty"` +} + +//service_lbmonitor_binding +type ServiceLbmonitorBinding struct { + Name *string `json:"name,omitempty"` + MonitorName *string `json:"monitor_name,omitempty"` +} + +type ServiceLbmonitorBindingReq struct { + ServiceLbmonitorBinding *ServiceLbmonitorBinding `json:"service_lbmonitor_binding,omitempty"` +} + +type ServiceLbmonitorBindingRes struct { + BaseRes + ServiceLbmonitorBinding []ServiceLbmonitorBinding `json:"service_lbmonitor_binding,omitempty"` +} diff --git a/vendor/github.com/minsikl/netscaler-nitro-go/op/helpers.go b/vendor/github.com/minsikl/netscaler-nitro-go/op/helpers.go new file mode 100644 index 0000000000..5183f7df64 --- /dev/null +++ b/vendor/github.com/minsikl/netscaler-nitro-go/op/helpers.go @@ -0,0 +1,18 @@ +package op + +// Convenience functions for returning pointers to values + +// Int returns a pointer to the int value provided +func Int(v int) *int { + return &v +} + +// String returns a pointer to the string value provided +func String(v string) *string { + return &v +} + +// Bool returns a pointer to the bool value provided +func Bool(v bool) *bool { + return &v +} diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 0000000000..2298515904 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this 
software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 0000000000..bcb8c8d2cb --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go new file mode 100644 index 0000000000..db6a6aa1a1 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copier_time.go @@ -0,0 +1,15 @@ +package copystructure + +import ( + "reflect" + "time" +) + +func init() { + Copiers[reflect.TypeOf(time.Time{})] = timeCopier +} + +func timeCopier(v interface{}) (interface{}, error) { + // Just... copy it. + return v.(time.Time), nil +} diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go new file mode 100644 index 0000000000..0e725ea723 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -0,0 +1,477 @@ +package copystructure + +import ( + "errors" + "reflect" + "sync" + + "github.com/mitchellh/reflectwalk" +) + +// Copy returns a deep copy of v. +func Copy(v interface{}) (interface{}, error) { + return Config{}.Copy(v) +} + +// CopierFunc is a function that knows how to deep copy a specific type. +// Register these globally with the Copiers variable. +type CopierFunc func(interface{}) (interface{}, error) + +// Copiers is a map of types that behave specially when they are copied. +// If a type is found in this map while deep copying, this function +// will be called to copy it instead of attempting to copy all fields. +// +// The key should be the type, obtained using: reflect.TypeOf(value with type). +// +// It is unsafe to write to this map after Copies have started. If you +// are writing to this map while also copying, wrap all modifications to +// this map as well as to Copy in a mutex. 
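+//
+// For example (refCounted is a hypothetical type, not part of this package):
+//
+//	type refCounted struct{ n *int }
+//	Copiers[reflect.TypeOf(refCounted{})] = func(v interface{}) (interface{}, error) {
+//		// Share the counter rather than deep copying it.
+//		return v.(refCounted), nil
+//	}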
+var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) + +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. +func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + +var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") + +type Config struct { + // Lock any types that are a sync.Locker and are not a mutex while copying. + // If there is an RLocker method, use that to get the sync.Locker. + Lock bool + + // Copiers is a map of types associated with a CopierFunc. Use the global + // Copiers map if this is nil. + Copiers map[reflect.Type]CopierFunc +} + +func (c Config) Copy(v interface{}) (interface{}, error) { + if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr { + return nil, errPointerRequired + } + + w := new(walker) + if c.Lock { + w.useLocks = true + } + + if c.Copiers == nil { + c.Copiers = Copiers + } + + err := reflectwalk.Walk(v, w) + if err != nil { + return nil, err + } + + // Get the result. If the result is nil, then we want to turn it + // into a typed nil if we can. + result := w.Result + if result == nil { + val := reflect.ValueOf(v) + result = reflect.Indirect(reflect.New(val.Type())).Interface() + } + + return result, nil +} + +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. +func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + +type walker struct { + Result interface{} + + depth int + ignoreDepth int + vals []reflect.Value + cs []reflect.Value + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. Store the interface + // types here, indexed by both the walk depth and the number of pointers + // already seen at that depth. Use ifaceKey to calculate the proper uint64 + // value. 
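+	// For example, ifaceKey(2, 1) yields 0x0000000200000001.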
+	ifaceTypes map[uint64]reflect.Type
+
+	// any locks we've taken, indexed by depth
+	locks []sync.Locker
+	// take locks while walking the structure
+	useLocks bool
+}
+
+func (w *walker) Enter(l reflectwalk.Location) error {
+	w.depth++
+
+	// ensure we have enough elements to index via w.depth
+	for w.depth >= len(w.locks) {
+		w.locks = append(w.locks, nil)
+	}
+
+	for len(w.ps) < w.depth+1 {
+		w.ps = append(w.ps, 0)
+	}
+
+	return nil
+}
+
+func (w *walker) Exit(l reflectwalk.Location) error {
+	locker := w.locks[w.depth]
+	w.locks[w.depth] = nil
+	if locker != nil {
+		defer locker.Unlock()
+	}
+
+	// clear out pointers and interfaces as we exit the stack
+	w.ps[w.depth] = 0
+
+	for k := range w.ifaceTypes {
+		mask := uint64(^uint32(0))
+		if k&mask == uint64(w.depth) {
+			delete(w.ifaceTypes, k)
+		}
+	}
+
+	w.depth--
+	if w.ignoreDepth > w.depth {
+		w.ignoreDepth = 0
+	}
+
+	if w.ignoring() {
+		return nil
+	}
+
+	switch l {
+	case reflectwalk.Map:
+		fallthrough
+	case reflectwalk.Slice:
+		// Pop map off our container
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.MapValue:
+		// Pop off the key and value
+		mv := w.valPop()
+		mk := w.valPop()
+		m := w.cs[len(w.cs)-1]
+
+		// If mv is the zero value, SetMapIndex deletes the key from the map,
+		// or in this case never adds it. We need to create a properly typed
+		// zero value so that this key can be set.
+		if !mv.IsValid() {
+			mv = reflect.Zero(m.Type().Elem())
+		}
+		m.SetMapIndex(mk, mv)
+	case reflectwalk.SliceElem:
+		// Pop off the value and the index and set it on the slice
+		v := w.valPop()
+		i := w.valPop().Interface().(int)
+		if v.IsValid() {
+			s := w.cs[len(w.cs)-1]
+			se := s.Index(i)
+			if se.CanSet() {
+				se.Set(v)
+			}
+		}
+	case reflectwalk.Struct:
+		w.replacePointerMaybe()
+
+		// Remove the struct from the container stack
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.StructField:
+		// Pop off the value and the field
+		v := w.valPop()
+		f := w.valPop().Interface().(reflect.StructField)
+		if v.IsValid() {
+			s := w.cs[len(w.cs)-1]
+			sf := reflect.Indirect(s).FieldByName(f.Name)
+
+			if sf.CanSet() {
+				sf.Set(v)
+			}
+		}
+	case reflectwalk.WalkLoc:
+		// Clear out the slices for GC
+		w.cs = nil
+		w.vals = nil
+	}
+
+	return nil
+}
+
+func (w *walker) Map(m reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(m)
+
+	// Create the map. If the map itself is nil, then just make a nil map
+	var newMap reflect.Value
+	if m.IsNil() {
+		newMap = reflect.Indirect(reflect.New(m.Type()))
+	} else {
+		newMap = reflect.MakeMap(m.Type())
+	}
+
+	w.cs = append(w.cs, newMap)
+	w.valPush(newMap)
+	return nil
+}
+
+func (w *walker) MapElem(m, k, v reflect.Value) error {
+	return nil
+}
+
+func (w *walker) PointerEnter(v bool) error {
+	if v {
+		w.ps[w.depth]++
+	}
+	return nil
+}
+
+func (w *walker) PointerExit(v bool) error {
+	if v {
+		w.ps[w.depth]--
+	}
+	return nil
+}
+
+func (w *walker) Interface(v reflect.Value) error {
+	if !v.IsValid() {
+		return nil
+	}
+	if w.ifaceTypes == nil {
+		w.ifaceTypes = make(map[uint64]reflect.Type)
+	}
+
+	w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
+	return nil
+}
+
+func (w *walker) Primitive(v reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(v)
+
+	// IsValid verifies that v is non-zero and CanInterface verifies
+	// that we're allowed to read this value (unexported fields).
+	var newV reflect.Value
+	if v.IsValid() && v.CanInterface() {
+		newV = reflect.New(v.Type())
+		newV.Elem().Set(v)
+	}
+
+	w.valPush(newV)
+	w.replacePointerMaybe()
+	return nil
+}
+
+func (w *walker) Slice(s reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(s)
+
+	var newS reflect.Value
+	if s.IsNil() {
+		newS = reflect.Indirect(reflect.New(s.Type()))
+	} else {
+		newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap())
+	}
+
+	w.cs = append(w.cs, newS)
+	w.valPush(newS)
+	return nil
+}
+
+func (w *walker) SliceElem(i int, elem reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+
+	// We don't write the slice here because elem might still be
+	// arbitrarily complex. Just record the index and continue on.
+	w.valPush(reflect.ValueOf(i))
+
+	return nil
+}
+
+func (w *walker) Struct(s reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(s)
+
+	var v reflect.Value
+	if c, ok := Copiers[s.Type()]; ok {
+		// We have a Copier for this struct, so we use that copier to
+		// get the copy, and we ignore anything deeper than this.
+		w.ignoreDepth = w.depth
+
+		dup, err := c(s.Interface())
+		if err != nil {
+			return err
+		}
+
+		v = reflect.ValueOf(dup)
+	} else {
+		// No copier, we copy ourselves and allow reflectwalk to guide
+		// us deeper into the structure for copying.
+		v = reflect.New(s.Type())
+	}
+
+	// Push the value onto the value stack for setting the struct field,
+	// and add the struct itself to the containers stack in case we walk
+	// deeper so that its own fields can be modified.
+	w.valPush(v)
+	w.cs = append(w.cs, v)
+
+	return nil
+}
+
+func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+
+	// If PkgPath is non-empty, this is a private (unexported) field.
+	// We do not set this unexported since the Go runtime doesn't allow us.
+	if f.PkgPath != "" {
+		return reflectwalk.SkipEntry
+	}
+
+	// Push the field onto the stack; we'll handle it when we exit
+	// the struct field in Exit...
+	w.valPush(reflect.ValueOf(f))
+	return nil
+}
+
+// ignore causes the walker to ignore any more values until we exit the
+// current depth.
+func (w *walker) ignore() {
+	w.ignoreDepth = w.depth
+}
+
+func (w *walker) ignoring() bool {
+	return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
+}
+
+func (w *walker) pointerPeek() bool {
+	return w.ps[w.depth] > 0
+}
+
+func (w *walker) valPop() reflect.Value {
+	result := w.vals[len(w.vals)-1]
+	w.vals = w.vals[:len(w.vals)-1]
+
+	// If we're out of values, that means we popped everything off. In
+	// this case, we reset the result so the next pushed value becomes
+	// the result.
+	if len(w.vals) == 0 {
+		w.Result = nil
+	}
+
+	return result
+}
+
+func (w *walker) valPush(v reflect.Value) {
+	w.vals = append(w.vals, v)
+
+	// If we haven't set the result yet, then this is the result since
+	// it is the first (outermost) value we're seeing.
+	if w.Result == nil && v.IsValid() {
+		w.Result = v.Interface()
+	}
+}
+
+func (w *walker) replacePointerMaybe() {
+	// Determine the last pointer value. If it is NOT a pointer, then
+	// we need to push that onto the stack.
+ if !w.pointerPeek() { + w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p + } + + w.valPush(v) +} + +// if this value is a Locker, lock it and add it to the locks slice +func (w *walker) lock(v reflect.Value) { + if !w.useLocks { + return + } + + if !v.IsValid() || !v.CanInterface() { + return + } + + type rlocker interface { + RLocker() sync.Locker + } + + var locker sync.Locker + + // We can't call Interface() on a value directly, since that requires + // a copy. This is OK, since the pointer to a value which is a sync.Locker + // is also a sync.Locker. + if v.Kind() == reflect.Ptr { + switch l := v.Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } else if v.CanAddr() { + switch l := v.Addr().Interface().(type) { + case rlocker: + // don't lock a mutex directly + if _, ok := l.(*sync.RWMutex); !ok { + locker = l.RLocker() + } + case sync.Locker: + locker = l + } + } + + // still no callable locker + if locker == nil { + return + } + + // don't lock a mutex directly + switch locker.(type) { + case *sync.Mutex, *sync.RWMutex: + return + } + + locker.Lock() + w.locks[w.depth] = locker +} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 0000000000..d70706d5b3 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. 
This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 0000000000..47e1f9ef8e --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,137 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +func dirUnix() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // If that fails, try getent + var stdout bytes.Buffer + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
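+		// exec.ErrNotFound means the getent binary itself could not be
+		// found, in which case we fall through to the shell-based
+		// fallback below instead of failing.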
+ if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd = exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + home = os.Getenv("USERPROFILE") + } + if home == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE new file mode 100644 index 0000000000..a3866a291f --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md new file mode 100644 index 0000000000..7d0de5bf5a --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/README.md @@ -0,0 +1,61 @@ +# hashstructure + +hashstructure is a Go library for creating a unique hash value +for arbitrary values in Go. + +This can be used to key values in a hash (for use in a map, set, etc.) +that are complex. The most common use case is comparing two values without +sending data across the network, caching values locally (de-dup), and so on. + +## Features + + * Hash any arbitrary Go value, including complex types. + + * Tag a struct field to ignore it and not affect the hash value. + + * Tag a slice type struct field to treat it as a set where ordering + doesn't affect the hash code but the field itself is still taken into + account to create the hash value. + + * Optionally specify a custom hash function to optimize for speed, collision + avoidance for your data set, etc. 
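+
+As an illustrative sketch (the struct and field names here are hypothetical),
+the tag-based features above are declared directly on struct fields:
+
+    type User struct {
+        Name     string
+        Password string   `hash:"ignore"` // never affects the hash value
+        Roles    []string `hash:"set"`    // ordering does not affect the hash
+    }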
+ +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/hashstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). + +A quick code example is shown below: + + + type ComplexStruct struct { + Name string + Age uint + Metadata map[string]interface{} + } + + v := ComplexStruct{ + Name: "mitchellh", + Age: 64, + Metadata: map[string]interface{}{ + "car": true, + "location": "California", + "siblings": []string{"Bob", "John"}, + }, + } + + hash, err := hashstructure.Hash(v, nil) + if err != nil { + panic(err) + } + + fmt.Printf("%d", hash) + // Output: + // 2307517237273902113 diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go new file mode 100644 index 0000000000..6f586fa772 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go @@ -0,0 +1,323 @@ +package hashstructure + +import ( + "encoding/binary" + "fmt" + "hash" + "hash/fnv" + "reflect" +) + +// HashOptions are options that are available for hashing. +type HashOptions struct { + // Hasher is the hash function to use. If this isn't set, it will + // default to FNV. + Hasher hash.Hash64 + + // TagName is the struct tag to look at when hashing the structure. + // By default this is "hash". + TagName string +} + +// Hash returns the hash value of an arbitrary value. +// +// If opts is nil, then default options will be used. See HashOptions +// for the default values. +// +// Notes on the value: +// +// * Unexported fields on structs are ignored and do not affect the +// hash value. +// +// * Adding an exported field to a struct with the zero value will change +// the hash value. +// +// For structs, the hashing can be controlled using tags. For example: +// +// struct { +// Name string +// UUID string `hash:"ignore"` +// } +// +// The available tag values are: +// +// * "ignore" - The field will be ignored and not affect the hash code. +// +// * "set" - The field will be treated as a set, where ordering doesn't +// affect the hash code. This only works for slices. +// +func Hash(v interface{}, opts *HashOptions) (uint64, error) { + // Create default options + if opts == nil { + opts = &HashOptions{} + } + if opts.Hasher == nil { + opts.Hasher = fnv.New64() + } + if opts.TagName == "" { + opts.TagName = "hash" + } + + // Reset the hash + opts.Hasher.Reset() + + // Create our walker and walk the structure + w := &walker{ + h: opts.Hasher, + tag: opts.TagName, + } + return w.visit(reflect.ValueOf(v), nil) +} + +type walker struct { + h hash.Hash64 + tag string +} + +type visitOpts struct { + // Flags are a bitmask of flags to affect behavior of this visit + Flags visitFlag + + // Information about the struct containing this field + Struct interface{} + StructField string +} + +func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { + // Loop since these can be wrapped in multiple layers of pointers + // and interfaces. + for { + // If we have an interface, dereference it. We have to do this up + // here because it might be a nil in there and the check below must + // catch that. + if v.Kind() == reflect.Interface { + v = v.Elem() + continue + } + + if v.Kind() == reflect.Ptr { + v = reflect.Indirect(v) + continue + } + + break + } + + // If it is nil, treat it like a zero. 
+ if !v.IsValid() { + var tmp int8 + v = reflect.ValueOf(tmp) + } + + // Binary writing can use raw ints, we have to convert to + // a sized-int, we'll choose the largest... + switch v.Kind() { + case reflect.Int: + v = reflect.ValueOf(int64(v.Int())) + case reflect.Uint: + v = reflect.ValueOf(uint64(v.Uint())) + case reflect.Bool: + var tmp int8 + if v.Bool() { + tmp = 1 + } + v = reflect.ValueOf(tmp) + } + + k := v.Kind() + + // We can shortcut numeric values by directly binary writing them + if k >= reflect.Int && k <= reflect.Complex64 { + // A direct hash calculation + w.h.Reset() + err := binary.Write(w.h, binary.LittleEndian, v.Interface()) + return w.h.Sum64(), err + } + + switch k { + case reflect.Array: + var h uint64 + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + h = hashUpdateOrdered(w.h, h, current) + } + + return h, nil + + case reflect.Map: + var includeMap IncludableMap + if opts != nil && opts.Struct != nil { + if v, ok := opts.Struct.(IncludableMap); ok { + includeMap = v + } + } + + // Build the hash for the map. We do this by XOR-ing all the key + // and value hashes. This makes it deterministic despite ordering. + var h uint64 + for _, k := range v.MapKeys() { + v := v.MapIndex(k) + if includeMap != nil { + incl, err := includeMap.HashIncludeMap( + opts.StructField, k.Interface(), v.Interface()) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + kh, err := w.visit(k, nil) + if err != nil { + return 0, err + } + vh, err := w.visit(v, nil) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + + return h, nil + + case reflect.Struct: + var include Includable + parent := v.Interface() + if impl, ok := parent.(Includable); ok { + include = impl + } + + t := v.Type() + h, err := w.visit(reflect.ValueOf(t.Name()), nil) + if err != nil { + return 0, err + } + + l := v.NumField() + for i := 0; i < l; i++ { + if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + var f visitFlag + fieldType := t.Field(i) + if fieldType.PkgPath != "" { + // Unexported + continue + } + + tag := fieldType.Tag.Get(w.tag) + if tag == "ignore" { + // Ignore this field + continue + } + + // Check if we implement includable and check it + if include != nil { + incl, err := include.HashInclude(fieldType.Name, v) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + switch tag { + case "set": + f |= visitFlagSet + } + + kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) + if err != nil { + return 0, err + } + + vh, err := w.visit(v, &visitOpts{ + Flags: f, + Struct: parent, + StructField: fieldType.Name, + }) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + } + + return h, nil + + case reflect.Slice: + // We have two behaviors here. If it isn't a set, then we just + // visit all the elements. If it is a set, then we do a deterministic + // hash code. 
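+		// For example, []string{"a", "b"} and []string{"b", "a"} produce
+		// the same hash when tagged as a set, because per-element hashes
+		// are combined with XOR instead of in sequence.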
+ var h uint64 + var set bool + if opts != nil { + set = (opts.Flags & visitFlagSet) != 0 + } + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + if set { + h = hashUpdateUnordered(h, current) + } else { + h = hashUpdateOrdered(w.h, h, current) + } + } + + return h, nil + + case reflect.String: + // Directly hash + w.h.Reset() + _, err := w.h.Write([]byte(v.String())) + return w.h.Sum64(), err + + default: + return 0, fmt.Errorf("unknown kind to hash: %s", k) + } + + return 0, nil +} + +func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { + // For ordered updates, use a real hash function + h.Reset() + + // We just panic if the binary writes fail because we are writing + // an int64 which should never be fail-able. + e1 := binary.Write(h, binary.LittleEndian, a) + e2 := binary.Write(h, binary.LittleEndian, b) + if e1 != nil { + panic(e1) + } + if e2 != nil { + panic(e2) + } + + return h.Sum64() +} + +func hashUpdateUnordered(a, b uint64) uint64 { + return a ^ b +} + +// visitFlag is used as a bitmask for affecting visit behavior +type visitFlag uint + +const ( + visitFlagInvalid visitFlag = iota + visitFlagSet = iota << 1 +) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go new file mode 100644 index 0000000000..b6289c0bee --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/include.go @@ -0,0 +1,15 @@ +package hashstructure + +// Includable is an interface that can optionally be implemented by +// a struct. It will be called for each field in the struct to check whether +// it should be included in the hash. +type Includable interface { + HashInclude(field string, v interface{}) (bool, error) +} + +// IncludableMap is an interface that can optionally be implemented by +// a struct. It will be called when a map-type field is found to ask the +// struct if the map item should be included in the hash. +type IncludableMap interface { + HashIncludeMap(field string, k, v interface{}) (bool, error) +} diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
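For context, here is a minimal sketch of implementing the `Includable` interface from `include.go` above; the `Config` type and its fields are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

// Config is a hypothetical type used only for illustration.
type Config struct {
	Name  string
	Debug bool
}

// HashInclude excludes the Debug field from hashing, so toggling
// Debug does not change the computed hash.
func (c Config) HashInclude(field string, v interface{}) (bool, error) {
	return field != "Debug", nil
}

func main() {
	h1, err := hashstructure.Hash(Config{Name: "a", Debug: false}, nil)
	if err != nil {
		panic(err)
	}
	h2, err := hashstructure.Hash(Config{Name: "a", Debug: true}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(h1 == h2) // true: Debug is excluded from both hashes
}
```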
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 0000000000..659d6885fc --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 0000000000..115ae67c11 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,154 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. 
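+//
+// As an illustrative sketch of the hook machinery this executes, a
+// composite hook is typically built from the helpers below and passed
+// to a decoder via DecoderConfig.DecodeHook:
+//
+//	hook := ComposeDecodeHookFunc(
+//		StringToSliceHookFunc(","),
+//		StringToTimeDurationHookFunc(),
+//	)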
+func DecodeHookExec(
+	raw DecodeHookFunc,
+	from reflect.Type, to reflect.Type,
+	data interface{}) (interface{}, error) {
+	// Build our arguments that reflect expects
+	argVals := make([]reflect.Value, 3)
+	argVals[0] = reflect.ValueOf(from)
+	argVals[1] = reflect.ValueOf(to)
+	argVals[2] = reflect.ValueOf(data)
+
+	switch f := typedDecodeHook(raw).(type) {
+	case DecodeHookFuncType:
+		return f(from, to, data)
+	case DecodeHookFuncKind:
+		return f(from.Kind(), to.Kind(), data)
+	default:
+		return nil, errors.New("invalid decode hook signature")
+	}
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		var err error
+		for _, f1 := range fs {
+			data, err = DecodeHookExec(f1, f, t, data)
+			if err != nil {
+				return nil, err
+			}
+
+			// Modify the from kind to be correct with the new data
+			f = nil
+			if val := reflect.ValueOf(data); val.IsValid() {
+				f = val.Type()
+			}
+		}
+
+		return data, nil
+	}
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+	return func(
+		f reflect.Kind,
+		t reflect.Kind,
+		data interface{}) (interface{}, error) {
+		if f != reflect.String || t != reflect.Slice {
+			return data, nil
+		}
+
+		raw := data.(string)
+		if raw == "" {
+			return []string{}, nil
+		}
+
+		return strings.Split(raw, sep), nil
+	}
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(time.Duration(5)) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return time.ParseDuration(data.(string))
+	}
+}
+
+func WeaklyTypedHook(
+	f reflect.Kind,
+	t reflect.Kind,
+	data interface{}) (interface{}, error) {
+	dataVal := reflect.ValueOf(data)
+	switch t {
+	case reflect.String:
+		switch f {
+		case reflect.Bool:
+			if dataVal.Bool() {
+				return "1", nil
+			} else {
+				return "0", nil
+			}
+		case reflect.Float32:
+			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+		case reflect.Int:
+			return strconv.FormatInt(dataVal.Int(), 10), nil
+		case reflect.Slice:
+			dataType := dataVal.Type()
+			elemKind := dataType.Elem().Kind()
+			if elemKind == reflect.Uint8 {
+				return string(dataVal.Interface().([]uint8)), nil
+			}
+		case reflect.Uint:
+			return strconv.FormatUint(dataVal.Uint(), 10), nil
+		}
+	}
+
+	return data, nil
+} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 0000000000..47a99e5af3 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 0000000000..6dee0ef0a2 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,823 @@ +// The mapstructure package exposes functionality to convert an +// arbitrary map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. 
+ ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes a map and uses reflection to convert it into the +// given Go native structure. val must be a pointer to a struct. +func Decode(m interface{}, rawVal interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: rawVal, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. 
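+//
+// A minimal usage sketch (the Config type here is hypothetical):
+//
+//	var out Config
+//	d, err := NewDecoder(&DecoderConfig{
+//		Result:           &out,
+//		WeaklyTypedInput: true,
+//	})
+//	if err == nil {
+//		err = d.Decode(input)
+//	}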
+func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(raw interface{}) error { + return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { + if data == nil { + // If the data is nil, then we don't set anything. + return nil + } + + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + // If the data value is invalid, then we just set the value + // to be the zero value. + val.Set(reflect.Zero(val.Type())) + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the data. + var err error + data, err = DecodeHookExec( + d.config.DecodeHook, + dataVal.Type(), val.Type(), data) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + dataKind := getKind(val) + switch dataKind { + case reflect.Bool: + err = d.decodeBool(name, data, val) + case reflect.Interface: + err = d.decodeBasic(name, data, val) + case reflect.String: + err = d.decodeString(name, data, val) + case reflect.Int: + err = d.decodeInt(name, data, val) + case reflect.Uint: + err = d.decodeUint(name, data, val) + case reflect.Float32: + err = d.decodeFloat(name, data, val) + case reflect.Struct: + err = d.decodeStruct(name, data, val) + case reflect.Map: + err = d.decodeMap(name, data, val) + case reflect.Ptr: + err = d.decodePtr(name, data, val) + case reflect.Slice: + err = d.decodeSlice(name, data, val) + case reflect.Func: + err = d.decodeFunc(name, data, val) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, dataKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metadata. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. 
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch { + case elemKind == reflect.Uint8: + val.SetString(string(dataVal.Interface().([]uint8))) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + 
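+	// With WeaklyTypedInput, the base is implied by the string's prefix
+	// because ParseUint below is called with base 0: "16" parses as 16,
+	// "0x10" as 16, and "010" as 8.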
case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(float64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if dataVal.Kind() != reflect.Map { + // In weak mode, we accept a slice of maps as an input... 
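+		// For example, []map[string]interface{}{{"a": 1}, {"b": 2}}
+		// decodes into a single merged map, since each element below is
+		// decoded into the same target map.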
+ if d.config.WeaklyTypedInput { + switch dataVal.Kind() { + case reflect.Array, reflect.Slice: + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil + } + } + + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + return nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + + // Make a new slice to hold our result, same size as the original data. 
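+		// (With WeaklyTypedInput, a lone value such as "4" was re-wrapped
+		// above as []interface{}{"4"}, so by this point it can decode
+		// into []int{4}.)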
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, val.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + for fieldType, field := range fields { + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. 
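+			// For example, a map key "name" still matches a struct
+			// field named Name, via strings.EqualFold below.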
+ for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. + continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
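For context, here is a minimal end-to-end sketch of the decoder defined above, using the "person" shape from the mapstructure README; the `Person` type is illustrative:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Person is an illustrative target type.
type Person struct {
	Name string
}

func main() {
	// The same shape as the README's JSON, already decoded into a map.
	input := map[string]interface{}{
		"type": "person",
		"name": "Mitchell",
	}

	var p Person
	// Decode matches "name" to the Name field case-insensitively;
	// the unmatched "type" key is simply ignored by default.
	if err := mapstructure.Decode(input, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Name) // Mitchell
}
```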
diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 0000000000..ac82cd2e15 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 0000000000..7c59d764c2 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,17 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 0000000000..d3cfe85459 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=Location location.go; DO NOT EDIT + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59} + +func (i Location) String() string { + if i+1 >= Location(len(_Location_index)) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 0000000000..ec0a62337e --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,339 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. 
+type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. + pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. 
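As a rough illustration of how the walker interfaces above are consumed (the `stringCollector` type is hypothetical), note that `Walk` passes interface-wrapped values to `Primitive` unmodified, as the surrounding code explains, so a walker should unwrap them itself:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// stringCollector implements PrimitiveWalker; Walk invokes Primitive for
// every leaf value, including map keys (walkMap walks keys and values).
type stringCollector struct {
	found []string
}

func (c *stringCollector) Primitive(v reflect.Value) error {
	// Interface values arrive unwrapped-by-hand so they stay settable;
	// unwrap before inspecting the kind.
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.String {
		c.found = append(c.found, v.String())
	}
	return nil
}

func main() {
	data := map[string]interface{}{
		"name": "web-1",
		"tags": []interface{}{"a", "b"},
	}

	c := &stringCollector{}
	if err := reflectwalk.Walk(data, c); err != nil {
		panic(err)
	}

	fmt.Println(c.found) // e.g. [name web-1 tags a b] (map order varies)
}
```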
+ originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + if err := walk(kv, w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + if sw, ok := w.(StructWalker); ok { + if err = sw.Struct(v); err != nil { + return + } + } + + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/rancher/go-rancher/Dockerfile.dapper b/vendor/github.com/rancher/go-rancher/Dockerfile.dapper new file mode 100644 index 0000000000..d5bfad7a74 --- /dev/null +++ b/vendor/github.com/rancher/go-rancher/Dockerfile.dapper @@ -0,0 +1,30 @@ +FROM ubuntu:16.04 +# FROM arm=armhf/ubuntu:16.04 + +ARG DAPPER_HOST_ARCH=amd64 +ENV HOST_ARCH=${DAPPER_HOST_ARCH} ARCH=${DAPPER_HOST_ARCH} + +RUN apt-get update && \ + apt-get install -y gcc ca-certificates git wget curl vim less file && \ + rm -f /bin/sh && ln -s /bin/bash /bin/sh + +ENV GOLANG_ARCH_amd64=amd64 GOLANG_ARCH_arm=armv6l GOLANG_ARCH=GOLANG_ARCH_${ARCH} \ + GOPATH=/go PATH=/go/bin:/usr/local/go/bin:${PATH} SHELL=/bin/bash + +ENV 
DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-1.10.3 \ + DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/v1.10.3-ros1/docker-1.10.3_arm \ + DOCKER_URL=DOCKER_URL_${ARCH} +RUN wget -O - ${!DOCKER_URL} > /usr/bin/docker && chmod +x /usr/bin/docker + +RUN wget -O - https://storage.googleapis.com/golang/go1.7.1.linux-${!GOLANG_ARCH}.tar.gz | tar -xzf - -C /usr/local && \ + go get github.com/rancher/trash && go get github.com/golang/lint/golint + +ENV DAPPER_SOURCE /go/src/github.com/rancher/go-rancher/ +ENV DAPPER_OUTPUT ./bin +ENV DAPPER_DOCKER_SOCKET true +ENV TRASH_CACHE ${DAPPER_SOURCE}/.trash-cache +ENV HOME ${DAPPER_SOURCE} +WORKDIR ${DAPPER_SOURCE} + +ENTRYPOINT ["./scripts/entry"] +CMD ["ci"] diff --git a/vendor/github.com/rancher/go-rancher/LICENSE b/vendor/github.com/rancher/go-rancher/LICENSE new file mode 100644 index 0000000000..f433b1a53f --- /dev/null +++ b/vendor/github.com/rancher/go-rancher/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/rancher/go-rancher/Makefile b/vendor/github.com/rancher/go-rancher/Makefile
new file mode 100644
index 0000000000..d7d72a16d5
--- /dev/null
+++ b/vendor/github.com/rancher/go-rancher/Makefile
@@ -0,0 +1,23 @@
+TARGETS := $(shell ls scripts)
+
+.dapper:
+	@echo Downloading dapper
+	@curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m` > .dapper.tmp
+	@@chmod +x .dapper.tmp
+	@./.dapper.tmp -v
+	@mv .dapper.tmp .dapper
+
+$(TARGETS): .dapper
+	./.dapper $@
+
+trash: .dapper
+	./.dapper -m bind trash
+
+trash-keep: .dapper
+	./.dapper -m bind trash -k
+
+deps: trash
+
+.DEFAULT_GOAL := ci
+
+.PHONY: $(TARGETS)
diff --git a/vendor/github.com/rancher/go-rancher/README.md b/vendor/github.com/rancher/go-rancher/README.md
new file mode 100644
index 0000000000..58e479a9d4
--- /dev/null
+++ b/vendor/github.com/rancher/go-rancher/README.md
@@ -0,0 +1,55 @@
+# Go Bindings for Rancher API
+
+# Generating Code
+First, you must have a master version of Rancher running. The best way to do this is:
+```sh
+docker run -p 8080:8080 -d rancher/server:master
+```
+
+Once Rancher is running, you can run the gen-schema.sh script:
+```sh
+./scripts/gen-schema.sh http://<server-ip>:8080
+
+# The default url is http://localhost:8080, so if rancher/server is listening on localhost, you can omit the url:
+./scripts/gen-schema.sh
+```
+
+This will add, remove, and modify Go files appropriately. Submit a PR that includes *all* of these changes.
+
+## Important caveats
+1. If you are running on macOS, you must have gnu-sed installed as `sed` for this to work properly.
+2. If you are running against cattle that is running out of an IDE and you don't have go-machine-service running (you probably don't), you'll see a number of unexpectedly removed or modified files such as `generated_host.go`, `generated_machine.go`, and `generated_*config.go`.
+
+# Building
+
+```sh
+godep go build ./client
+```
+
+# Tests
+
+```sh
+godep go test ./client
+```
+# Contact
+For bugs, questions, comments, corrections, suggestions, etc., open an issue in
+ [rancher/rancher](//github.com/rancher/rancher/issues) with a title starting with `[go-rancher] `.
+
+Or just [click here](//github.com/rancher/rancher/issues/new?title=%5Bgo-rancher%5D%20) to create a new issue.
+
+
+# License
+Copyright (c) 2014-2015 [Rancher Labs, Inc.](http://rancher.com)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+ diff --git a/vendor/github.com/rancher/go-rancher/main.go b/vendor/github.com/rancher/go-rancher/main.go new file mode 100644 index 0000000000..e8274c8a1b --- /dev/null +++ b/vendor/github.com/rancher/go-rancher/main.go @@ -0,0 +1,10 @@ +package main + +import ( + "fmt" + _ "github.com/rancher/go-rancher/client" +) + +func main() { + fmt.Println("I have nothing to do...") +} diff --git a/vendor/github.com/rancher/go-rancher/trash.conf b/vendor/github.com/rancher/go-rancher/trash.conf new file mode 100644 index 0000000000..30cb3a9597 --- /dev/null +++ b/vendor/github.com/rancher/go-rancher/trash.conf @@ -0,0 +1,6 @@ +github.com/pkg/errors 1d2e60385a13aaa66134984235061c2f9302520e +github.com/gorilla/context 215affda49addc4c8ef7e2534915df2c8c35c6cd +github.com/gorilla/mux f15e0c49460fd49eebe2bcc8486b05d1bef68d3a +github.com/gorilla/websocket 1551221275a7bd42978745a376b2531f791d88f3 +github.com/Sirupsen/logrus 26709e2714106fb8ad40b773b711ebce25b78914 +gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 diff --git a/vendor/github.com/renier/xmlrpc/LICENSE b/vendor/github.com/renier/xmlrpc/LICENSE new file mode 100644 index 0000000000..8103dd1391 --- /dev/null +++ b/vendor/github.com/renier/xmlrpc/LICENSE @@ -0,0 +1,19 @@ +Copyright (C) 2012 Dmitry Maksimov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/renier/xmlrpc/README.md b/vendor/github.com/renier/xmlrpc/README.md new file mode 100644 index 0000000000..12b7692e90 --- /dev/null +++ b/vendor/github.com/renier/xmlrpc/README.md @@ -0,0 +1,79 @@ +## Overview + +xmlrpc is an implementation of client side part of XMLRPC protocol in Go language. + +## Installation + +To install xmlrpc package run `go get github.com/kolo/xmlrpc`. To use +it in application add `"github.com/kolo/xmlrpc"` string to `import` +statement. + +## Usage + + client, _ := xmlrpc.NewClient("https://bugzilla.mozilla.org/xmlrpc.cgi", nil) + result := struct{ + Version string `xmlrpc:"version"` + }{} + client.Call("Bugzilla.version", nil, &result) + fmt.Printf("Version: %s\n", result.Version) // Version: 4.2.7+ + +Second argument of NewClient function is an object that implements +[http.RoundTripper](http://golang.org/pkg/net/http/#RoundTripper) +interface, it can be used to get more control over connection options. +By default it initialized by http.DefaultTransport object. + +### Arguments encoding + +xmlrpc package supports encoding of native Go data types to method +arguments. 
+
+Data types encoding rules:
+* int, int8, int16, int32, int64 encoded to int;
+* float32, float64 encoded to double;
+* bool encoded to boolean;
+* string encoded to string;
+* time.Time encoded to datetime.iso8601;
+* xmlrpc.Base64 encoded to base64;
+* slice encoded to array.
+
+Structs are encoded to struct by the following rules:
+* all public fields become struct members;
+* the field name becomes the member name;
+* if a field has an xmlrpc tag, its value becomes the member name.
+
+A server method can accept several arguments. To handle this case, a
+slice of empty interfaces (`[]interface{}`) is treated specially: each
+value of such a slice is encoded as a separate argument.
+
+### Result decoding
+
+The result of a remote call is decoded to a native Go data type.
+
+Data types decoding rules:
+* int, i4 decoded to int, int8, int16, int32, int64;
+* double decoded to float32, float64;
+* boolean decoded to bool;
+* string decoded to string;
+* array decoded to slice;
+* structs decoded following the rules described in the previous section;
+* datetime.iso8601 decoded as the time.Time data type;
+* base64 decoded to string.
+
+## Implementation details
+
+The xmlrpc package contains the clientCodec type, which implements the [rpc.ClientCodec](http://golang.org/pkg/net/rpc/#ClientCodec)
+interface of the [net/rpc](http://golang.org/pkg/net/rpc) package.
+
+The xmlrpc package works over the HTTP protocol, but some internal functions
+and data types were made public to make it easier to create another
+implementation of xmlrpc that works over another protocol. To encode a
+request body there is the EncodeMethodCall function. To decode a server
+response the Response data type can be used.
+
+## Contribution
+
+Feel free to fork the project, submit pull requests, and ask questions.
+
+## Authors
+
+Dmitry Maksimov (dmtmax@gmail.com)
diff --git a/vendor/github.com/renier/xmlrpc/client.go b/vendor/github.com/renier/xmlrpc/client.go
new file mode 100644
index 0000000000..eed6936dff
--- /dev/null
+++ b/vendor/github.com/renier/xmlrpc/client.go
@@ -0,0 +1,181 @@
+package xmlrpc
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/cookiejar"
+	"net/rpc"
+	"net/url"
+	"strconv"
+	"sync"
+	"time"
+)
+
+type Client struct {
+	*rpc.Client
+}
+
+// clientCodec is an rpc.ClientCodec interface implementation.
+type clientCodec struct {
+	// url is the URL of the xmlrpc service
+	url *url.URL
+
+	// httpClient performs the HTTP requests
+	httpClient *http.Client
+
+	// cookies stores cookies received on the last request
+	cookies http.CookieJar
+
+	// responses maps the sequence numbers of active requests to their HTTP
+	// responses, so that rpc.Client can mark each request as done.
+	responsesMu sync.RWMutex
+	responses map[uint64]*http.Response
+
+	response *Response
+
+	// ready is the channel used to link a request to its response.
+ ready chan *uint64 +} + +func (codec *clientCodec) WriteRequest(request *rpc.Request, args interface{}) (err error) { + httpRequest, err := NewRequest(codec.url.String(), request.ServiceMethod, args) + if err != nil { + return err + } + + if codec.cookies != nil { + for _, cookie := range codec.cookies.Cookies(codec.url) { + httpRequest.AddCookie(cookie) + } + } + + httpResponse, err := codec.httpClient.Do(httpRequest) + if err != nil { + return err + } + + if codec.cookies != nil { + codec.cookies.SetCookies(codec.url, httpResponse.Cookies()) + } + + codec.responsesMu.Lock() + codec.responses[request.Seq] = httpResponse + codec.responsesMu.Unlock() + + codec.ready <- &request.Seq + + return nil +} + +func (codec *clientCodec) ReadResponseHeader(response *rpc.Response) error { + seq, ok := <-codec.ready + if !ok { + return io.EOF + } + + codec.responsesMu.RLock() + httpResponse := codec.responses[*seq] + codec.responsesMu.RUnlock() + + defer func() { + httpResponse.Body.Close() + codec.responsesMu.Lock() + delete(codec.responses, *seq) + codec.responsesMu.Unlock() + }() + + contentLength := httpResponse.ContentLength + if contentLength == -1 { + if ntcoentLengthHeader, ok := httpResponse.Header["Ntcoent-Length"]; ok { + ntcoentLength, err := strconv.ParseInt(ntcoentLengthHeader[0], 10, 64) + if err == nil { + contentLength = ntcoentLength + } + } + } + + var respData []byte + var err error + if contentLength != -1 { + respData = make([]byte, contentLength) + _, err = io.ReadFull(httpResponse.Body, respData) + } else { + respData, err = ioutil.ReadAll(httpResponse.Body) + } + if err != nil { + return err + } + + resp := NewResponse(respData, httpResponse.StatusCode) + + if resp.Failed() { + err := resp.Err() + response.Error = fmt.Sprintf("%v", err) + return err + } + + codec.response = resp + + response.Seq = *seq + + if httpResponse.StatusCode < 200 || httpResponse.StatusCode >= 300 { + return &XmlRpcError{HttpStatusCode: httpResponse.StatusCode} + } + + return nil +} + +func (codec *clientCodec) ReadResponseBody(v interface{}) (err error) { + if v == nil { + return nil + } + + if err = codec.response.Unmarshal(v); err != nil { + return err + } + + return nil +} + +func (codec *clientCodec) Close() error { + transport := codec.httpClient.Transport.(*http.Transport) + transport.CloseIdleConnections() + close(codec.ready) + return nil +} + +// NewClient returns instance of rpc.Client object, that is used to send request to xmlrpc service. 
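A hedged sketch of typical client use, assuming the vendored fork's three-argument `NewClient` defined just below (the Bugzilla endpoint mirrors the README example above):

```go
package main

import (
	"fmt"
	"time"

	"github.com/renier/xmlrpc"
)

func main() {
	// A nil transport falls back to http.DefaultTransport; the timeout
	// is applied to the underlying http.Client.
	client, err := xmlrpc.NewClient("https://bugzilla.mozilla.org/xmlrpc.cgi", nil, 10*time.Second)
	if err != nil {
		panic(err)
	}

	result := struct {
		Version string `xmlrpc:"version"`
	}{}
	// Call is inherited from the embedded *rpc.Client; WriteRequest and
	// ReadResponseHeader above perform the HTTP round trip.
	if err := client.Call("Bugzilla.version", nil, &result); err != nil {
		panic(err)
	}
	fmt.Println("Bugzilla version:", result.Version)
}
```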
+func NewClient(requrl string, transport http.RoundTripper, timeout time.Duration) (*Client, error) { + if transport == nil { + transport = http.DefaultTransport + } + + httpClient := &http.Client{ + Transport: transport, + Timeout: timeout, + } + + jar, err := cookiejar.New(nil) + + if err != nil { + return nil, err + } + + u, err := url.Parse(requrl) + + if err != nil { + return nil, err + } + + codec := clientCodec{ + url: u, + httpClient: httpClient, + ready: make(chan *uint64), + responses: make(map[uint64]*http.Response), + cookies: jar, + } + + return &Client{rpc.NewClientWithCodec(&codec)}, nil +} diff --git a/vendor/github.com/renier/xmlrpc/decoder.go b/vendor/github.com/renier/xmlrpc/decoder.go new file mode 100644 index 0000000000..30fa4a91ee --- /dev/null +++ b/vendor/github.com/renier/xmlrpc/decoder.go @@ -0,0 +1,547 @@ +package xmlrpc + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "golang.org/x/text/encoding/charmap" +) + +const ( + iso8601 = "20060102T15:04:05" + iso8601hyphen = "2006-01-02T15:04:05Z" + iso8601hyphenTZ = "2006-01-02T15:04:05-07:00" +) + +var ( + // CharsetReader is a function to generate reader which converts a non UTF-8 + // charset into UTF-8. + CharsetReader func(string, io.Reader) (io.Reader, error) + + invalidXmlError = errors.New("invalid xml") + + dateFormats = []string{iso8601, iso8601hyphen, iso8601hyphenTZ} + + topArrayRE = regexp.MustCompile(`^<\?xml version="1.0" encoding=".+"\?>\s*\s*\s*\s*`) +) + +type TypeMismatchError string + +func (e TypeMismatchError) Error() string { return string(e) } + +type decoder struct { + *xml.Decoder +} + +func unmarshal(data []byte, v interface{}) (err error) { + dec := &decoder{xml.NewDecoder(bytes.NewBuffer(data))} + + if CharsetReader != nil { + dec.CharsetReader = CharsetReader + } else { + dec.CharsetReader = defaultCharsetReader + } + + var tok xml.Token + for { + if tok, err = dec.Token(); err != nil { + return err + } + + if t, ok := tok.(xml.StartElement); ok { + if t.Name.Local == "value" { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer value passed to unmarshal") + } + + val = val.Elem() + // Some APIs that normally return a collection, omit the []'s when + // the API returns a single value. 
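The single-value-to-slice promotion described in the comment above can be exercised through the exported Response type; a speculative sketch (the XML payload and the target struct are illustrative):

```go
package main

import (
	"fmt"

	"github.com/renier/xmlrpc"
)

func main() {
	// A server that normally returns an array of structs answers a
	// single-result query with one bare <struct>; unmarshal still fills
	// a slice by promoting the lone value to a one-element slice.
	raw := []byte(`<?xml version="1.0" encoding="UTF-8"?>
<methodResponse><params><param><value><struct>
<member><name>id</name><value><int>7</int></value></member>
</struct></value></param></params></methodResponse>`)

	var out []struct {
		ID int `xmlrpc:"id"`
	}
	if err := xmlrpc.NewResponse(raw, 200).Unmarshal(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // [{7}]
}
```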
+ if val.Kind() == reflect.Slice && !topArrayRE.MatchString(string(data)) { + val.Set(reflect.MakeSlice(val.Type(), 1, 1)) + val = val.Index(0) + } + + if err = dec.decodeValue(val); err != nil { + return err + } + + break + } + } + } + + // read until end of document + err = dec.Skip() + if err != nil && err != io.EOF { + return err + } + + return nil +} + +func (dec *decoder) decodeValue(val reflect.Value) error { + var tok xml.Token + var err error + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + var typeName string + for { + if tok, err = dec.Token(); err != nil { + return err + } + + if t, ok := tok.(xml.EndElement); ok { + if t.Name.Local == "value" { + return nil + } else { + return invalidXmlError + } + } + + if t, ok := tok.(xml.StartElement); ok { + typeName = t.Name.Local + break + } + + // Treat value data without type identifier as string + if t, ok := tok.(xml.CharData); ok { + if value := strings.TrimSpace(string(t)); value != "" { + if err = checkType(val, reflect.String); err != nil { + return err + } + + val.SetString(value) + return nil + } + } + } + + switch typeName { + case "struct": + ismap := false + pmap := val + valType := val.Type() + + if err = checkType(val, reflect.Struct); err != nil { + if checkType(val, reflect.Map) == nil { + if valType.Key().Kind() != reflect.String { + return fmt.Errorf("only maps with string key type can be unmarshalled") + } + ismap = true + } else if checkType(val, reflect.Interface) == nil && val.IsNil() { + var dummy map[string]interface{} + pmap = reflect.New(reflect.TypeOf(dummy)).Elem() + valType = pmap.Type() + ismap = true + } else { + return err + } + } + + var fields map[string]reflect.Value + + if !ismap { + fields = make(map[string]reflect.Value) + buildStructFieldMap(&fields, val) + } else { + // Create initial empty map + pmap.Set(reflect.MakeMap(valType)) + } + + // Process struct members. + StructLoop: + for { + if tok, err = dec.Token(); err != nil { + return err + } + switch t := tok.(type) { + case xml.StartElement: + if t.Name.Local != "member" { + return invalidXmlError + } + + tagName, fieldName, err := dec.readTag() + if err != nil { + return err + } + if tagName != "name" { + return invalidXmlError + } + + var fv reflect.Value + ok := true + + if !ismap { + fv, ok = fields[string(fieldName)] + } else { + fv = reflect.New(valType.Elem()) + } + + if ok { + for { + if tok, err = dec.Token(); err != nil { + return err + } + if t, ok := tok.(xml.StartElement); ok && t.Name.Local == "value" { + if err = dec.decodeValue(fv); err != nil { + return err + } + + // + if err = dec.Skip(); err != nil { + return err + } + + break + } + } + } + + // + if err = dec.Skip(); err != nil { + return err + } + + if ismap { + pmap.SetMapIndex(reflect.ValueOf(string(fieldName)), reflect.Indirect(fv)) + val.Set(pmap) + } + case xml.EndElement: + break StructLoop + } + } + case "array": + pslice := val + if checkType(val, reflect.Interface) == nil && val.IsNil() { + var dummy []interface{} + pslice = reflect.New(reflect.TypeOf(dummy)).Elem() + } else if err = checkType(val, reflect.Slice); err != nil { + // Check to see if we have an unexpected array when we expect + // a struct. Adjust by expecting an array of the struct type + // and see if things still work. 
+ // https://github.com/renier/xmlrpc/pull/2 + if val.Kind() == reflect.Struct { + pslice = reflect.New(reflect.SliceOf(reflect.TypeOf(val.Interface()))).Elem() + val = pslice + } else { + return err + } + } + + ArrayLoop: + for { + if tok, err = dec.Token(); err != nil { + return err + } + + switch t := tok.(type) { + case xml.StartElement: + if t.Name.Local != "data" { + return invalidXmlError + } + + slice := reflect.MakeSlice(pslice.Type(), 0, 0) + + DataLoop: + for { + if tok, err = dec.Token(); err != nil { + return err + } + + switch tt := tok.(type) { + case xml.StartElement: + if tt.Name.Local != "value" { + return invalidXmlError + } + + v := reflect.New(pslice.Type().Elem()) + if err = dec.decodeValue(v); err != nil { + return err + } + + slice = reflect.Append(slice, v.Elem()) + + // + if err = dec.Skip(); err != nil { + return err + } + case xml.EndElement: + pslice.Set(slice) + val.Set(pslice) + break DataLoop + } + } + case xml.EndElement: + break ArrayLoop + } + } + default: + if tok, err = dec.Token(); err != nil { + return err + } + + var data []byte + + switch t := tok.(type) { + case xml.EndElement: + return nil + case xml.CharData: + data = []byte(t.Copy()) + default: + return invalidXmlError + } + + ParseValue: + switch typeName { + case "int", "i4", "i8": + if checkType(val, reflect.Interface) == nil && val.IsNil() { + i, err := strconv.ParseInt(string(data), 10, 64) + if err != nil { + return err + } + + pi := reflect.New(reflect.TypeOf(i)).Elem() + pi.SetInt(i) + val.Set(pi) + } else if err = checkType(val, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64); err != nil { + return err + } else { + k := val.Kind() + isInt := k == reflect.Int || k == reflect.Int8 || k == reflect.Int16 || k == reflect.Int32 || k == reflect.Int64 + + if isInt { + i, err := strconv.ParseInt(string(data), 10, val.Type().Bits()) + if err != nil { + return err + } + + val.SetInt(i) + } else { + i, err := strconv.ParseUint(string(data), 10, val.Type().Bits()) + if err != nil { + return err + } + + val.SetUint(i) + } + } + case "string", "base64": + str := string(data) + if checkType(val, reflect.Interface) == nil && val.IsNil() { + pstr := reflect.New(reflect.TypeOf(str)).Elem() + pstr.SetString(str) + val.Set(pstr) + } else if err = checkType(val, reflect.String); err != nil { + valName := val.Type().Name() + if valName == "" { + valName = reflect.Indirect(val).Type().Name() + } + + if valName == "Time" { + timeField := val.FieldByName(valName) + if timeField.IsValid() { + val = timeField + } + typeName = "dateTime.iso8601" + goto ParseValue + } else if strings.HasPrefix(strings.ToLower(valName), "float") { + typeName = "double" + goto ParseValue + } + return err + } else { + val.SetString(str) + } + case "dateTime.iso8601": + err = nil + var t time.Time + for _, df := range dateFormats { + t, err = time.Parse(df, string(data)) + if err == nil { + break + } + } + if err != nil { + return err + } + + if checkType(val, reflect.Interface) == nil && val.IsNil() { + ptime := reflect.New(reflect.TypeOf(t)).Elem() + ptime.Set(reflect.ValueOf(t)) + val.Set(ptime) + } else if !reflect.TypeOf((time.Time)(t)).ConvertibleTo(val.Type()) { + return TypeMismatchError( + fmt.Sprintf( + "error: type mismatch error - can't decode %v (%s.%s) to time", + val.Kind(), + val.Type().PkgPath(), + val.Type().Name(), + ), + ) + } else { + val.Set(reflect.ValueOf(t).Convert(val.Type())) + } + case "boolean": + v, err := 
strconv.ParseBool(string(data)) + if err != nil { + return err + } + + if checkType(val, reflect.Interface) == nil && val.IsNil() { + pv := reflect.New(reflect.TypeOf(v)).Elem() + pv.SetBool(v) + val.Set(pv) + } else if err = checkType(val, reflect.Bool); err != nil { + return err + } else { + val.SetBool(v) + } + case "double": + if checkType(val, reflect.Interface) == nil && val.IsNil() { + i, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return err + } + + pdouble := reflect.New(reflect.TypeOf(i)).Elem() + pdouble.SetFloat(i) + val.Set(pdouble) + } else if err = checkType(val, reflect.Float32, reflect.Float64); err != nil { + return err + } else { + i, err := strconv.ParseFloat(string(data), val.Type().Bits()) + if err != nil { + return err + } + + val.SetFloat(i) + } + default: + return errors.New("unsupported type") + } + + // + if err = dec.Skip(); err != nil { + return err + } + } + + return nil +} + +func (dec *decoder) readTag() (string, []byte, error) { + var tok xml.Token + var err error + + var name string + for { + if tok, err = dec.Token(); err != nil { + return "", nil, err + } + + if t, ok := tok.(xml.StartElement); ok { + name = t.Name.Local + break + } + } + + value, err := dec.readCharData() + if err != nil { + return "", nil, err + } + + return name, value, dec.Skip() +} + +func (dec *decoder) readCharData() ([]byte, error) { + var tok xml.Token + var err error + + if tok, err = dec.Token(); err != nil { + return nil, err + } + + if t, ok := tok.(xml.CharData); ok { + return []byte(t.Copy()), nil + } else { + return nil, invalidXmlError + } +} + +func checkType(val reflect.Value, kinds ...reflect.Kind) error { + if len(kinds) == 0 { + return nil + } + + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + + match := false + + for _, kind := range kinds { + if val.Kind() == kind { + match = true + break + } + } + + if !match { + return TypeMismatchError(fmt.Sprintf("error: type mismatch - can't unmarshal %v to %v", + val.Kind(), kinds[0])) + } + + return nil +} + +func buildStructFieldMap(fieldMap *map[string]reflect.Value, val reflect.Value) { + valType := val.Type() + valFieldNum := valType.NumField() + for i := 0; i < valFieldNum; i++ { + field := valType.Field(i) + fieldVal := val.FieldByName(field.Name) + + if field.Anonymous { + // Drill down into embedded structs + buildStructFieldMap(fieldMap, fieldVal) + continue + } + + if fieldVal.CanSet() { + if fn := field.Tag.Get("xmlrpc"); fn != "" { + fn = strings.Split(fn, ",")[0] + (*fieldMap)[fn] = fieldVal + } else { + (*fieldMap)[field.Name] = fieldVal + } + } + } +} + +// http://stackoverflow.com/a/34712322/3160958 +// https://groups.google.com/forum/#!topic/golang-nuts/VudK_05B62k +func defaultCharsetReader(charset string, input io.Reader) (io.Reader, error) { + if charset == "iso-8859-1" || charset == "ISO-8859-1" { + return charmap.ISO8859_1.NewDecoder().Reader(input), nil + } else if strings.HasPrefix(charset, "utf") || strings.HasPrefix(charset, "UTF") { + return input, nil + } + + return nil, fmt.Errorf("Unknown charset: %s", charset) +} diff --git a/vendor/github.com/renier/xmlrpc/encoder.go b/vendor/github.com/renier/xmlrpc/encoder.go new file mode 100644 index 0000000000..2410aff8e3 --- /dev/null +++ b/vendor/github.com/renier/xmlrpc/encoder.go @@ -0,0 +1,204 @@ +package xmlrpc + +import ( + "bytes" + "encoding/xml" + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +type encodeFunc func(reflect.Value) ([]byte, error) + +func marshal(v interface{}) ([]byte, error) { + if v == nil 
{
+		return []byte{}, nil
+	}
+
+	val := reflect.ValueOf(v)
+	return encodeValue(val)
+}
+
+func encodeValue(val reflect.Value) ([]byte, error) {
+	var b []byte
+	var err error
+
+	if val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface {
+		if val.IsNil() {
+			return []byte("<nil/>"), nil
+		}
+
+		val = val.Elem()
+	}
+
+	switch val.Kind() {
+	case reflect.Struct:
+		switch val.Interface().(type) {
+		case time.Time:
+			t := val.Interface().(time.Time)
+			b = []byte(fmt.Sprintf("<dateTime.iso8601>%s</dateTime.iso8601>", t.Format(iso8601)))
+		default:
+			b, err = encodeStruct(val)
+		}
+	case reflect.Map:
+		b, err = encodeMap(val)
+	case reflect.Slice:
+		b, err = encodeSlice(val)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		b = []byte(fmt.Sprintf("<int>%s</int>", strconv.FormatInt(val.Int(), 10)))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		b = []byte(fmt.Sprintf("<int>%s</int>", strconv.FormatUint(val.Uint(), 10)))
+	case reflect.Float32, reflect.Float64:
+		b = []byte(fmt.Sprintf("<double>%s</double>",
+			strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits())))
+	case reflect.Bool:
+		if val.Bool() {
+			b = []byte("<boolean>1</boolean>")
+		} else {
+			b = []byte("<boolean>0</boolean>")
+		}
+	case reflect.String:
+		var buf bytes.Buffer
+
+		xml.Escape(&buf, []byte(val.String()))
+
+		if _, ok := val.Interface().(Base64); ok {
+			b = []byte(fmt.Sprintf("<base64>%s</base64>", buf.String()))
+		} else {
+			b = []byte(fmt.Sprintf("<string>%s</string>", buf.String()))
+		}
+	default:
+		return nil, fmt.Errorf("xmlrpc encode error: unsupported type")
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return []byte(fmt.Sprintf("<value>%s</value>", string(b))), nil
+}
+
+func encodeStruct(value reflect.Value) ([]byte, error) {
+	var b bytes.Buffer
+
+	b.WriteString("<struct>")
+
+	vals := []reflect.Value{value}
+	for j := 0; j < len(vals); j++ {
+		val := vals[j]
+		t := val.Type()
+		for i := 0; i < t.NumField(); i++ {
+			f := t.Field(i)
+			tag := f.Tag.Get("xmlrpc")
+			name := f.Name
+			fieldVal := val.FieldByName(f.Name)
+			fieldValKind := fieldVal.Kind()
+
+			// Omit unexported fields
+			if !fieldVal.CanInterface() {
+				continue
+			}
+
+			// Omit fields that are structs containing no fields themselves
+			if fieldValKind == reflect.Struct && fieldVal.NumField() == 0 {
+				continue
+			}
+
+			// Omit empty slices
+			if fieldValKind == reflect.Slice && fieldVal.Len() == 0 {
+				continue
+			}
+
+			// Omit empty fields (defined as nil pointers)
+			if tag != "" {
+				parts := strings.Split(tag, ",")
+				name = parts[0]
+				if len(parts) > 1 && parts[1] == "omitempty" {
+					if fieldValKind == reflect.Ptr && fieldVal.IsNil() {
+						continue
+					}
+				}
+			}
+
+			// Drill down into anonymous/embedded structs and do not expose the
+			// containing embedded struct in the request.
+			// This effectively pulls fields in embedded structs up so they look
+			// like part of the original struct in the request.
+			if f.Anonymous {
+				vals = append(vals, fieldVal)
+				continue
+			}
+
+			b.WriteString("<member>")
+			b.WriteString(fmt.Sprintf("<name>%s</name>", name))
+
+			p, err := encodeValue(fieldVal)
+			if err != nil {
+				return nil, err
+			}
+			b.Write(p)
+
+			b.WriteString("</member>")
+		}
+	}
+
+	b.WriteString("</struct>")
+
+	return b.Bytes(), nil
+}
+
+func encodeMap(val reflect.Value) ([]byte, error) {
+	var t = val.Type()
+
+	if t.Key().Kind() != reflect.String {
+		return nil, fmt.Errorf("xmlrpc encode error: only maps with string keys are supported")
+	}
+
+	var b bytes.Buffer
+
+	b.WriteString("<struct>")
+
+	keys := val.MapKeys()
+
+	for i := 0; i < val.Len(); i++ {
+		key := keys[i]
+		kval := val.MapIndex(key)
+
+		b.WriteString("<member>")
+		b.WriteString(fmt.Sprintf("<name>%s</name>", key.String()))
+
+		p, err := encodeValue(kval)
+
+		if err != nil {
+			return nil, err
+		}
+
+		b.Write(p)
+		b.WriteString("</member>")
+	}
+
+	b.WriteString("</struct>")
+
+	return b.Bytes(), nil
+}
+
+func encodeSlice(val reflect.Value) ([]byte, error) {
+	var b bytes.Buffer
+
+	b.WriteString("<array><data>")
+
+	for i := 0; i < val.Len(); i++ {
+		p, err := encodeValue(val.Index(i))
+		if err != nil {
+			return nil, err
+		}
+
+		b.Write(p)
+	}
+
+	b.WriteString("</data></array>")
+
+	return b.Bytes(), nil
+}
diff --git a/vendor/github.com/renier/xmlrpc/request.go b/vendor/github.com/renier/xmlrpc/request.go
new file mode 100644
index 0000000000..acb8251b2b
--- /dev/null
+++ b/vendor/github.com/renier/xmlrpc/request.go
@@ -0,0 +1,57 @@
+package xmlrpc
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+)
+
+func NewRequest(url string, method string, args interface{}) (*http.Request, error) {
+	var t []interface{}
+	var ok bool
+	if t, ok = args.([]interface{}); !ok {
+		if args != nil {
+			t = []interface{}{args}
+		}
+	}
+
+	body, err := EncodeMethodCall(method, t...)
+	if err != nil {
+		return nil, err
+	}
+
+	request, err := http.NewRequest("POST", url, bytes.NewReader(body))
+	if err != nil {
+		return nil, err
+	}
+
+	request.Header.Set("Content-Type", "text/xml")
+	request.Header.Set("Content-Length", fmt.Sprintf("%d", len(body)))
+
+	return request, nil
+}
+
+func EncodeMethodCall(method string, args ...interface{}) ([]byte, error) {
+	var b bytes.Buffer
+	b.WriteString(`<?xml version="1.0" encoding="UTF-8"?><methodCall>`)
+	b.WriteString(fmt.Sprintf("<methodName>%s</methodName>", method))
+
+	if args != nil {
+		b.WriteString("<params>")
+
+		for _, arg := range args {
+			p, err := marshal(arg)
+			if err != nil {
+				return nil, err
+			}
+
+			b.WriteString(fmt.Sprintf("<param>%s</param>", string(p)))
+		}
+
+		b.WriteString("</params>")
+	}
+
+	b.WriteString("</methodCall>")
+
+	return b.Bytes(), nil
+}
diff --git a/vendor/github.com/renier/xmlrpc/response.go b/vendor/github.com/renier/xmlrpc/response.go
new file mode 100644
index 0000000000..e9166b616a
--- /dev/null
+++ b/vendor/github.com/renier/xmlrpc/response.go
@@ -0,0 +1,57 @@
+package xmlrpc
+
+import (
+	"regexp"
+)
+
+var (
+	faultRx = regexp.MustCompile(`<fault>(\s|\S)+</fault>`)
+)
+
+type failedResponse struct {
+	Code  string `xmlrpc:"faultCode"`
+	Error string `xmlrpc:"faultString"`
+	HttpStatusCode int
+}
+
+func (r *failedResponse) err() error {
+	return &XmlRpcError{
+		Code:           r.Code,
+		Err:            r.Error,
+		HttpStatusCode: r.HttpStatusCode,
+	}
+}
+
+type Response struct {
+	data           []byte
+	httpStatusCode int
+}
+
+func NewResponse(data []byte, httpStatusCode int) *Response {
+	return &Response{
+		data:           data,
+		httpStatusCode: httpStatusCode,
+	}
+}
+
+func (r *Response) Failed() bool {
+	return faultRx.Match(r.data)
+}
+
+func (r *Response) Err() error {
+	failedResp := new(failedResponse)
+	if err := unmarshal(r.data, failedResp); err != nil {
+		return err
+	}
+	failedResp.HttpStatusCode = r.httpStatusCode
+
+	return
failedResp.err() +} + +func (r *Response) Unmarshal(v interface{}) error { + if err := unmarshal(r.data, v); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/renier/xmlrpc/test_server.rb b/vendor/github.com/renier/xmlrpc/test_server.rb new file mode 100644 index 0000000000..1b1ff8760f --- /dev/null +++ b/vendor/github.com/renier/xmlrpc/test_server.rb @@ -0,0 +1,25 @@ +# encoding: utf-8 + +require "xmlrpc/server" + +class Service + def time + Time.now + end + + def upcase(s) + s.upcase + end + + def sum(x, y) + x + y + end + + def error + raise XMLRPC::FaultException.new(500, "Server error") + end +end + +server = XMLRPC::Server.new 5001, 'localhost' +server.add_handler "service", Service.new +server.serve diff --git a/vendor/github.com/renier/xmlrpc/xmlrpc.go b/vendor/github.com/renier/xmlrpc/xmlrpc.go new file mode 100644 index 0000000000..068228254e --- /dev/null +++ b/vendor/github.com/renier/xmlrpc/xmlrpc.go @@ -0,0 +1,28 @@ +package xmlrpc + +import ( + "fmt" +) + +// xmlrpcError represents errors returned on xmlrpc request. +type XmlRpcError struct { + Code string + Err string + HttpStatusCode int +} + +// Error() method implements Error interface +func (e *XmlRpcError) Error() string { + return fmt.Sprintf( + "error: %s, code: %s, http status code: %d", + e.Err, e.Code, e.HttpStatusCode) +} + +// Base64 represents value in base64 encoding +type Base64 string + +type Params struct { + Params []interface{} +} + +type Struct map[string]interface{} diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE new file mode 100644 index 0000000000..488357b8af --- /dev/null +++ b/vendor/github.com/satori/go.uuid/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013-2016 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md new file mode 100644 index 0000000000..b6aad1c813 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/README.md @@ -0,0 +1,65 @@ +# UUID package for Go language + +[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) +[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid) +[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) + +This package provides pure Go implementation of Universally Unique Identifier (UUID). 
Both creation and parsing of UUIDs are supported.
+
+With 100% test coverage and benchmarks out of the box.
+
+Supported versions:
+* Version 1, based on timestamp and MAC address (RFC 4122)
+* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
+* Version 3, based on MD5 hashing (RFC 4122)
+* Version 4, based on random numbers (RFC 4122)
+* Version 5, based on SHA-1 hashing (RFC 4122)
+
+## Installation
+
+Use the `go` command:
+
+	$ go get github.com/satori/go.uuid
+
+## Requirements
+
+The UUID package requires Go >= 1.2.
+
+## Example
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/satori/go.uuid"
+)
+
+func main() {
+	// Creating UUID Version 4
+	u1 := uuid.NewV4()
+	fmt.Printf("UUIDv4: %s\n", u1)
+
+	// Parsing UUID from string input
+	u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	if err != nil {
+		fmt.Printf("Something went wrong: %s", err)
+	}
+	fmt.Printf("Successfully parsed: %s", u2)
+}
+```
+
+## Documentation
+
+[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at the GoDoc project.
+
+## Links
+* [RFC 4122](http://tools.ietf.org/html/rfc4122)
+* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
+
+## Copyright
+
+Copyright (C) 2013-2016 by Maxim Bublis.
+
+UUID package released under MIT License.
+See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
new file mode 100644
index 0000000000..295f3fc2c5
--- /dev/null
+++ b/vendor/github.com/satori/go.uuid/uuid.go
@@ -0,0 +1,481 @@
+// Copyright (C) 2013-2015 by Maxim Bublis
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// Package uuid provides implementation of Universally Unique Identifier (UUID).
+// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
+// version 2 (as specified in DCE 1.1).
+package uuid
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/rand"
+	"crypto/sha1"
+	"database/sql/driver"
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"net"
+	"os"
+	"sync"
+	"time"
+)
+
+// UUID layout variants.
+const (
+	VariantNCS = iota
+	VariantRFC4122
+	VariantMicrosoft
+	VariantFuture
+)
+
+// UUID DCE domains.
+const (
+	DomainPerson = iota
+	DomainGroup
+	DomainOrg
+)
+
+// Difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
+const epochStart = 122192928000000000
+
+// Used in the String method conversion.
+const dash byte = '-'
+
+// UUID v1/v2 storage.
+var (
+	storageMutex  sync.Mutex
+	storageOnce   sync.Once
+	epochFunc     = unixTimeFunc
+	clockSequence uint16
+	lastTime      uint64
+	hardwareAddr  [6]byte
+	posixUID      = uint32(os.Getuid())
+	posixGID      = uint32(os.Getgid())
+)
+
+// String parse helpers.
+var (
+	urnPrefix  = []byte("urn:uuid:")
+	byteGroups = []int{8, 4, 4, 4, 12}
+)
+
+func initClockSequence() {
+	buf := make([]byte, 2)
+	safeRandom(buf)
+	clockSequence = binary.BigEndian.Uint16(buf)
+}
+
+func initHardwareAddr() {
+	interfaces, err := net.Interfaces()
+	if err == nil {
+		for _, iface := range interfaces {
+			if len(iface.HardwareAddr) >= 6 {
+				copy(hardwareAddr[:], iface.HardwareAddr)
+				return
+			}
+		}
+	}
+
+	// Initialize hardwareAddr randomly in case
+	// no real network interfaces are present.
+	safeRandom(hardwareAddr[:])
+
+	// Set the multicast bit as recommended in RFC 4122.
+	hardwareAddr[0] |= 0x01
+}
+
+func initStorage() {
+	initClockSequence()
+	initHardwareAddr()
+}
+
+func safeRandom(dest []byte) {
+	if _, err := rand.Read(dest); err != nil {
+		panic(err)
+	}
+}
+
+// unixTimeFunc returns the difference in 100-nanosecond intervals between
+// the UUID epoch (October 15, 1582) and the current time.
+// This is the default epoch calculation function.
+func unixTimeFunc() uint64 {
+	return epochStart + uint64(time.Now().UnixNano()/100)
+}
+
+// UUID is a representation of a UUID compliant with the specification
+// described in RFC 4122.
+type UUID [16]byte
+
+// NullUUID can be used with the standard sql package to represent a
+// UUID value that can be NULL in the database.
+type NullUUID struct {
+	UUID  UUID
+	Valid bool
+}
+
+// Nil is the special form of UUID that is specified to have all
+// 128 bits set to zero.
+var Nil = UUID{}
+
+// Predefined namespace UUIDs.
+var (
+	NamespaceDNS, _  = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	NamespaceURL, _  = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+	NamespaceOID, _  = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+	NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+)
+
+// And returns the result of a binary AND of two UUIDs.
+func And(u1 UUID, u2 UUID) UUID {
+	u := UUID{}
+	for i := 0; i < 16; i++ {
+		u[i] = u1[i] & u2[i]
+	}
+	return u
+}
+
+// Or returns the result of a binary OR of two UUIDs.
+func Or(u1 UUID, u2 UUID) UUID {
+	u := UUID{}
+	for i := 0; i < 16; i++ {
+		u[i] = u1[i] | u2[i]
+	}
+	return u
+}
+
+// Equal returns true if u1 and u2 are equal, otherwise it returns false.
+func Equal(u1 UUID, u2 UUID) bool {
+	return bytes.Equal(u1[:], u2[:])
+}
+
+// Version returns the algorithm version used to generate the UUID.
+func (u UUID) Version() uint {
+	return uint(u[6] >> 4)
+}
+
+// Variant returns the UUID layout variant.
+func (u UUID) Variant() uint {
+	switch {
+	case (u[8] & 0x80) == 0x00:
+		return VariantNCS
+	case (u[8]&0xc0)|0x80 == 0x80:
+		return VariantRFC4122
+	case (u[8]&0xe0)|0xc0 == 0xc0:
+		return VariantMicrosoft
+	}
+	return VariantFuture
+}
+
+// Bytes returns a byte slice representation of the UUID.
+func (u UUID) Bytes() []byte {
+	return u[:]
+}
+
+// String returns the canonical string representation of the UUID:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
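+// The 36-byte result encodes the 16 bytes as hex in 8-4-4-4-12 groups,
+// with dashes at byte offsets 8, 13, 18, and 23 (mirroring byteGroups above).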
+func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = dash + hex.Encode(buf[9:13], u[4:6]) + buf[13] = dash + hex.Encode(buf[14:18], u[6:8]) + buf[18] = dash + hex.Encode(buf[19:23], u[8:10]) + buf[23] = dash + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// SetVersion sets version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets variant bits as described in RFC 4122. +func (u *UUID) SetVariant() { + u[8] = (u[8] & 0xbf) | 0x80 +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by String. +func (u UUID) MarshalText() (text []byte, err error) { + text = []byte(u.String()) + return +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Following formats are supported: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +func (u *UUID) UnmarshalText(text []byte) (err error) { + if len(text) < 32 { + err = fmt.Errorf("uuid: UUID string too short: %s", text) + return + } + + t := text[:] + braced := false + + if bytes.Equal(t[:9], urnPrefix) { + t = t[9:] + } else if t[0] == '{' { + braced = true + t = t[1:] + } + + b := u[:] + + for i, byteGroup := range byteGroups { + if i > 0 { + if t[0] != '-' { + err = fmt.Errorf("uuid: invalid string format") + return + } + t = t[1:] + } + + if len(t) < byteGroup { + err = fmt.Errorf("uuid: UUID string too short: %s", text) + return + } + + if i == 4 && len(t) > byteGroup && + ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) { + err = fmt.Errorf("uuid: UUID string too long: %s", text) + return + } + + _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup]) + if err != nil { + return + } + + t = t[byteGroup:] + b = b[byteGroup/2:] + } + + return +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (u UUID) MarshalBinary() (data []byte, err error) { + data = u.Bytes() + return +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return error if the slice isn't 16 bytes long. +func (u *UUID) UnmarshalBinary(data []byte) (err error) { + if len(data) != 16 { + err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + return + } + copy(u[:], data) + + return +} + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice is handled by UnmarshalBinary, while +// a longer byte slice or a string is handled by UnmarshalText. +func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + if len(src) == 16 { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// Value implements the driver.Valuer interface. +func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. +func (u *NullUUID) Scan(src interface{}) error { + if src == nil { + u.UUID, u.Valid = Nil, false + return nil + } + + // Delegate to UUID Scan function + u.Valid = true + return u.UUID.Scan(src) +} + +// FromBytes returns UUID converted from raw byte slice input. 
+// It will return error if the slice isn't 16 bytes long. +func FromBytes(input []byte) (u UUID, err error) { + err = u.UnmarshalBinary(input) + return +} + +// FromBytesOrNil returns UUID converted from raw byte slice input. +// Same behavior as FromBytes, but returns a Nil UUID on error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns UUID parsed from string input. +// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (u UUID, err error) { + err = u.UnmarshalText([]byte(input)) + return +} + +// FromStringOrNil returns UUID parsed from string input. +// Same behavior as FromString, but returns a Nil UUID on error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// Returns UUID v1/v2 storage state. +// Returns epoch timestamp, clock sequence, and hardware address. +func getStorage() (uint64, uint16, []byte) { + storageOnce.Do(initStorage) + + storageMutex.Lock() + defer storageMutex.Unlock() + + timeNow := epochFunc() + // Clock changed backwards since last UUID generation. + // Should increase clock sequence. + if timeNow <= lastTime { + clockSequence++ + } + lastTime = timeNow + + return timeNow, clockSequence, hardwareAddr[:] +} + +// NewV1 returns UUID based on current timestamp and MAC address. +func NewV1() UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := getStorage() + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + copy(u[10:], hardwareAddr) + + u.SetVersion(1) + u.SetVariant() + + return u +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func NewV2(domain byte) UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := getStorage() + + switch domain { + case DomainPerson: + binary.BigEndian.PutUint32(u[0:], posixUID) + case DomainGroup: + binary.BigEndian.PutUint32(u[0:], posixGID) + } + + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + u[9] = domain + + copy(u[10:], hardwareAddr) + + u.SetVersion(2) + u.SetVariant() + + return u +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(3) + u.SetVariant() + + return u +} + +// NewV4 returns random generated UUID. +func NewV4() UUID { + u := UUID{} + safeRandom(u[:]) + u.SetVersion(4) + u.SetVariant() + + return u +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(5) + u.SetVariant() + + return u +} + +// Returns UUID based on hashing of namespace UUID and name. 
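+// The hash input is ns[:] followed by the raw name bytes, so a given
+// (namespace, name) pair always yields the same UUID; for example,
+// repeated calls to NewV5(NamespaceDNS, "example.com") return identical
+// values. A SHA-1 digest (20 bytes) is truncated to the 16-byte UUID.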
+func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} diff --git a/vendor/github.com/softlayer/softlayer-go/LICENSE b/vendor/github.com/softlayer/softlayer-go/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/softlayer/softlayer-go/config/config.go b/vendor/github.com/softlayer/softlayer-go/config/config.go new file mode 100644 index 0000000000..36d5348e9d --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/config/config.go @@ -0,0 +1,145 @@ +/** + * // This file is borrowed from https://github.com/vaughan0/go-ini/blob/master/ini.go + * // which is distributed under the MIT license (https://github.com/vaughan0/go-ini/blob/master/LICENSE). + * + * Copyright (c) 2013 Vaughan Newton + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the + * following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT + * LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +// Package config provides functions for parsing INI configuration files. +package config + +import ( + "bufio" + "fmt" + "io" + "os" + "regexp" + "strings" +) + +var ( + sectionRegex = regexp.MustCompile(`^\[(.*)\]$`) + assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) +) + +// ErrSyntax is returned when there is a syntax error in an INI file. +type ErrSyntax struct { + Line int + Source string // The contents of the erroneous line, without leading or trailing whitespace +} + +func (e ErrSyntax) Error() string { + return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source) +} + +// A File represents a parsed INI file. +type File map[string]Section + +// A Section represents a single section of an INI file. +type Section map[string]string + +// Returns a named Section. A Section will be created if one does not already exist for the given name. +func (f File) Section(name string) Section { + section := f[name] + if section == nil { + section = make(Section) + f[name] = section + } + return section +} + +// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup. 
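+// A minimal usage sketch (the file path, section, and key names here are
+// illustrative only):
+//
+//	file, err := config.LoadFile("/etc/example.ini")
+//	if err != nil {
+//		// handle the error
+//	}
+//	value, ok := file.Get("section", "key")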
+func (f File) Get(section, key string) (value string, ok bool) { + if s := f[section]; s != nil { + value, ok = s[key] + } + return +} + +// Loads INI data from a reader and stores the data in the File. +func (f File) Load(in io.Reader) error { + bufin, ok := in.(*bufio.Reader) + if !ok { + bufin = bufio.NewReader(in) + } + return parseFile(bufin, f) +} + +// Loads INI data from a named file and stores the data in the File. +func (f File) LoadFile(file string) (err error) { + in, err := os.Open(file) + if err != nil { + return + } + defer in.Close() + return f.Load(in) +} + +func parseFile(in *bufio.Reader, file File) (err error) { + section := "" + lineNum := 0 + for done := false; !done; { + var line string + if line, err = in.ReadString('\n'); err != nil { + if err == io.EOF { + done = true + } else { + return + } + } + lineNum++ + line = strings.TrimSpace(line) + if len(line) == 0 { + // Skip blank lines + continue + } + if line[0] == ';' || line[0] == '#' { + // Skip comments + continue + } + + if groups := assignRegex.FindStringSubmatch(line); groups != nil { + key, val := groups[1], groups[2] + key, val = strings.TrimSpace(key), strings.TrimSpace(val) + file.Section(section)[key] = val + } else if groups := sectionRegex.FindStringSubmatch(line); groups != nil { + name := strings.TrimSpace(groups[1]) + section = name + // Create the section if it does not exist + file.Section(section) + } else { + return ErrSyntax{Line: lineNum, Source: line} + } + + } + return nil +} + +// Loads and returns a File from a reader. +func Load(in io.Reader) (File, error) { + file := make(File) + err := file.Load(in) + return file, err +} + +// Loads and returns an INI File from a file on disk. +func LoadFile(filename string) (File, error) { + file := make(File) + err := file.LoadFile(filename) + return file, err +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/abuse.go b/vendor/github.com/softlayer/softlayer-go/datatypes/abuse.go new file mode 100644 index 0000000000..8cc82eb325 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/abuse.go @@ -0,0 +1,32 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Abuse_Lockdown_Resource struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + InvoiceItem *Billing_Invoice_Item `json:"invoiceItem,omitempty" xmlrpc:"invoiceItem,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/account.go b/vendor/github.com/softlayer/softlayer-go/datatypes/account.go new file mode 100644 index 0000000000..8dfecb55c7 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/account.go @@ -0,0 +1,2785 @@ +/** + * Copyright 2016 IBM Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Account data type contains general information relating to a single SoftLayer customer account. Personal information in this type such as names, addresses, and phone numbers are assigned to the account only and not to users belonging to the account. The SoftLayer_Account data type contains a number of relational properties that are used by the SoftLayer customer portal to quickly present a variety of account related services to it's users. +// +// SoftLayer customers are unable to change their company account information in the portal or the API. If you need to change this information please open a sales ticket in our customer portal and our account management staff will assist you. +type Account struct { + Entity + + // An email address that is responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to this address. + AbuseEmail *string `json:"abuseEmail,omitempty" xmlrpc:"abuseEmail,omitempty"` + + // A count of email addresses that are responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to these addresses. + AbuseEmailCount *uint `json:"abuseEmailCount,omitempty" xmlrpc:"abuseEmailCount,omitempty"` + + // Email addresses that are responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to these addresses. + AbuseEmails []Account_AbuseEmail `json:"abuseEmails,omitempty" xmlrpc:"abuseEmails,omitempty"` + + // A count of the account contacts on an account. + AccountContactCount *uint `json:"accountContactCount,omitempty" xmlrpc:"accountContactCount,omitempty"` + + // The account contacts on an account. + AccountContacts []Account_Contact `json:"accountContacts,omitempty" xmlrpc:"accountContacts,omitempty"` + + // A count of the account software licenses owned by an account + AccountLicenseCount *uint `json:"accountLicenseCount,omitempty" xmlrpc:"accountLicenseCount,omitempty"` + + // The account software licenses owned by an account + AccountLicenses []Software_AccountLicense `json:"accountLicenses,omitempty" xmlrpc:"accountLicenses,omitempty"` + + // A count of + AccountLinkCount *uint `json:"accountLinkCount,omitempty" xmlrpc:"accountLinkCount,omitempty"` + + // no documentation yet + AccountLinks []Account_Link `json:"accountLinks,omitempty" xmlrpc:"accountLinks,omitempty"` + + // A flag indicating that the account has a managed resource. + AccountManagedResourcesFlag *bool `json:"accountManagedResourcesFlag,omitempty" xmlrpc:"accountManagedResourcesFlag,omitempty"` + + // An account's status presented in a more detailed data type. + AccountStatus *Account_Status `json:"accountStatus,omitempty" xmlrpc:"accountStatus,omitempty"` + + // A number reflecting the state of an account. 
+ AccountStatusId *int `json:"accountStatusId,omitempty" xmlrpc:"accountStatusId,omitempty"` + + // The billing item associated with an account's monthly discount. + ActiveAccountDiscountBillingItem *Billing_Item `json:"activeAccountDiscountBillingItem,omitempty" xmlrpc:"activeAccountDiscountBillingItem,omitempty"` + + // A count of the active account software licenses owned by an account + ActiveAccountLicenseCount *uint `json:"activeAccountLicenseCount,omitempty" xmlrpc:"activeAccountLicenseCount,omitempty"` + + // The active account software licenses owned by an account + ActiveAccountLicenses []Software_AccountLicense `json:"activeAccountLicenses,omitempty" xmlrpc:"activeAccountLicenses,omitempty"` + + // A count of the active address(es) that belong to an account. + ActiveAddressCount *uint `json:"activeAddressCount,omitempty" xmlrpc:"activeAddressCount,omitempty"` + + // The active address(es) that belong to an account. + ActiveAddresses []Account_Address `json:"activeAddresses,omitempty" xmlrpc:"activeAddresses,omitempty"` + + // A count of all billing agreements for an account + ActiveBillingAgreementCount *uint `json:"activeBillingAgreementCount,omitempty" xmlrpc:"activeBillingAgreementCount,omitempty"` + + // All billing agreements for an account + ActiveBillingAgreements []Account_Agreement `json:"activeBillingAgreements,omitempty" xmlrpc:"activeBillingAgreements,omitempty"` + + // no documentation yet + ActiveCatalystEnrollment *Catalyst_Enrollment `json:"activeCatalystEnrollment,omitempty" xmlrpc:"activeCatalystEnrollment,omitempty"` + + // A count of the account's active top level colocation containers. + ActiveColocationContainerCount *uint `json:"activeColocationContainerCount,omitempty" xmlrpc:"activeColocationContainerCount,omitempty"` + + // The account's active top level colocation containers. + ActiveColocationContainers []Billing_Item `json:"activeColocationContainers,omitempty" xmlrpc:"activeColocationContainers,omitempty"` + + // Account's currently active Flexible Credit enrollment. + ActiveFlexibleCreditEnrollment *FlexibleCredit_Enrollment `json:"activeFlexibleCreditEnrollment,omitempty" xmlrpc:"activeFlexibleCreditEnrollment,omitempty"` + + // A count of + ActiveNotificationSubscriberCount *uint `json:"activeNotificationSubscriberCount,omitempty" xmlrpc:"activeNotificationSubscriberCount,omitempty"` + + // no documentation yet + ActiveNotificationSubscribers []Notification_Subscriber `json:"activeNotificationSubscribers,omitempty" xmlrpc:"activeNotificationSubscribers,omitempty"` + + // A count of an account's non-expired quotes. + ActiveQuoteCount *uint `json:"activeQuoteCount,omitempty" xmlrpc:"activeQuoteCount,omitempty"` + + // An account's non-expired quotes. + ActiveQuotes []Billing_Order_Quote `json:"activeQuotes,omitempty" xmlrpc:"activeQuotes,omitempty"` + + // A count of the virtual software licenses controlled by an account + ActiveVirtualLicenseCount *uint `json:"activeVirtualLicenseCount,omitempty" xmlrpc:"activeVirtualLicenseCount,omitempty"` + + // The virtual software licenses controlled by an account + ActiveVirtualLicenses []Software_VirtualLicense `json:"activeVirtualLicenses,omitempty" xmlrpc:"activeVirtualLicenses,omitempty"` + + // A count of an account's associated load balancers. + AdcLoadBalancerCount *uint `json:"adcLoadBalancerCount,omitempty" xmlrpc:"adcLoadBalancerCount,omitempty"` + + // An account's associated load balancers. 
+ AdcLoadBalancers []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"adcLoadBalancers,omitempty" xmlrpc:"adcLoadBalancers,omitempty"` + + // The first line of the mailing address belonging to an account. + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // The second line of the mailing address belonging to an account. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // A count of all the address(es) that belong to an account. + AddressCount *uint `json:"addressCount,omitempty" xmlrpc:"addressCount,omitempty"` + + // All the address(es) that belong to an account. + Addresses []Account_Address `json:"addresses,omitempty" xmlrpc:"addresses,omitempty"` + + // An affiliate identifier associated with the customer account. + AffiliateId *string `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // The billing items that will be on an account's next invoice. + AllBillingItems []Billing_Item `json:"allBillingItems,omitempty" xmlrpc:"allBillingItems,omitempty"` + + // A count of the billing items that will be on an account's next invoice. + AllCommissionBillingItemCount *uint `json:"allCommissionBillingItemCount,omitempty" xmlrpc:"allCommissionBillingItemCount,omitempty"` + + // The billing items that will be on an account's next invoice. + AllCommissionBillingItems []Billing_Item `json:"allCommissionBillingItems,omitempty" xmlrpc:"allCommissionBillingItems,omitempty"` + + // A count of the billing items that will be on an account's next invoice. + AllRecurringTopLevelBillingItemCount *uint `json:"allRecurringTopLevelBillingItemCount,omitempty" xmlrpc:"allRecurringTopLevelBillingItemCount,omitempty"` + + // The billing items that will be on an account's next invoice. + AllRecurringTopLevelBillingItems []Billing_Item `json:"allRecurringTopLevelBillingItems,omitempty" xmlrpc:"allRecurringTopLevelBillingItems,omitempty"` + + // The billing items that will be on an account's next invoice. Does not consider associated items. + AllRecurringTopLevelBillingItemsUnfiltered []Billing_Item `json:"allRecurringTopLevelBillingItemsUnfiltered,omitempty" xmlrpc:"allRecurringTopLevelBillingItemsUnfiltered,omitempty"` + + // A count of the billing items that will be on an account's next invoice. Does not consider associated items. + AllRecurringTopLevelBillingItemsUnfilteredCount *uint `json:"allRecurringTopLevelBillingItemsUnfilteredCount,omitempty" xmlrpc:"allRecurringTopLevelBillingItemsUnfilteredCount,omitempty"` + + // A count of the billing items that will be on an account's next invoice. + AllSubnetBillingItemCount *uint `json:"allSubnetBillingItemCount,omitempty" xmlrpc:"allSubnetBillingItemCount,omitempty"` + + // The billing items that will be on an account's next invoice. + AllSubnetBillingItems []Billing_Item `json:"allSubnetBillingItems,omitempty" xmlrpc:"allSubnetBillingItems,omitempty"` + + // A count of all billing items of an account. + AllTopLevelBillingItemCount *uint `json:"allTopLevelBillingItemCount,omitempty" xmlrpc:"allTopLevelBillingItemCount,omitempty"` + + // All billing items of an account. + AllTopLevelBillingItems []Billing_Item `json:"allTopLevelBillingItems,omitempty" xmlrpc:"allTopLevelBillingItems,omitempty"` + + // The billing items that will be on an account's next invoice. Does not consider associated items. 
+ AllTopLevelBillingItemsUnfiltered []Billing_Item `json:"allTopLevelBillingItemsUnfiltered,omitempty" xmlrpc:"allTopLevelBillingItemsUnfiltered,omitempty"` + + // A count of the billing items that will be on an account's next invoice. Does not consider associated items. + AllTopLevelBillingItemsUnfilteredCount *uint `json:"allTopLevelBillingItemsUnfilteredCount,omitempty" xmlrpc:"allTopLevelBillingItemsUnfilteredCount,omitempty"` + + // Indicates whether this account is allowed to silently migrate to use IBMid Authentication. + AllowIbmIdSilentMigrationFlag *bool `json:"allowIbmIdSilentMigrationFlag,omitempty" xmlrpc:"allowIbmIdSilentMigrationFlag,omitempty"` + + // The number of PPTP VPN users allowed on an account. + AllowedPptpVpnQuantity *int `json:"allowedPptpVpnQuantity,omitempty" xmlrpc:"allowedPptpVpnQuantity,omitempty"` + + // Flag indicating if this account can be linked with Bluemix. + AllowsBluemixAccountLinkingFlag *bool `json:"allowsBluemixAccountLinkingFlag,omitempty" xmlrpc:"allowsBluemixAccountLinkingFlag,omitempty"` + + // A secondary phone number assigned to an account. + AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"` + + // A count of an account's associated application delivery controller records. + ApplicationDeliveryControllerCount *uint `json:"applicationDeliveryControllerCount,omitempty" xmlrpc:"applicationDeliveryControllerCount,omitempty"` + + // An account's associated application delivery controller records. + ApplicationDeliveryControllers []Network_Application_Delivery_Controller `json:"applicationDeliveryControllers,omitempty" xmlrpc:"applicationDeliveryControllers,omitempty"` + + // A count of the account attribute values for a SoftLayer customer account. + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // The account attribute values for a SoftLayer customer account. + Attributes []Account_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // A count of the public network VLANs assigned to an account. + AvailablePublicNetworkVlanCount *uint `json:"availablePublicNetworkVlanCount,omitempty" xmlrpc:"availablePublicNetworkVlanCount,omitempty"` + + // The public network VLANs assigned to an account. + AvailablePublicNetworkVlans []Network_Vlan `json:"availablePublicNetworkVlans,omitempty" xmlrpc:"availablePublicNetworkVlans,omitempty"` + + // The account balance of a SoftLayer customer account. An account's balance is the amount of money owed to SoftLayer by the account holder, returned as a floating point number with two decimal places, measured in US Dollars ($USD). A negative account balance means the account holder has overpaid and is owed money by SoftLayer. + Balance *Float64 `json:"balance,omitempty" xmlrpc:"balance,omitempty"` + + // A count of the bandwidth allotments for an account. + BandwidthAllotmentCount *uint `json:"bandwidthAllotmentCount,omitempty" xmlrpc:"bandwidthAllotmentCount,omitempty"` + + // The bandwidth allotments for an account. + BandwidthAllotments []Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotments,omitempty" xmlrpc:"bandwidthAllotments,omitempty"` + + // The bandwidth allotments for an account currently over allocation. + BandwidthAllotmentsOverAllocation []Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotmentsOverAllocation,omitempty" xmlrpc:"bandwidthAllotmentsOverAllocation,omitempty"` + + // A count of the bandwidth allotments for an account currently over allocation. 
+ BandwidthAllotmentsOverAllocationCount *uint `json:"bandwidthAllotmentsOverAllocationCount,omitempty" xmlrpc:"bandwidthAllotmentsOverAllocationCount,omitempty"` + + // The bandwidth allotments for an account projected to go over allocation. + BandwidthAllotmentsProjectedOverAllocation []Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotmentsProjectedOverAllocation,omitempty" xmlrpc:"bandwidthAllotmentsProjectedOverAllocation,omitempty"` + + // A count of the bandwidth allotments for an account projected to go over allocation. + BandwidthAllotmentsProjectedOverAllocationCount *uint `json:"bandwidthAllotmentsProjectedOverAllocationCount,omitempty" xmlrpc:"bandwidthAllotmentsProjectedOverAllocationCount,omitempty"` + + // A count of an account's associated bare metal server objects. + BareMetalInstanceCount *uint `json:"bareMetalInstanceCount,omitempty" xmlrpc:"bareMetalInstanceCount,omitempty"` + + // An account's associated bare metal server objects. + BareMetalInstances []Hardware `json:"bareMetalInstances,omitempty" xmlrpc:"bareMetalInstances,omitempty"` + + // A count of all billing agreements for an account + BillingAgreementCount *uint `json:"billingAgreementCount,omitempty" xmlrpc:"billingAgreementCount,omitempty"` + + // All billing agreements for an account + BillingAgreements []Account_Agreement `json:"billingAgreements,omitempty" xmlrpc:"billingAgreements,omitempty"` + + // An account's billing information. + BillingInfo *Billing_Info `json:"billingInfo,omitempty" xmlrpc:"billingInfo,omitempty"` + + // A count of private template group objects (parent and children) and the shared template group objects (parent only) for an account. + BlockDeviceTemplateGroupCount *uint `json:"blockDeviceTemplateGroupCount,omitempty" xmlrpc:"blockDeviceTemplateGroupCount,omitempty"` + + // Private template group objects (parent and children) and the shared template group objects (parent only) for an account. + BlockDeviceTemplateGroups []Virtual_Guest_Block_Device_Template_Group `json:"blockDeviceTemplateGroups,omitempty" xmlrpc:"blockDeviceTemplateGroups,omitempty"` + + // Indicates whether this account requires blue id authentication. + BlueIdAuthenticationRequiredFlag *bool `json:"blueIdAuthenticationRequiredFlag,omitempty" xmlrpc:"blueIdAuthenticationRequiredFlag,omitempty"` + + // Returns true if this account is linked to IBM Bluemix, false if not. + BluemixLinkedFlag *bool `json:"bluemixLinkedFlag,omitempty" xmlrpc:"bluemixLinkedFlag,omitempty"` + + // no documentation yet + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // no documentation yet + BrandAccountFlag *bool `json:"brandAccountFlag,omitempty" xmlrpc:"brandAccountFlag,omitempty"` + + // The Brand tied to an account. + BrandId *int `json:"brandId,omitempty" xmlrpc:"brandId,omitempty"` + + // The brand keyName. + BrandKeyName *string `json:"brandKeyName,omitempty" xmlrpc:"brandKeyName,omitempty"` + + // Indicating whether this account can order additional Vlans. + CanOrderAdditionalVlansFlag *bool `json:"canOrderAdditionalVlansFlag,omitempty" xmlrpc:"canOrderAdditionalVlansFlag,omitempty"` + + // A count of an account's active carts. + CartCount *uint `json:"cartCount,omitempty" xmlrpc:"cartCount,omitempty"` + + // An account's active carts. 
+ Carts []Billing_Order_Quote `json:"carts,omitempty" xmlrpc:"carts,omitempty"` + + // A count of + CatalystEnrollmentCount *uint `json:"catalystEnrollmentCount,omitempty" xmlrpc:"catalystEnrollmentCount,omitempty"` + + // no documentation yet + CatalystEnrollments []Catalyst_Enrollment `json:"catalystEnrollments,omitempty" xmlrpc:"catalystEnrollments,omitempty"` + + // A count of an account's associated CDN accounts. + CdnAccountCount *uint `json:"cdnAccountCount,omitempty" xmlrpc:"cdnAccountCount,omitempty"` + + // An account's associated CDN accounts. + CdnAccounts []Network_ContentDelivery_Account `json:"cdnAccounts,omitempty" xmlrpc:"cdnAccounts,omitempty"` + + // The city of the mailing address belonging to an account. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Whether an account is exempt from taxes on their invoices. + ClaimedTaxExemptTxFlag *bool `json:"claimedTaxExemptTxFlag,omitempty" xmlrpc:"claimedTaxExemptTxFlag,omitempty"` + + // A count of all closed tickets associated with an account. + ClosedTicketCount *uint `json:"closedTicketCount,omitempty" xmlrpc:"closedTicketCount,omitempty"` + + // All closed tickets associated with an account. + ClosedTickets []Ticket `json:"closedTickets,omitempty" xmlrpc:"closedTickets,omitempty"` + + // The company name associated with an account. + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // A two-letter abbreviation of the country in the mailing address belonging to an account. + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The date an account was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of datacenters which contain subnets that the account has access to route. + DatacentersWithSubnetAllocationCount *uint `json:"datacentersWithSubnetAllocationCount,omitempty" xmlrpc:"datacentersWithSubnetAllocationCount,omitempty"` + + // Datacenters which contain subnets that the account has access to route. + DatacentersWithSubnetAllocations []Location `json:"datacentersWithSubnetAllocations,omitempty" xmlrpc:"datacentersWithSubnetAllocations,omitempty"` + + // A count of an account's associated virtual dedicated host objects. + DedicatedHostCount *uint `json:"dedicatedHostCount,omitempty" xmlrpc:"dedicatedHostCount,omitempty"` + + // An account's associated virtual dedicated host objects. + DedicatedHosts []Virtual_DedicatedHost `json:"dedicatedHosts,omitempty" xmlrpc:"dedicatedHosts,omitempty"` + + // Device Fingerprint Identifier - Used internally and can safely be ignored. + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // A flag indicating whether payments are processed for this account. + DisablePaymentProcessingFlag *bool `json:"disablePaymentProcessingFlag,omitempty" xmlrpc:"disablePaymentProcessingFlag,omitempty"` + + // A count of the SoftLayer employees that an account is assigned to. + DisplaySupportRepresentativeAssignmentCount *uint `json:"displaySupportRepresentativeAssignmentCount,omitempty" xmlrpc:"displaySupportRepresentativeAssignmentCount,omitempty"` + + // The SoftLayer employees that an account is assigned to. + DisplaySupportRepresentativeAssignments []Account_Attachment_Employee `json:"displaySupportRepresentativeAssignments,omitempty" xmlrpc:"displaySupportRepresentativeAssignments,omitempty"` + + // A count of the DNS domains associated with an account. 
+ DomainCount *uint `json:"domainCount,omitempty" xmlrpc:"domainCount,omitempty"` + + // A count of + DomainRegistrationCount *uint `json:"domainRegistrationCount,omitempty" xmlrpc:"domainRegistrationCount,omitempty"` + + // no documentation yet + DomainRegistrations []Dns_Domain_Registration `json:"domainRegistrations,omitempty" xmlrpc:"domainRegistrations,omitempty"` + + // The DNS domains associated with an account. + Domains []Dns_Domain `json:"domains,omitempty" xmlrpc:"domains,omitempty"` + + // A count of the DNS domains associated with an account that were not created as a result of a secondary DNS zone transfer. + DomainsWithoutSecondaryDnsRecordCount *uint `json:"domainsWithoutSecondaryDnsRecordCount,omitempty" xmlrpc:"domainsWithoutSecondaryDnsRecordCount,omitempty"` + + // The DNS domains associated with an account that were not created as a result of a secondary DNS zone transfer. + DomainsWithoutSecondaryDnsRecords []Dns_Domain `json:"domainsWithoutSecondaryDnsRecords,omitempty" xmlrpc:"domainsWithoutSecondaryDnsRecords,omitempty"` + + // A general email address assigned to an account. + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // The total capacity of Legacy EVault Volumes on an account, in GB. + EvaultCapacityGB *uint `json:"evaultCapacityGB,omitempty" xmlrpc:"evaultCapacityGB,omitempty"` + + // A count of an account's master EVault user. This is only used when an account has EVault service. + EvaultMasterUserCount *uint `json:"evaultMasterUserCount,omitempty" xmlrpc:"evaultMasterUserCount,omitempty"` + + // An account's master EVault user. This is only used when an account has EVault service. + EvaultMasterUsers []Account_Password `json:"evaultMasterUsers,omitempty" xmlrpc:"evaultMasterUsers,omitempty"` + + // An account's associated EVault storage volumes. + EvaultNetworkStorage []Network_Storage `json:"evaultNetworkStorage,omitempty" xmlrpc:"evaultNetworkStorage,omitempty"` + + // A count of an account's associated EVault storage volumes. + EvaultNetworkStorageCount *uint `json:"evaultNetworkStorageCount,omitempty" xmlrpc:"evaultNetworkStorageCount,omitempty"` + + // A count of stored security certificates that are expired (ie. SSL) + ExpiredSecurityCertificateCount *uint `json:"expiredSecurityCertificateCount,omitempty" xmlrpc:"expiredSecurityCertificateCount,omitempty"` + + // Stored security certificates that are expired (ie. SSL) + ExpiredSecurityCertificates []Security_Certificate `json:"expiredSecurityCertificates,omitempty" xmlrpc:"expiredSecurityCertificates,omitempty"` + + // A count of logs of who entered a colocation area which is assigned to this account, or when a user under this account enters a datacenter. + FacilityLogCount *uint `json:"facilityLogCount,omitempty" xmlrpc:"facilityLogCount,omitempty"` + + // Logs of who entered a colocation area which is assigned to this account, or when a user under this account enters a datacenter. + FacilityLogs []User_Access_Facility_Log `json:"facilityLogs,omitempty" xmlrpc:"facilityLogs,omitempty"` + + // A fax phone number assigned to an account. + FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"` + + // Each customer account is listed under a single individual. This is that individual's first name. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // A count of all of the account's current and former Flexible Credit enrollments. 
+ FlexibleCreditEnrollmentCount *uint `json:"flexibleCreditEnrollmentCount,omitempty" xmlrpc:"flexibleCreditEnrollmentCount,omitempty"` + + // All of the account's current and former Flexible Credit enrollments. + FlexibleCreditEnrollments []FlexibleCredit_Enrollment `json:"flexibleCreditEnrollments,omitempty" xmlrpc:"flexibleCreditEnrollments,omitempty"` + + // A count of + GlobalIpRecordCount *uint `json:"globalIpRecordCount,omitempty" xmlrpc:"globalIpRecordCount,omitempty"` + + // no documentation yet + GlobalIpRecords []Network_Subnet_IpAddress_Global `json:"globalIpRecords,omitempty" xmlrpc:"globalIpRecords,omitempty"` + + // A count of + GlobalIpv4RecordCount *uint `json:"globalIpv4RecordCount,omitempty" xmlrpc:"globalIpv4RecordCount,omitempty"` + + // no documentation yet + GlobalIpv4Records []Network_Subnet_IpAddress_Global `json:"globalIpv4Records,omitempty" xmlrpc:"globalIpv4Records,omitempty"` + + // A count of + GlobalIpv6RecordCount *uint `json:"globalIpv6RecordCount,omitempty" xmlrpc:"globalIpv6RecordCount,omitempty"` + + // no documentation yet + GlobalIpv6Records []Network_Subnet_IpAddress_Global `json:"globalIpv6Records,omitempty" xmlrpc:"globalIpv6Records,omitempty"` + + // A count of the global load balancer accounts for a softlayer customer account. + GlobalLoadBalancerAccountCount *uint `json:"globalLoadBalancerAccountCount,omitempty" xmlrpc:"globalLoadBalancerAccountCount,omitempty"` + + // The global load balancer accounts for a softlayer customer account. + GlobalLoadBalancerAccounts []Network_LoadBalancer_Global_Account `json:"globalLoadBalancerAccounts,omitempty" xmlrpc:"globalLoadBalancerAccounts,omitempty"` + + // An account's associated hardware objects. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of an account's associated hardware objects. + HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // An account's associated hardware objects currently over bandwidth allocation. + HardwareOverBandwidthAllocation []Hardware `json:"hardwareOverBandwidthAllocation,omitempty" xmlrpc:"hardwareOverBandwidthAllocation,omitempty"` + + // A count of an account's associated hardware objects currently over bandwidth allocation. + HardwareOverBandwidthAllocationCount *uint `json:"hardwareOverBandwidthAllocationCount,omitempty" xmlrpc:"hardwareOverBandwidthAllocationCount,omitempty"` + + // An account's associated hardware objects projected to go over bandwidth allocation. + HardwareProjectedOverBandwidthAllocation []Hardware `json:"hardwareProjectedOverBandwidthAllocation,omitempty" xmlrpc:"hardwareProjectedOverBandwidthAllocation,omitempty"` + + // A count of an account's associated hardware objects projected to go over bandwidth allocation. + HardwareProjectedOverBandwidthAllocationCount *uint `json:"hardwareProjectedOverBandwidthAllocationCount,omitempty" xmlrpc:"hardwareProjectedOverBandwidthAllocationCount,omitempty"` + + // All hardware associated with an account that has the cPanel web hosting control panel installed. + HardwareWithCpanel []Hardware `json:"hardwareWithCpanel,omitempty" xmlrpc:"hardwareWithCpanel,omitempty"` + + // A count of all hardware associated with an account that has the cPanel web hosting control panel installed. + HardwareWithCpanelCount *uint `json:"hardwareWithCpanelCount,omitempty" xmlrpc:"hardwareWithCpanelCount,omitempty"` + + // All hardware associated with an account that has the Helm web hosting control panel installed. 
+ HardwareWithHelm []Hardware `json:"hardwareWithHelm,omitempty" xmlrpc:"hardwareWithHelm,omitempty"` + + // A count of all hardware associated with an account that has the Helm web hosting control panel installed. + HardwareWithHelmCount *uint `json:"hardwareWithHelmCount,omitempty" xmlrpc:"hardwareWithHelmCount,omitempty"` + + // All hardware associated with an account that has McAfee Secure software components. + HardwareWithMcafee []Hardware `json:"hardwareWithMcafee,omitempty" xmlrpc:"hardwareWithMcafee,omitempty"` + + // All hardware associated with an account that has McAfee Secure AntiVirus for Redhat software components. + HardwareWithMcafeeAntivirusRedhat []Hardware `json:"hardwareWithMcafeeAntivirusRedhat,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusRedhat,omitempty"` + + // A count of all hardware associated with an account that has McAfee Secure AntiVirus for Redhat software components. + HardwareWithMcafeeAntivirusRedhatCount *uint `json:"hardwareWithMcafeeAntivirusRedhatCount,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusRedhatCount,omitempty"` + + // A count of all hardware associated with an account that has McAfee Secure AntiVirus for Windows software components. + HardwareWithMcafeeAntivirusWindowCount *uint `json:"hardwareWithMcafeeAntivirusWindowCount,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusWindowCount,omitempty"` + + // All hardware associated with an account that has McAfee Secure AntiVirus for Windows software components. + HardwareWithMcafeeAntivirusWindows []Hardware `json:"hardwareWithMcafeeAntivirusWindows,omitempty" xmlrpc:"hardwareWithMcafeeAntivirusWindows,omitempty"` + + // A count of all hardware associated with an account that has McAfee Secure software components. + HardwareWithMcafeeCount *uint `json:"hardwareWithMcafeeCount,omitempty" xmlrpc:"hardwareWithMcafeeCount,omitempty"` + + // All hardware associated with an account that has McAfee Secure Intrusion Detection System software components. + HardwareWithMcafeeIntrusionDetectionSystem []Hardware `json:"hardwareWithMcafeeIntrusionDetectionSystem,omitempty" xmlrpc:"hardwareWithMcafeeIntrusionDetectionSystem,omitempty"` + + // A count of all hardware associated with an account that has McAfee Secure Intrusion Detection System software components. + HardwareWithMcafeeIntrusionDetectionSystemCount *uint `json:"hardwareWithMcafeeIntrusionDetectionSystemCount,omitempty" xmlrpc:"hardwareWithMcafeeIntrusionDetectionSystemCount,omitempty"` + + // All hardware associated with an account that has the Plesk web hosting control panel installed. + HardwareWithPlesk []Hardware `json:"hardwareWithPlesk,omitempty" xmlrpc:"hardwareWithPlesk,omitempty"` + + // A count of all hardware associated with an account that has the Plesk web hosting control panel installed. + HardwareWithPleskCount *uint `json:"hardwareWithPleskCount,omitempty" xmlrpc:"hardwareWithPleskCount,omitempty"` + + // All hardware associated with an account that has the QuantaStor storage system installed. + HardwareWithQuantastor []Hardware `json:"hardwareWithQuantastor,omitempty" xmlrpc:"hardwareWithQuantastor,omitempty"` + + // A count of all hardware associated with an account that has the QuantaStor storage system installed. + HardwareWithQuantastorCount *uint `json:"hardwareWithQuantastorCount,omitempty" xmlrpc:"hardwareWithQuantastorCount,omitempty"` + + // All hardware associated with an account that has the Urchin web traffic analytics package installed. 
+ HardwareWithUrchin []Hardware `json:"hardwareWithUrchin,omitempty" xmlrpc:"hardwareWithUrchin,omitempty"`
+
+ // A count of all hardware associated with an account that has the Urchin web traffic analytics package installed.
+ HardwareWithUrchinCount *uint `json:"hardwareWithUrchinCount,omitempty" xmlrpc:"hardwareWithUrchinCount,omitempty"`
+
+ // A count of all hardware associated with an account that is running a version of the Microsoft Windows operating system.
+ HardwareWithWindowCount *uint `json:"hardwareWithWindowCount,omitempty" xmlrpc:"hardwareWithWindowCount,omitempty"`
+
+ // All hardware associated with an account that is running a version of the Microsoft Windows operating system.
+ HardwareWithWindows []Hardware `json:"hardwareWithWindows,omitempty" xmlrpc:"hardwareWithWindows,omitempty"`
+
+ // Return 1 if one of the account's hardware has the EVault Bare Metal Server Restore Plugin, otherwise 0.
+ HasEvaultBareMetalRestorePluginFlag *bool `json:"hasEvaultBareMetalRestorePluginFlag,omitempty" xmlrpc:"hasEvaultBareMetalRestorePluginFlag,omitempty"`
+
+ // Return 1 if one of the account's hardware has an installation of Idera Server Backup, otherwise 0.
+ HasIderaBareMetalRestorePluginFlag *bool `json:"hasIderaBareMetalRestorePluginFlag,omitempty" xmlrpc:"hasIderaBareMetalRestorePluginFlag,omitempty"`
+
+ // The number of orders in a PENDING status for a SoftLayer customer account.
+ HasPendingOrder *uint `json:"hasPendingOrder,omitempty" xmlrpc:"hasPendingOrder,omitempty"`
+
+ // Return 1 if one of the account's hardware has an installation of R1Soft CDP, otherwise 0.
+ HasR1softBareMetalRestorePluginFlag *bool `json:"hasR1softBareMetalRestorePluginFlag,omitempty" xmlrpc:"hasR1softBareMetalRestorePluginFlag,omitempty"`
+
+ // A count of an account's associated hourly bare metal server objects.
+ HourlyBareMetalInstanceCount *uint `json:"hourlyBareMetalInstanceCount,omitempty" xmlrpc:"hourlyBareMetalInstanceCount,omitempty"`
+
+ // An account's associated hourly bare metal server objects.
+ HourlyBareMetalInstances []Hardware `json:"hourlyBareMetalInstances,omitempty" xmlrpc:"hourlyBareMetalInstances,omitempty"`
+
+ // A count of hourly service billing items that will be on an account's next invoice.
+ HourlyServiceBillingItemCount *uint `json:"hourlyServiceBillingItemCount,omitempty" xmlrpc:"hourlyServiceBillingItemCount,omitempty"`
+
+ // Hourly service billing items that will be on an account's next invoice.
+ HourlyServiceBillingItems []Billing_Item `json:"hourlyServiceBillingItems,omitempty" xmlrpc:"hourlyServiceBillingItems,omitempty"`
+
+ // A count of an account's associated hourly virtual guest objects.
+ HourlyVirtualGuestCount *uint `json:"hourlyVirtualGuestCount,omitempty" xmlrpc:"hourlyVirtualGuestCount,omitempty"`
+
+ // An account's associated hourly virtual guest objects.
+ HourlyVirtualGuests []Virtual_Guest `json:"hourlyVirtualGuests,omitempty" xmlrpc:"hourlyVirtualGuests,omitempty"`
+
+ // An account's associated Virtual Storage volumes.
+ HubNetworkStorage []Network_Storage `json:"hubNetworkStorage,omitempty" xmlrpc:"hubNetworkStorage,omitempty"`
+
+ // A count of an account's associated Virtual Storage volumes.
+ HubNetworkStorageCount *uint `json:"hubNetworkStorageCount,omitempty" xmlrpc:"hubNetworkStorageCount,omitempty"`
+
+ // Unique identifier for a customer used throughout IBM.
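+ //
+ // Every property in this generated struct is a pointer tagged `omitempty`, so a
+ // value the API omits stays nil instead of collapsing to a zero value. A minimal
+ // nil-safe read, sketched under the assumption that the softlayer-go helper
+ // package "sl" and its Get helper are available:
+ //
+ //     import "github.com/softlayer/softlayer-go/sl"
+ //
+ //     // Falls back to "unknown" when the API response omitted the field.
+ //     icn := sl.Get(account.IbmCustomerNumber, "unknown")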
+ IbmCustomerNumber *string `json:"ibmCustomerNumber,omitempty" xmlrpc:"ibmCustomerNumber,omitempty"` + + // Timestamp representing the point in time when an account is required to use IBMid authentication. + IbmIdMigrationExpirationTimestamp *string `json:"ibmIdMigrationExpirationTimestamp,omitempty" xmlrpc:"ibmIdMigrationExpirationTimestamp,omitempty"` + + // A customer account's internal identifier. Account numbers are typically preceded by the string "SL" in the customer portal. Every SoftLayer account has at least one portal user whose username follows the "SL" + account number naming scheme. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of + InternalNoteCount *uint `json:"internalNoteCount,omitempty" xmlrpc:"internalNoteCount,omitempty"` + + // no documentation yet + InternalNotes []Account_Note `json:"internalNotes,omitempty" xmlrpc:"internalNotes,omitempty"` + + // A count of an account's associated billing invoices. + InvoiceCount *uint `json:"invoiceCount,omitempty" xmlrpc:"invoiceCount,omitempty"` + + // An account's associated billing invoices. + Invoices []Billing_Invoice `json:"invoices,omitempty" xmlrpc:"invoices,omitempty"` + + // A count of + IpAddressCount *uint `json:"ipAddressCount,omitempty" xmlrpc:"ipAddressCount,omitempty"` + + // no documentation yet + IpAddresses []Network_Subnet_IpAddress `json:"ipAddresses,omitempty" xmlrpc:"ipAddresses,omitempty"` + + // A flag indicating if an account belongs to a reseller or not. + IsReseller *int `json:"isReseller,omitempty" xmlrpc:"isReseller,omitempty"` + + // An account's associated iSCSI storage volumes. + IscsiNetworkStorage []Network_Storage `json:"iscsiNetworkStorage,omitempty" xmlrpc:"iscsiNetworkStorage,omitempty"` + + // A count of an account's associated iSCSI storage volumes. + IscsiNetworkStorageCount *uint `json:"iscsiNetworkStorageCount,omitempty" xmlrpc:"iscsiNetworkStorageCount,omitempty"` + + // The most recently canceled billing item. + LastCanceledBillingItem *Billing_Item `json:"lastCanceledBillingItem,omitempty" xmlrpc:"lastCanceledBillingItem,omitempty"` + + // The most recent cancelled server billing item. + LastCancelledServerBillingItem *Billing_Item `json:"lastCancelledServerBillingItem,omitempty" xmlrpc:"lastCancelledServerBillingItem,omitempty"` + + // A count of the five most recently closed abuse tickets associated with an account. + LastFiveClosedAbuseTicketCount *uint `json:"lastFiveClosedAbuseTicketCount,omitempty" xmlrpc:"lastFiveClosedAbuseTicketCount,omitempty"` + + // The five most recently closed abuse tickets associated with an account. + LastFiveClosedAbuseTickets []Ticket `json:"lastFiveClosedAbuseTickets,omitempty" xmlrpc:"lastFiveClosedAbuseTickets,omitempty"` + + // A count of the five most recently closed accounting tickets associated with an account. + LastFiveClosedAccountingTicketCount *uint `json:"lastFiveClosedAccountingTicketCount,omitempty" xmlrpc:"lastFiveClosedAccountingTicketCount,omitempty"` + + // The five most recently closed accounting tickets associated with an account. + LastFiveClosedAccountingTickets []Ticket `json:"lastFiveClosedAccountingTickets,omitempty" xmlrpc:"lastFiveClosedAccountingTickets,omitempty"` + + // A count of the five most recently closed tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account. 
+ LastFiveClosedOtherTicketCount *uint `json:"lastFiveClosedOtherTicketCount,omitempty" xmlrpc:"lastFiveClosedOtherTicketCount,omitempty"` + + // The five most recently closed tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account. + LastFiveClosedOtherTickets []Ticket `json:"lastFiveClosedOtherTickets,omitempty" xmlrpc:"lastFiveClosedOtherTickets,omitempty"` + + // A count of the five most recently closed sales tickets associated with an account. + LastFiveClosedSalesTicketCount *uint `json:"lastFiveClosedSalesTicketCount,omitempty" xmlrpc:"lastFiveClosedSalesTicketCount,omitempty"` + + // The five most recently closed sales tickets associated with an account. + LastFiveClosedSalesTickets []Ticket `json:"lastFiveClosedSalesTickets,omitempty" xmlrpc:"lastFiveClosedSalesTickets,omitempty"` + + // A count of the five most recently closed support tickets associated with an account. + LastFiveClosedSupportTicketCount *uint `json:"lastFiveClosedSupportTicketCount,omitempty" xmlrpc:"lastFiveClosedSupportTicketCount,omitempty"` + + // The five most recently closed support tickets associated with an account. + LastFiveClosedSupportTickets []Ticket `json:"lastFiveClosedSupportTickets,omitempty" xmlrpc:"lastFiveClosedSupportTickets,omitempty"` + + // A count of the five most recently closed tickets associated with an account. + LastFiveClosedTicketCount *uint `json:"lastFiveClosedTicketCount,omitempty" xmlrpc:"lastFiveClosedTicketCount,omitempty"` + + // The five most recently closed tickets associated with an account. + LastFiveClosedTickets []Ticket `json:"lastFiveClosedTickets,omitempty" xmlrpc:"lastFiveClosedTickets,omitempty"` + + // Each customer account is listed under a single individual. This is that individual's last name. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // Whether an account has late fee protection. + LateFeeProtectionFlag *bool `json:"lateFeeProtectionFlag,omitempty" xmlrpc:"lateFeeProtectionFlag,omitempty"` + + // An account's most recent billing date. + LatestBillDate *Time `json:"latestBillDate,omitempty" xmlrpc:"latestBillDate,omitempty"` + + // An account's latest recurring invoice. + LatestRecurringInvoice *Billing_Invoice `json:"latestRecurringInvoice,omitempty" xmlrpc:"latestRecurringInvoice,omitempty"` + + // An account's latest recurring pending invoice. + LatestRecurringPendingInvoice *Billing_Invoice `json:"latestRecurringPendingInvoice,omitempty" xmlrpc:"latestRecurringPendingInvoice,omitempty"` + + // A count of the legacy bandwidth allotments for an account. + LegacyBandwidthAllotmentCount *uint `json:"legacyBandwidthAllotmentCount,omitempty" xmlrpc:"legacyBandwidthAllotmentCount,omitempty"` + + // The legacy bandwidth allotments for an account. + LegacyBandwidthAllotments []Network_Bandwidth_Version1_Allotment `json:"legacyBandwidthAllotments,omitempty" xmlrpc:"legacyBandwidthAllotments,omitempty"` + + // The total capacity of Legacy iSCSI Volumes on an account, in GB. + LegacyIscsiCapacityGB *uint `json:"legacyIscsiCapacityGB,omitempty" xmlrpc:"legacyIscsiCapacityGB,omitempty"` + + // A count of an account's associated load balancers. + LoadBalancerCount *uint `json:"loadBalancerCount,omitempty" xmlrpc:"loadBalancerCount,omitempty"` + + // An account's associated load balancers. + LoadBalancers []Network_LoadBalancer_VirtualIpAddress `json:"loadBalancers,omitempty" xmlrpc:"loadBalancers,omitempty"` + + // The total capacity of Legacy lockbox Volumes on an account, in GB. 
+ LockboxCapacityGB *uint `json:"lockboxCapacityGB,omitempty" xmlrpc:"lockboxCapacityGB,omitempty"`
+
+ // An account's associated Lockbox storage volumes.
+ LockboxNetworkStorage []Network_Storage `json:"lockboxNetworkStorage,omitempty" xmlrpc:"lockboxNetworkStorage,omitempty"`
+
+ // A count of an account's associated Lockbox storage volumes.
+ LockboxNetworkStorageCount *uint `json:"lockboxNetworkStorageCount,omitempty" xmlrpc:"lockboxNetworkStorageCount,omitempty"`
+
+ // no documentation yet
+ ManualPaymentsUnderReview []Billing_Payment_Card_ManualPayment `json:"manualPaymentsUnderReview,omitempty" xmlrpc:"manualPaymentsUnderReview,omitempty"`
+
+ // A count of
+ ManualPaymentsUnderReviewCount *uint `json:"manualPaymentsUnderReviewCount,omitempty" xmlrpc:"manualPaymentsUnderReviewCount,omitempty"`
+
+ // An account's master user.
+ MasterUser *User_Customer `json:"masterUser,omitempty" xmlrpc:"masterUser,omitempty"`
+
+ // A count of an account's media transfer service requests.
+ MediaDataTransferRequestCount *uint `json:"mediaDataTransferRequestCount,omitempty" xmlrpc:"mediaDataTransferRequestCount,omitempty"`
+
+ // An account's media transfer service requests.
+ MediaDataTransferRequests []Account_Media_Data_Transfer_Request `json:"mediaDataTransferRequests,omitempty" xmlrpc:"mediaDataTransferRequests,omitempty"`
+
+ // A count of an account's associated Message Queue accounts.
+ MessageQueueAccountCount *uint `json:"messageQueueAccountCount,omitempty" xmlrpc:"messageQueueAccountCount,omitempty"`
+
+ // An account's associated Message Queue accounts.
+ MessageQueueAccounts []Network_Message_Queue `json:"messageQueueAccounts,omitempty" xmlrpc:"messageQueueAccounts,omitempty"`
+
+ // The date an account was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A count of an account's associated monthly bare metal server objects.
+ MonthlyBareMetalInstanceCount *uint `json:"monthlyBareMetalInstanceCount,omitempty" xmlrpc:"monthlyBareMetalInstanceCount,omitempty"`
+
+ // An account's associated monthly bare metal server objects.
+ MonthlyBareMetalInstances []Hardware `json:"monthlyBareMetalInstances,omitempty" xmlrpc:"monthlyBareMetalInstances,omitempty"`
+
+ // A count of an account's associated monthly virtual guest objects.
+ MonthlyVirtualGuestCount *uint `json:"monthlyVirtualGuestCount,omitempty" xmlrpc:"monthlyVirtualGuestCount,omitempty"`
+
+ // An account's associated monthly virtual guest objects.
+ MonthlyVirtualGuests []Virtual_Guest `json:"monthlyVirtualGuests,omitempty" xmlrpc:"monthlyVirtualGuests,omitempty"`
+
+ // An account's associated NAS storage volumes.
+ NasNetworkStorage []Network_Storage `json:"nasNetworkStorage,omitempty" xmlrpc:"nasNetworkStorage,omitempty"`
+
+ // A count of an account's associated NAS storage volumes.
+ NasNetworkStorageCount *uint `json:"nasNetworkStorageCount,omitempty" xmlrpc:"nasNetworkStorageCount,omitempty"`
+
+ // Whether or not this account can define its own networks.
+ NetworkCreationFlag *bool `json:"networkCreationFlag,omitempty" xmlrpc:"networkCreationFlag,omitempty"`
+
+ // A count of all network gateway devices on this account.
+ NetworkGatewayCount *uint `json:"networkGatewayCount,omitempty" xmlrpc:"networkGatewayCount,omitempty"`
+
+ // All network gateway devices on this account.
+ NetworkGateways []Network_Gateway `json:"networkGateways,omitempty" xmlrpc:"networkGateways,omitempty"`
+
+ // An account's associated network hardware.
+ NetworkHardware []Hardware `json:"networkHardware,omitempty" xmlrpc:"networkHardware,omitempty"`
+
+ // A count of an account's associated network hardware.
+ NetworkHardwareCount *uint `json:"networkHardwareCount,omitempty" xmlrpc:"networkHardwareCount,omitempty"`
+
+ // A count of
+ NetworkMessageDeliveryAccountCount *uint `json:"networkMessageDeliveryAccountCount,omitempty" xmlrpc:"networkMessageDeliveryAccountCount,omitempty"`
+
+ // no documentation yet
+ NetworkMessageDeliveryAccounts []Network_Message_Delivery `json:"networkMessageDeliveryAccounts,omitempty" xmlrpc:"networkMessageDeliveryAccounts,omitempty"`
+
+ // Hardware which is currently experiencing a service failure.
+ NetworkMonitorDownHardware []Hardware `json:"networkMonitorDownHardware,omitempty" xmlrpc:"networkMonitorDownHardware,omitempty"`
+
+ // A count of hardware which is currently experiencing a service failure.
+ NetworkMonitorDownHardwareCount *uint `json:"networkMonitorDownHardwareCount,omitempty" xmlrpc:"networkMonitorDownHardwareCount,omitempty"`
+
+ // A count of virtual guests which are currently experiencing a service failure.
+ NetworkMonitorDownVirtualGuestCount *uint `json:"networkMonitorDownVirtualGuestCount,omitempty" xmlrpc:"networkMonitorDownVirtualGuestCount,omitempty"`
+
+ // Virtual guests which are currently experiencing a service failure.
+ NetworkMonitorDownVirtualGuests []Virtual_Guest `json:"networkMonitorDownVirtualGuests,omitempty" xmlrpc:"networkMonitorDownVirtualGuests,omitempty"`
+
+ // Hardware which is currently recovering from a service failure.
+ NetworkMonitorRecoveringHardware []Hardware `json:"networkMonitorRecoveringHardware,omitempty" xmlrpc:"networkMonitorRecoveringHardware,omitempty"`
+
+ // A count of hardware which is currently recovering from a service failure.
+ NetworkMonitorRecoveringHardwareCount *uint `json:"networkMonitorRecoveringHardwareCount,omitempty" xmlrpc:"networkMonitorRecoveringHardwareCount,omitempty"`
+
+ // A count of virtual guests which are currently recovering from a service failure.
+ NetworkMonitorRecoveringVirtualGuestCount *uint `json:"networkMonitorRecoveringVirtualGuestCount,omitempty" xmlrpc:"networkMonitorRecoveringVirtualGuestCount,omitempty"`
+
+ // Virtual guests which are currently recovering from a service failure.
+ NetworkMonitorRecoveringVirtualGuests []Virtual_Guest `json:"networkMonitorRecoveringVirtualGuests,omitempty" xmlrpc:"networkMonitorRecoveringVirtualGuests,omitempty"`
+
+ // Hardware which is currently online.
+ NetworkMonitorUpHardware []Hardware `json:"networkMonitorUpHardware,omitempty" xmlrpc:"networkMonitorUpHardware,omitempty"`
+
+ // A count of hardware which is currently online.
+ NetworkMonitorUpHardwareCount *uint `json:"networkMonitorUpHardwareCount,omitempty" xmlrpc:"networkMonitorUpHardwareCount,omitempty"`
+
+ // A count of virtual guests which are currently online.
+ NetworkMonitorUpVirtualGuestCount *uint `json:"networkMonitorUpVirtualGuestCount,omitempty" xmlrpc:"networkMonitorUpVirtualGuestCount,omitempty"`
+
+ // Virtual guests which are currently online.
+ NetworkMonitorUpVirtualGuests []Virtual_Guest `json:"networkMonitorUpVirtualGuests,omitempty" xmlrpc:"networkMonitorUpVirtualGuests,omitempty"`
+
+ // An account's associated storage volumes. This includes Lockbox, NAS, EVault, and iSCSI volumes.
+ NetworkStorage []Network_Storage `json:"networkStorage,omitempty" xmlrpc:"networkStorage,omitempty"`
+
+ // A count of an account's associated storage volumes. This includes Lockbox, NAS, EVault, and iSCSI volumes.
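+ //
+ // Count properties such as this one let a caller size a collection without
+ // downloading every record. When the records themselves are needed, a hedged
+ // sketch of paging through them, assuming softlayer-go's chainable Limit and
+ // Offset options and the generated GetNetworkStorage getter:
+ //
+ //     svc := services.GetAccountService(sess)
+ //     vols, err := svc.Limit(50).Offset(0).GetNetworkStorage() // first page of 50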
+ NetworkStorageCount *uint `json:"networkStorageCount,omitempty" xmlrpc:"networkStorageCount,omitempty"`
+
+ // A count of an account's Network Storage groups.
+ NetworkStorageGroupCount *uint `json:"networkStorageGroupCount,omitempty" xmlrpc:"networkStorageGroupCount,omitempty"`
+
+ // An account's Network Storage groups.
+ NetworkStorageGroups []Network_Storage_Group `json:"networkStorageGroups,omitempty" xmlrpc:"networkStorageGroups,omitempty"`
+
+ // A count of IPSec network tunnels for an account.
+ NetworkTunnelContextCount *uint `json:"networkTunnelContextCount,omitempty" xmlrpc:"networkTunnelContextCount,omitempty"`
+
+ // IPSec network tunnels for an account.
+ NetworkTunnelContexts []Network_Tunnel_Module_Context `json:"networkTunnelContexts,omitempty" xmlrpc:"networkTunnelContexts,omitempty"`
+
+ // A count of all network VLANs assigned to an account.
+ NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"`
+
+ // Whether or not an account has automatic private VLAN spanning enabled.
+ NetworkVlanSpan *Account_Network_Vlan_Span `json:"networkVlanSpan,omitempty" xmlrpc:"networkVlanSpan,omitempty"`
+
+ // All network VLANs assigned to an account.
+ NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"`
+
+ // DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. A count of the allotments for this account and their servers for the next billing cycle. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+ NextBillingPublicAllotmentHardwareBandwidthDetailCount *uint `json:"nextBillingPublicAllotmentHardwareBandwidthDetailCount,omitempty" xmlrpc:"nextBillingPublicAllotmentHardwareBandwidthDetailCount,omitempty"`
+
+ // DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers for the next billing cycle. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+ NextBillingPublicAllotmentHardwareBandwidthDetails []Network_Bandwidth_Version1_Allotment `json:"nextBillingPublicAllotmentHardwareBandwidthDetails,omitempty" xmlrpc:"nextBillingPublicAllotmentHardwareBandwidthDetails,omitempty"`
+
+ // The pre-tax total amount exempt from incubator credit for the account's next invoice. This field is now deprecated and will soon be removed. Please update all references to use nextInvoiceTotalAmount instead.
+ NextInvoiceIncubatorExemptTotal *Float64 `json:"nextInvoiceIncubatorExemptTotal,omitempty" xmlrpc:"nextInvoiceIncubatorExemptTotal,omitempty"`
+
+ // A count of the billing items that will be on an account's next invoice.
+ NextInvoiceTopLevelBillingItemCount *uint `json:"nextInvoiceTopLevelBillingItemCount,omitempty" xmlrpc:"nextInvoiceTopLevelBillingItemCount,omitempty"`
+
+ // The billing items that will be on an account's next invoice.
+ NextInvoiceTopLevelBillingItems []Billing_Item `json:"nextInvoiceTopLevelBillingItems,omitempty" xmlrpc:"nextInvoiceTopLevelBillingItems,omitempty"`
+
+ // The pre-tax total amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
+ NextInvoiceTotalAmount *Float64 `json:"nextInvoiceTotalAmount,omitempty" xmlrpc:"nextInvoiceTotalAmount,omitempty"`
+
+ // The total one-time charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
+ NextInvoiceTotalOneTimeAmount *Float64 `json:"nextInvoiceTotalOneTimeAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeAmount,omitempty"`
+
+ // The total one-time tax amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
+ NextInvoiceTotalOneTimeTaxAmount *Float64 `json:"nextInvoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeTaxAmount,omitempty"`
+
+ // The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
+ NextInvoiceTotalRecurringAmount *Float64 `json:"nextInvoiceTotalRecurringAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringAmount,omitempty"`
+
+ // The total recurring charge amount of an account's next invoice, before any account-level discount is applied, measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
+ NextInvoiceTotalRecurringAmountBeforeAccountDiscount *Float64 `json:"nextInvoiceTotalRecurringAmountBeforeAccountDiscount,omitempty" xmlrpc:"nextInvoiceTotalRecurringAmountBeforeAccountDiscount,omitempty"`
+
+ // The total recurring tax amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
+ NextInvoiceTotalRecurringTaxAmount *Float64 `json:"nextInvoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringTaxAmount,omitempty"`
+
+ // The total taxable recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
+ NextInvoiceTotalTaxableRecurringAmount *Float64 `json:"nextInvoiceTotalTaxableRecurringAmount,omitempty" xmlrpc:"nextInvoiceTotalTaxableRecurringAmount,omitempty"`
+
+ // A count of
+ NotificationSubscriberCount *uint `json:"notificationSubscriberCount,omitempty" xmlrpc:"notificationSubscriberCount,omitempty"`
+
+ // no documentation yet
+ NotificationSubscribers []Notification_Subscriber `json:"notificationSubscribers,omitempty" xmlrpc:"notificationSubscribers,omitempty"`
+
+ // An office phone number assigned to an account.
+ OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"`
+
+ // A count of the open abuse tickets associated with an account.
+ OpenAbuseTicketCount *uint `json:"openAbuseTicketCount,omitempty" xmlrpc:"openAbuseTicketCount,omitempty"`
+
+ // The open abuse tickets associated with an account.
+ OpenAbuseTickets []Ticket `json:"openAbuseTickets,omitempty" xmlrpc:"openAbuseTickets,omitempty"`
+
+ // A count of the open accounting tickets associated with an account.
+ OpenAccountingTicketCount *uint `json:"openAccountingTicketCount,omitempty" xmlrpc:"openAccountingTicketCount,omitempty"`
+
+ // The open accounting tickets associated with an account.
+ OpenAccountingTickets []Ticket `json:"openAccountingTickets,omitempty" xmlrpc:"openAccountingTickets,omitempty"`
+
+ // A count of the open billing tickets associated with an account.
+ OpenBillingTicketCount *uint `json:"openBillingTicketCount,omitempty" xmlrpc:"openBillingTicketCount,omitempty"`
+
+ // The open billing tickets associated with an account.
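+ //
+ // Relational properties like this one are not filled in by a plain GetObject
+ // call; they have to be requested by name in an object mask. A minimal sketch,
+ // assuming softlayer-go's session and services packages (the credentials are
+ // placeholders):
+ //
+ //     sess := session.New(username, apiKey)
+ //     acct, err := services.GetAccountService(sess).
+ //         Mask("id;companyName;openBillingTickets[id,title]").
+ //         GetObject()
+ //
+ // "openBillingTickets" is the JSON name carried by the tag on the field below.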
+ OpenBillingTickets []Ticket `json:"openBillingTickets,omitempty" xmlrpc:"openBillingTickets,omitempty"`
+
+ // A count of open tickets requesting cancellation of services, if any exist.
+ OpenCancellationRequestCount *uint `json:"openCancellationRequestCount,omitempty" xmlrpc:"openCancellationRequestCount,omitempty"`
+
+ // Open tickets requesting cancellation of services, if any exist.
+ OpenCancellationRequests []Billing_Item_Cancellation_Request `json:"openCancellationRequests,omitempty" xmlrpc:"openCancellationRequests,omitempty"`
+
+ // A count of the open tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account.
+ OpenOtherTicketCount *uint `json:"openOtherTicketCount,omitempty" xmlrpc:"openOtherTicketCount,omitempty"`
+
+ // The open tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account.
+ OpenOtherTickets []Ticket `json:"openOtherTickets,omitempty" xmlrpc:"openOtherTickets,omitempty"`
+
+ // A count of an account's open recurring invoices.
+ OpenRecurringInvoiceCount *uint `json:"openRecurringInvoiceCount,omitempty" xmlrpc:"openRecurringInvoiceCount,omitempty"`
+
+ // An account's open recurring invoices.
+ OpenRecurringInvoices []Billing_Invoice `json:"openRecurringInvoices,omitempty" xmlrpc:"openRecurringInvoices,omitempty"`
+
+ // A count of the open sales tickets associated with an account.
+ OpenSalesTicketCount *uint `json:"openSalesTicketCount,omitempty" xmlrpc:"openSalesTicketCount,omitempty"`
+
+ // The open sales tickets associated with an account.
+ OpenSalesTickets []Ticket `json:"openSalesTickets,omitempty" xmlrpc:"openSalesTickets,omitempty"`
+
+ // A count of
+ OpenStackAccountLinkCount *uint `json:"openStackAccountLinkCount,omitempty" xmlrpc:"openStackAccountLinkCount,omitempty"`
+
+ // no documentation yet
+ OpenStackAccountLinks []Account_Link `json:"openStackAccountLinks,omitempty" xmlrpc:"openStackAccountLinks,omitempty"`
+
+ // An account's associated OpenStack-related Object Storage accounts.
+ OpenStackObjectStorage []Network_Storage `json:"openStackObjectStorage,omitempty" xmlrpc:"openStackObjectStorage,omitempty"`
+
+ // A count of an account's associated OpenStack-related Object Storage accounts.
+ OpenStackObjectStorageCount *uint `json:"openStackObjectStorageCount,omitempty" xmlrpc:"openStackObjectStorageCount,omitempty"`
+
+ // A count of the open support tickets associated with an account.
+ OpenSupportTicketCount *uint `json:"openSupportTicketCount,omitempty" xmlrpc:"openSupportTicketCount,omitempty"`
+
+ // The open support tickets associated with an account.
+ OpenSupportTickets []Ticket `json:"openSupportTickets,omitempty" xmlrpc:"openSupportTickets,omitempty"`
+
+ // A count of all open tickets associated with an account.
+ OpenTicketCount *uint `json:"openTicketCount,omitempty" xmlrpc:"openTicketCount,omitempty"`
+
+ // All open tickets associated with an account.
+ OpenTickets []Ticket `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"`
+
+ // All open tickets associated with an account last edited by an employee.
+ OpenTicketsWaitingOnCustomer []Ticket `json:"openTicketsWaitingOnCustomer,omitempty" xmlrpc:"openTicketsWaitingOnCustomer,omitempty"`
+
+ // A count of all open tickets associated with an account last edited by an employee.
+ OpenTicketsWaitingOnCustomerCount *uint `json:"openTicketsWaitingOnCustomerCount,omitempty" xmlrpc:"openTicketsWaitingOnCustomerCount,omitempty"`
+
+ // A count of an account's associated billing orders excluding upgrades.
+ OrderCount *uint `json:"orderCount,omitempty" xmlrpc:"orderCount,omitempty"`
+
+ // An account's associated billing orders excluding upgrades.
+ Orders []Billing_Order `json:"orders,omitempty" xmlrpc:"orders,omitempty"`
+
+ // A count of the billing items that have no parent billing item. These are items that don't necessarily belong to a single server.
+ OrphanBillingItemCount *uint `json:"orphanBillingItemCount,omitempty" xmlrpc:"orphanBillingItemCount,omitempty"`
+
+ // The billing items that have no parent billing item. These are items that don't necessarily belong to a single server.
+ OrphanBillingItems []Billing_Item `json:"orphanBillingItems,omitempty" xmlrpc:"orphanBillingItems,omitempty"`
+
+ // A count of
+ OwnedBrandCount *uint `json:"ownedBrandCount,omitempty" xmlrpc:"ownedBrandCount,omitempty"`
+
+ // no documentation yet
+ OwnedBrands []Brand `json:"ownedBrands,omitempty" xmlrpc:"ownedBrands,omitempty"`
+
+ // A count of
+ OwnedHardwareGenericComponentModelCount *uint `json:"ownedHardwareGenericComponentModelCount,omitempty" xmlrpc:"ownedHardwareGenericComponentModelCount,omitempty"`
+
+ // no documentation yet
+ OwnedHardwareGenericComponentModels []Hardware_Component_Model_Generic `json:"ownedHardwareGenericComponentModels,omitempty" xmlrpc:"ownedHardwareGenericComponentModels,omitempty"`
+
+ // A count of
+ PaymentProcessorCount *uint `json:"paymentProcessorCount,omitempty" xmlrpc:"paymentProcessorCount,omitempty"`
+
+ // no documentation yet
+ PaymentProcessors []Billing_Payment_Processor `json:"paymentProcessors,omitempty" xmlrpc:"paymentProcessors,omitempty"`
+
+ // A count of
+ PendingEventCount *uint `json:"pendingEventCount,omitempty" xmlrpc:"pendingEventCount,omitempty"`
+
+ // no documentation yet
+ PendingEvents []Notification_Occurrence_Event `json:"pendingEvents,omitempty" xmlrpc:"pendingEvents,omitempty"`
+
+ // An account's latest open (pending) invoice.
+ PendingInvoice *Billing_Invoice `json:"pendingInvoice,omitempty" xmlrpc:"pendingInvoice,omitempty"`
+
+ // A count of the top-level invoice items that are on an account's currently pending invoice.
+ PendingInvoiceTopLevelItemCount *uint `json:"pendingInvoiceTopLevelItemCount,omitempty" xmlrpc:"pendingInvoiceTopLevelItemCount,omitempty"`
+
+ // A list of top-level invoice items that are on an account's currently pending invoice.
+ PendingInvoiceTopLevelItems []Billing_Invoice_Item `json:"pendingInvoiceTopLevelItems,omitempty" xmlrpc:"pendingInvoiceTopLevelItems,omitempty"`
+
+ // The total amount of an account's pending invoice, if one exists.
+ PendingInvoiceTotalAmount *Float64 `json:"pendingInvoiceTotalAmount,omitempty" xmlrpc:"pendingInvoiceTotalAmount,omitempty"`
+
+ // The total one-time charges for an account's pending invoice, if one exists. In other words, it is the sum of one-time charges, setup fees, and labor fees. It does not include taxes.
+ PendingInvoiceTotalOneTimeAmount *Float64 `json:"pendingInvoiceTotalOneTimeAmount,omitempty" xmlrpc:"pendingInvoiceTotalOneTimeAmount,omitempty"`
+
+ // The sum of all the taxes related to one time charges for an account's pending invoice, if one exists.
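+ //
+ // Monetary amounts use the package's own Float64 type rather than the builtin
+ // float64 because the API may serialize decimals as quoted strings. The custom
+ // type is assumed to strip the quotes during unmarshaling, roughly:
+ //
+ //     func (f *Float64) UnmarshalJSON(b []byte) error {
+ //         // Accept both 12.34 and "12.34" wire forms.
+ //         v, err := strconv.ParseFloat(strings.Trim(string(b), `"`), 64)
+ //         if err != nil {
+ //             return err
+ //         }
+ //         *f = Float64(v)
+ //         return nil
+ //     }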
+ PendingInvoiceTotalOneTimeTaxAmount *Float64 `json:"pendingInvoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"pendingInvoiceTotalOneTimeTaxAmount,omitempty"`
+
+ // The total recurring amount of an account's pending invoice, if one exists.
+ PendingInvoiceTotalRecurringAmount *Float64 `json:"pendingInvoiceTotalRecurringAmount,omitempty" xmlrpc:"pendingInvoiceTotalRecurringAmount,omitempty"`
+
+ // The total amount of the recurring taxes on an account's pending invoice, if one exists.
+ PendingInvoiceTotalRecurringTaxAmount *Float64 `json:"pendingInvoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"pendingInvoiceTotalRecurringTaxAmount,omitempty"`
+
+ // A count of an account's permission groups.
+ PermissionGroupCount *uint `json:"permissionGroupCount,omitempty" xmlrpc:"permissionGroupCount,omitempty"`
+
+ // An account's permission groups.
+ PermissionGroups []User_Permission_Group `json:"permissionGroups,omitempty" xmlrpc:"permissionGroups,omitempty"`
+
+ // A count of an account's user roles.
+ PermissionRoleCount *uint `json:"permissionRoleCount,omitempty" xmlrpc:"permissionRoleCount,omitempty"`
+
+ // An account's user roles.
+ PermissionRoles []User_Permission_Role `json:"permissionRoles,omitempty" xmlrpc:"permissionRoles,omitempty"`
+
+ // A count of
+ PortableStorageVolumeCount *uint `json:"portableStorageVolumeCount,omitempty" xmlrpc:"portableStorageVolumeCount,omitempty"`
+
+ // no documentation yet
+ PortableStorageVolumes []Virtual_Disk_Image `json:"portableStorageVolumes,omitempty" xmlrpc:"portableStorageVolumes,omitempty"`
+
+ // A count of customer-specified URIs that are downloaded onto a newly provisioned or reloaded server. If the URI is sent over HTTPS, it will be executed directly on the server.
+ PostProvisioningHookCount *uint `json:"postProvisioningHookCount,omitempty" xmlrpc:"postProvisioningHookCount,omitempty"`
+
+ // Customer-specified URIs that are downloaded onto a newly provisioned or reloaded server. If the URI is sent over HTTPS, it will be executed directly on the server.
+ PostProvisioningHooks []Provisioning_Hook `json:"postProvisioningHooks,omitempty" xmlrpc:"postProvisioningHooks,omitempty"`
+
+ // The postal code of the mailing address belonging to an account.
+ PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
+
+ // A count of an account's associated portal users with PPTP VPN access.
+ PptpVpnUserCount *uint `json:"pptpVpnUserCount,omitempty" xmlrpc:"pptpVpnUserCount,omitempty"`
+
+ // An account's associated portal users with PPTP VPN access.
+ PptpVpnUsers []User_Customer `json:"pptpVpnUsers,omitempty" xmlrpc:"pptpVpnUsers,omitempty"`
+
+ // The total recurring amount for an account's previous revenue.
+ PreviousRecurringRevenue *Float64 `json:"previousRecurringRevenue,omitempty" xmlrpc:"previousRecurringRevenue,omitempty"`
+
+ // A count of the item prices that an account is restricted to.
+ PriceRestrictionCount *uint `json:"priceRestrictionCount,omitempty" xmlrpc:"priceRestrictionCount,omitempty"`
+
+ // The item prices that an account is restricted to.
+ PriceRestrictions []Product_Item_Price_Account_Restriction `json:"priceRestrictions,omitempty" xmlrpc:"priceRestrictions,omitempty"`
+
+ // A count of all priority one tickets associated with an account.
+ PriorityOneTicketCount *uint `json:"priorityOneTicketCount,omitempty" xmlrpc:"priorityOneTicketCount,omitempty"`
+
+ // All priority one tickets associated with an account.
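+ //
+ // Ticket collections can also be narrowed server-side with an object filter
+ // instead of being filtered after download. A hedged sketch, assuming
+ // softlayer-go's filter package and the generated GetPriorityOneTickets getter:
+ //
+ //     import "github.com/softlayer/softlayer-go/filter"
+ //
+ //     f := filter.Path("priorityOneTickets.status.name").Eq("Open").Build()
+ //     tix, err := services.GetAccountService(sess).Filter(f).GetPriorityOneTickets()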
+ PriorityOneTickets []Ticket `json:"priorityOneTickets,omitempty" xmlrpc:"priorityOneTickets,omitempty"`
+
+ // DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. A count of the allotments for this account and their servers. The private inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+ PrivateAllotmentHardwareBandwidthDetailCount *uint `json:"privateAllotmentHardwareBandwidthDetailCount,omitempty" xmlrpc:"privateAllotmentHardwareBandwidthDetailCount,omitempty"`
+
+ // DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The private inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+ PrivateAllotmentHardwareBandwidthDetails []Network_Bandwidth_Version1_Allotment `json:"privateAllotmentHardwareBandwidthDetails,omitempty" xmlrpc:"privateAllotmentHardwareBandwidthDetails,omitempty"`
+
+ // A count of private and shared template group objects (parent only) for an account.
+ PrivateBlockDeviceTemplateGroupCount *uint `json:"privateBlockDeviceTemplateGroupCount,omitempty" xmlrpc:"privateBlockDeviceTemplateGroupCount,omitempty"`
+
+ // Private and shared template group objects (parent only) for an account.
+ PrivateBlockDeviceTemplateGroups []Virtual_Guest_Block_Device_Template_Group `json:"privateBlockDeviceTemplateGroups,omitempty" xmlrpc:"privateBlockDeviceTemplateGroups,omitempty"`
+
+ // A count of
+ PrivateIpAddressCount *uint `json:"privateIpAddressCount,omitempty" xmlrpc:"privateIpAddressCount,omitempty"`
+
+ // no documentation yet
+ PrivateIpAddresses []Network_Subnet_IpAddress `json:"privateIpAddresses,omitempty" xmlrpc:"privateIpAddresses,omitempty"`
+
+ // A count of the private network VLANs assigned to an account.
+ PrivateNetworkVlanCount *uint `json:"privateNetworkVlanCount,omitempty" xmlrpc:"privateNetworkVlanCount,omitempty"`
+
+ // The private network VLANs assigned to an account.
+ PrivateNetworkVlans []Network_Vlan `json:"privateNetworkVlans,omitempty" xmlrpc:"privateNetworkVlans,omitempty"`
+
+ // A count of all private subnets associated with an account.
+ PrivateSubnetCount *uint `json:"privateSubnetCount,omitempty" xmlrpc:"privateSubnetCount,omitempty"`
+
+ // All private subnets associated with an account.
+ PrivateSubnets []Network_Subnet `json:"privateSubnets,omitempty" xmlrpc:"privateSubnets,omitempty"`
+
+ // DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. A count of the allotments for this account and their servers. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+ PublicAllotmentHardwareBandwidthDetailCount *uint `json:"publicAllotmentHardwareBandwidthDetailCount,omitempty" xmlrpc:"publicAllotmentHardwareBandwidthDetailCount,omitempty"`
+
+ // DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+ PublicAllotmentHardwareBandwidthDetails []Network_Bandwidth_Version1_Allotment `json:"publicAllotmentHardwareBandwidthDetails,omitempty" xmlrpc:"publicAllotmentHardwareBandwidthDetails,omitempty"`
+
+ // A count of
+ PublicIpAddressCount *uint `json:"publicIpAddressCount,omitempty" xmlrpc:"publicIpAddressCount,omitempty"`
+
+ // no documentation yet
+ PublicIpAddresses []Network_Subnet_IpAddress `json:"publicIpAddresses,omitempty" xmlrpc:"publicIpAddresses,omitempty"`
+
+ // A count of the public network VLANs assigned to an account.
+ PublicNetworkVlanCount *uint `json:"publicNetworkVlanCount,omitempty" xmlrpc:"publicNetworkVlanCount,omitempty"`
+
+ // The public network VLANs assigned to an account.
+ PublicNetworkVlans []Network_Vlan `json:"publicNetworkVlans,omitempty" xmlrpc:"publicNetworkVlans,omitempty"`
+
+ // A count of all public network subnets associated with an account.
+ PublicSubnetCount *uint `json:"publicSubnetCount,omitempty" xmlrpc:"publicSubnetCount,omitempty"`
+
+ // All public network subnets associated with an account.
+ PublicSubnets []Network_Subnet `json:"publicSubnets,omitempty" xmlrpc:"publicSubnets,omitempty"`
+
+ // A count of an account's quotes.
+ QuoteCount *uint `json:"quoteCount,omitempty" xmlrpc:"quoteCount,omitempty"`
+
+ // An account's quotes.
+ Quotes []Billing_Order_Quote `json:"quotes,omitempty" xmlrpc:"quotes,omitempty"`
+
+ // A count of
+ RecentEventCount *uint `json:"recentEventCount,omitempty" xmlrpc:"recentEventCount,omitempty"`
+
+ // no documentation yet
+ RecentEvents []Notification_Occurrence_Event `json:"recentEvents,omitempty" xmlrpc:"recentEvents,omitempty"`
+
+ // The Referral Partner for this account, if any.
+ ReferralPartner *Account `json:"referralPartner,omitempty" xmlrpc:"referralPartner,omitempty"`
+
+ // A count of the accounts this referral partner has referred, if this account is a referral partner.
+ ReferredAccountCount *uint `json:"referredAccountCount,omitempty" xmlrpc:"referredAccountCount,omitempty"`
+
+ // If this account is a referral partner, the accounts this referral partner has referred.
+ ReferredAccounts []Account `json:"referredAccounts,omitempty" xmlrpc:"referredAccounts,omitempty"`
+
+ // A count of
+ RegulatedWorkloadCount *uint `json:"regulatedWorkloadCount,omitempty" xmlrpc:"regulatedWorkloadCount,omitempty"`
+
+ // no documentation yet
+ RegulatedWorkloads []Legal_RegulatedWorkload `json:"regulatedWorkloads,omitempty" xmlrpc:"regulatedWorkloads,omitempty"`
+
+ // A count of remote management command requests for an account.
+ RemoteManagementCommandRequestCount *uint `json:"remoteManagementCommandRequestCount,omitempty" xmlrpc:"remoteManagementCommandRequestCount,omitempty"`
+
+ // Remote management command requests for an account.
+ RemoteManagementCommandRequests []Hardware_Component_RemoteManagement_Command_Request `json:"remoteManagementCommandRequests,omitempty" xmlrpc:"remoteManagementCommandRequests,omitempty"`
+
+ // A count of the Replication events for all Network Storage volumes on an account.
+ ReplicationEventCount *uint `json:"replicationEventCount,omitempty" xmlrpc:"replicationEventCount,omitempty"`
+
+ // The Replication events for all Network Storage volumes on an account.
+ ReplicationEvents []Network_Storage_Event `json:"replicationEvents,omitempty" xmlrpc:"replicationEvents,omitempty"`
+
+ // Indicates whether or not newly created users under this account will be associated with IBMid via an email requiring a response.
+ RequireSilentIBMidUserCreation *bool `json:"requireSilentIBMidUserCreation,omitempty" xmlrpc:"requireSilentIBMidUserCreation,omitempty"`
+
+ // A count of an account's associated top-level resource groups.
+ ResourceGroupCount *uint `json:"resourceGroupCount,omitempty" xmlrpc:"resourceGroupCount,omitempty"`
+
+ // An account's associated top-level resource groups.
+ ResourceGroups []Resource_Group `json:"resourceGroups,omitempty" xmlrpc:"resourceGroups,omitempty"`
+
+ // A count of all routers that an account's VLANs reside on.
+ RouterCount *uint `json:"routerCount,omitempty" xmlrpc:"routerCount,omitempty"`
+
+ // All routers that an account's VLANs reside on.
+ Routers []Hardware `json:"routers,omitempty" xmlrpc:"routers,omitempty"`
+
+ // An account's reverse WHOIS data. This data is used when making SWIP requests.
+ RwhoisData *Network_Subnet_Rwhois_Data `json:"rwhoisData,omitempty" xmlrpc:"rwhoisData,omitempty"`
+
+ // no documentation yet
+ SalesforceAccountLink *Account_Link `json:"salesforceAccountLink,omitempty" xmlrpc:"salesforceAccountLink,omitempty"`
+
+ // The SAML configuration for this account.
+ SamlAuthentication *Account_Authentication_Saml `json:"samlAuthentication,omitempty" xmlrpc:"samlAuthentication,omitempty"`
+
+ // A count of all scale groups on this account.
+ ScaleGroupCount *uint `json:"scaleGroupCount,omitempty" xmlrpc:"scaleGroupCount,omitempty"`
+
+ // All scale groups on this account.
+ ScaleGroups []Scale_Group `json:"scaleGroups,omitempty" xmlrpc:"scaleGroups,omitempty"`
+
+ // A count of the secondary DNS records for a SoftLayer customer account.
+ SecondaryDomainCount *uint `json:"secondaryDomainCount,omitempty" xmlrpc:"secondaryDomainCount,omitempty"`
+
+ // The secondary DNS records for a SoftLayer customer account.
+ SecondaryDomains []Dns_Secondary `json:"secondaryDomains,omitempty" xmlrpc:"secondaryDomains,omitempty"`
+
+ // A count of stored security certificates (i.e. SSL).
+ SecurityCertificateCount *uint `json:"securityCertificateCount,omitempty" xmlrpc:"securityCertificateCount,omitempty"`
+
+ // Stored security certificates (i.e. SSL).
+ SecurityCertificates []Security_Certificate `json:"securityCertificates,omitempty" xmlrpc:"securityCertificates,omitempty"`
+
+ // A count of the security groups belonging to this account.
+ SecurityGroupCount *uint `json:"securityGroupCount,omitempty" xmlrpc:"securityGroupCount,omitempty"`
+
+ // The security groups belonging to this account.
+ SecurityGroups []Network_SecurityGroup `json:"securityGroups,omitempty" xmlrpc:"securityGroups,omitempty"`
+
+ // A count of an account's vulnerability scan requests.
+ SecurityScanRequestCount *uint `json:"securityScanRequestCount,omitempty" xmlrpc:"securityScanRequestCount,omitempty"`
+
+ // An account's vulnerability scan requests.
+ SecurityScanRequests []Network_Security_Scanner_Request `json:"securityScanRequests,omitempty" xmlrpc:"securityScanRequests,omitempty"`
+
+ // A count of the service billing items that will be on an account's next invoice.
+ ServiceBillingItemCount *uint `json:"serviceBillingItemCount,omitempty" xmlrpc:"serviceBillingItemCount,omitempty"`
+
+ // The service billing items that will be on an account's next invoice.
+ ServiceBillingItems []Billing_Item `json:"serviceBillingItems,omitempty" xmlrpc:"serviceBillingItems,omitempty"`
+
+ // A count of shipments that belong to the customer's account.
+ ShipmentCount *uint `json:"shipmentCount,omitempty" xmlrpc:"shipmentCount,omitempty"`
+
+ // Shipments that belong to the customer's account.
+ Shipments []Account_Shipment `json:"shipments,omitempty" xmlrpc:"shipments,omitempty"`
+
+ // A count of customer-specified SSH keys that can be installed on a newly provisioned or reloaded server.
+ SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"`
+
+ // Customer-specified SSH keys that can be installed on a newly provisioned or reloaded server.
+ SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"`
+
+ // A count of an account's associated portal users with SSL VPN access.
+ SslVpnUserCount *uint `json:"sslVpnUserCount,omitempty" xmlrpc:"sslVpnUserCount,omitempty"`
+
+ // An account's associated portal users with SSL VPN access.
+ SslVpnUsers []User_Customer `json:"sslVpnUsers,omitempty" xmlrpc:"sslVpnUsers,omitempty"`
+
+ // A count of an account's virtual guest objects that are hosted on a user-provisioned hypervisor.
+ StandardPoolVirtualGuestCount *uint `json:"standardPoolVirtualGuestCount,omitempty" xmlrpc:"standardPoolVirtualGuestCount,omitempty"`
+
+ // An account's virtual guest objects that are hosted on a user-provisioned hypervisor.
+ StandardPoolVirtualGuests []Virtual_Guest `json:"standardPoolVirtualGuests,omitempty" xmlrpc:"standardPoolVirtualGuests,omitempty"`
+
+ // A two-letter abbreviation of the state in the mailing address belonging to an account. If an account does not reside in a province then this is typically blank.
+ State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
+
+ // The date of an account's last status change.
+ StatusDate *Time `json:"statusDate,omitempty" xmlrpc:"statusDate,omitempty"`
+
+ // A count of all network subnets associated with an account.
+ SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"`
+
+ // A count of
+ SubnetRegistrationCount *uint `json:"subnetRegistrationCount,omitempty" xmlrpc:"subnetRegistrationCount,omitempty"`
+
+ // A count of
+ SubnetRegistrationDetailCount *uint `json:"subnetRegistrationDetailCount,omitempty" xmlrpc:"subnetRegistrationDetailCount,omitempty"`
+
+ // no documentation yet
+ SubnetRegistrationDetails []Account_Regional_Registry_Detail `json:"subnetRegistrationDetails,omitempty" xmlrpc:"subnetRegistrationDetails,omitempty"`
+
+ // no documentation yet
+ SubnetRegistrations []Network_Subnet_Registration `json:"subnetRegistrations,omitempty" xmlrpc:"subnetRegistrations,omitempty"`
+
+ // All network subnets associated with an account.
+ Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"`
+
+ // A count of the SoftLayer employees that an account is assigned to.
+ SupportRepresentativeCount *uint `json:"supportRepresentativeCount,omitempty" xmlrpc:"supportRepresentativeCount,omitempty"`
+
+ // The SoftLayer employees that an account is assigned to.
+ SupportRepresentatives []User_Employee `json:"supportRepresentatives,omitempty" xmlrpc:"supportRepresentatives,omitempty"`
+
+ // A count of the active support subscriptions for this account.
+ SupportSubscriptionCount *uint `json:"supportSubscriptionCount,omitempty" xmlrpc:"supportSubscriptionCount,omitempty"`
+
+ // The active support subscriptions for this account.
+ SupportSubscriptions []Billing_Item `json:"supportSubscriptions,omitempty" xmlrpc:"supportSubscriptions,omitempty"`
+
+ // no documentation yet
+ SupportTier *string `json:"supportTier,omitempty" xmlrpc:"supportTier,omitempty"`
+
+ // A flag indicating to suppress invoices.
+ SuppressInvoicesFlag *bool `json:"suppressInvoicesFlag,omitempty" xmlrpc:"suppressInvoicesFlag,omitempty"`
+
+ // A count of
+ TagCount *uint `json:"tagCount,omitempty" xmlrpc:"tagCount,omitempty"`
+
+ // no documentation yet
+ Tags []Tag `json:"tags,omitempty" xmlrpc:"tags,omitempty"`
+
+ // A count of an account's associated tickets.
+ TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"`
+
+ // An account's associated tickets.
+ Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"`
+
+ // Tickets closed within the last 72 hours or last 10 tickets, whichever is less, associated with an account.
+ TicketsClosedInTheLastThreeDays []Ticket `json:"ticketsClosedInTheLastThreeDays,omitempty" xmlrpc:"ticketsClosedInTheLastThreeDays,omitempty"`
+
+ // A count of tickets closed within the last 72 hours or last 10 tickets, whichever is less, associated with an account.
+ TicketsClosedInTheLastThreeDaysCount *uint `json:"ticketsClosedInTheLastThreeDaysCount,omitempty" xmlrpc:"ticketsClosedInTheLastThreeDaysCount,omitempty"`
+
+ // Tickets closed today associated with an account.
+ TicketsClosedToday []Ticket `json:"ticketsClosedToday,omitempty" xmlrpc:"ticketsClosedToday,omitempty"`
+
+ // A count of tickets closed today associated with an account.
+ TicketsClosedTodayCount *uint `json:"ticketsClosedTodayCount,omitempty" xmlrpc:"ticketsClosedTodayCount,omitempty"`
+
+ // A count of an account's associated Transcode accounts.
+ TranscodeAccountCount *uint `json:"transcodeAccountCount,omitempty" xmlrpc:"transcodeAccountCount,omitempty"`
+
+ // An account's associated Transcode accounts.
+ TranscodeAccounts []Network_Media_Transcode_Account `json:"transcodeAccounts,omitempty" xmlrpc:"transcodeAccounts,omitempty"`
+
+ // A count of an account's associated upgrade requests.
+ UpgradeRequestCount *uint `json:"upgradeRequestCount,omitempty" xmlrpc:"upgradeRequestCount,omitempty"`
+
+ // An account's associated upgrade requests.
+ UpgradeRequests []Product_Upgrade_Request `json:"upgradeRequests,omitempty" xmlrpc:"upgradeRequests,omitempty"`
+
+ // A count of an account's portal users.
+ UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"`
+
+ // An account's portal users.
+ Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"`
+
+ // A count of stored security certificates that are not expired (i.e. SSL).
+ ValidSecurityCertificateCount *uint `json:"validSecurityCertificateCount,omitempty" xmlrpc:"validSecurityCertificateCount,omitempty"`
+
+ // Stored security certificates that are not expired (i.e. SSL).
+ ValidSecurityCertificates []Security_Certificate `json:"validSecurityCertificates,omitempty" xmlrpc:"validSecurityCertificates,omitempty"`
+
+ // Return 0 if VPN updates are currently in progress on this account, otherwise 1.
+ VdrUpdatesInProgressFlag *bool `json:"vdrUpdatesInProgressFlag,omitempty" xmlrpc:"vdrUpdatesInProgressFlag,omitempty"`
+
+ // A count of the bandwidth pooling for this account.
+ VirtualDedicatedRackCount *uint `json:"virtualDedicatedRackCount,omitempty" xmlrpc:"virtualDedicatedRackCount,omitempty"`
+
+ // The bandwidth pooling for this account.
+ VirtualDedicatedRacks []Network_Bandwidth_Version1_Allotment `json:"virtualDedicatedRacks,omitempty" xmlrpc:"virtualDedicatedRacks,omitempty"`
+
+ // A count of an account's associated virtual server virtual disk images.
+ VirtualDiskImageCount *uint `json:"virtualDiskImageCount,omitempty" xmlrpc:"virtualDiskImageCount,omitempty"`
+
+ // An account's associated virtual server virtual disk images.
+ VirtualDiskImages []Virtual_Disk_Image `json:"virtualDiskImages,omitempty" xmlrpc:"virtualDiskImages,omitempty"`
+
+ // A count of an account's associated virtual guest objects.
+ VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"`
+
+ // An account's associated virtual guest objects.
+ VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"`
+
+ // An account's associated virtual guest objects currently over bandwidth allocation.
+ VirtualGuestsOverBandwidthAllocation []Virtual_Guest `json:"virtualGuestsOverBandwidthAllocation,omitempty" xmlrpc:"virtualGuestsOverBandwidthAllocation,omitempty"`
+
+ // A count of an account's associated virtual guest objects currently over bandwidth allocation.
+ VirtualGuestsOverBandwidthAllocationCount *uint `json:"virtualGuestsOverBandwidthAllocationCount,omitempty" xmlrpc:"virtualGuestsOverBandwidthAllocationCount,omitempty"`
+
+ // An account's associated virtual guest objects projected to go over bandwidth allocation.
+ VirtualGuestsProjectedOverBandwidthAllocation []Virtual_Guest `json:"virtualGuestsProjectedOverBandwidthAllocation,omitempty" xmlrpc:"virtualGuestsProjectedOverBandwidthAllocation,omitempty"`
+
+ // A count of an account's associated virtual guest objects projected to go over bandwidth allocation.
+ VirtualGuestsProjectedOverBandwidthAllocationCount *uint `json:"virtualGuestsProjectedOverBandwidthAllocationCount,omitempty" xmlrpc:"virtualGuestsProjectedOverBandwidthAllocationCount,omitempty"`
+
+ // All virtual guests associated with an account that have the cPanel web hosting control panel installed.
+ VirtualGuestsWithCpanel []Virtual_Guest `json:"virtualGuestsWithCpanel,omitempty" xmlrpc:"virtualGuestsWithCpanel,omitempty"`
+
+ // A count of all virtual guests associated with an account that have the cPanel web hosting control panel installed.
+ VirtualGuestsWithCpanelCount *uint `json:"virtualGuestsWithCpanelCount,omitempty" xmlrpc:"virtualGuestsWithCpanelCount,omitempty"`
+
+ // All virtual guests associated with an account that have McAfee Secure software components.
+ VirtualGuestsWithMcafee []Virtual_Guest `json:"virtualGuestsWithMcafee,omitempty" xmlrpc:"virtualGuestsWithMcafee,omitempty"`
+
+ // All virtual guests associated with an account that have McAfee Secure AntiVirus for Redhat software components.
+ VirtualGuestsWithMcafeeAntivirusRedhat []Virtual_Guest `json:"virtualGuestsWithMcafeeAntivirusRedhat,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusRedhat,omitempty"`
+
+ // A count of all virtual guests associated with an account that have McAfee Secure AntiVirus for Redhat software components.
+ VirtualGuestsWithMcafeeAntivirusRedhatCount *uint `json:"virtualGuestsWithMcafeeAntivirusRedhatCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusRedhatCount,omitempty"`
+
+ // A count of all virtual guests associated with an account that have McAfee Secure AntiVirus for Windows software components.
+ VirtualGuestsWithMcafeeAntivirusWindowCount *uint `json:"virtualGuestsWithMcafeeAntivirusWindowCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusWindowCount,omitempty"`
+
+ // All virtual guests associated with an account that have McAfee Secure AntiVirus for Windows software components.
+ VirtualGuestsWithMcafeeAntivirusWindows []Virtual_Guest `json:"virtualGuestsWithMcafeeAntivirusWindows,omitempty" xmlrpc:"virtualGuestsWithMcafeeAntivirusWindows,omitempty"`
+
+ // A count of all virtual guests associated with an account that have McAfee Secure software components.
+ VirtualGuestsWithMcafeeCount *uint `json:"virtualGuestsWithMcafeeCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeCount,omitempty"`
+
+ // All virtual guests associated with an account that have McAfee Secure Intrusion Detection System software components.
+ VirtualGuestsWithMcafeeIntrusionDetectionSystem []Virtual_Guest `json:"virtualGuestsWithMcafeeIntrusionDetectionSystem,omitempty" xmlrpc:"virtualGuestsWithMcafeeIntrusionDetectionSystem,omitempty"`
+
+ // A count of all virtual guests associated with an account that have McAfee Secure Intrusion Detection System software components.
+ VirtualGuestsWithMcafeeIntrusionDetectionSystemCount *uint `json:"virtualGuestsWithMcafeeIntrusionDetectionSystemCount,omitempty" xmlrpc:"virtualGuestsWithMcafeeIntrusionDetectionSystemCount,omitempty"`
+
+ // All virtual guests associated with an account that have the Plesk web hosting control panel installed.
+ VirtualGuestsWithPlesk []Virtual_Guest `json:"virtualGuestsWithPlesk,omitempty" xmlrpc:"virtualGuestsWithPlesk,omitempty"`
+
+ // A count of all virtual guests associated with an account that have the Plesk web hosting control panel installed.
+ VirtualGuestsWithPleskCount *uint `json:"virtualGuestsWithPleskCount,omitempty" xmlrpc:"virtualGuestsWithPleskCount,omitempty"`
+
+ // All virtual guests associated with an account that have the QuantaStor storage system installed.
+ VirtualGuestsWithQuantastor []Virtual_Guest `json:"virtualGuestsWithQuantastor,omitempty" xmlrpc:"virtualGuestsWithQuantastor,omitempty"`
+
+ // A count of all virtual guests associated with an account that have the QuantaStor storage system installed.
+ VirtualGuestsWithQuantastorCount *uint `json:"virtualGuestsWithQuantastorCount,omitempty" xmlrpc:"virtualGuestsWithQuantastorCount,omitempty"`
+
+ // All virtual guests associated with an account that have the Urchin web traffic analytics package installed.
+ VirtualGuestsWithUrchin []Virtual_Guest `json:"virtualGuestsWithUrchin,omitempty" xmlrpc:"virtualGuestsWithUrchin,omitempty"`
+
+ // A count of all virtual guests associated with an account that have the Urchin web traffic analytics package installed.
+ VirtualGuestsWithUrchinCount *uint `json:"virtualGuestsWithUrchinCount,omitempty" xmlrpc:"virtualGuestsWithUrchinCount,omitempty"`
+
+ // The bandwidth pooling for this account.
+ VirtualPrivateRack *Network_Bandwidth_Version1_Allotment `json:"virtualPrivateRack,omitempty" xmlrpc:"virtualPrivateRack,omitempty"`
+
+ // An account's associated virtual server archived storage repositories.
+ VirtualStorageArchiveRepositories []Virtual_Storage_Repository `json:"virtualStorageArchiveRepositories,omitempty" xmlrpc:"virtualStorageArchiveRepositories,omitempty"`
+
+ // A count of an account's associated virtual server archived storage repositories.
+ VirtualStorageArchiveRepositoryCount *uint `json:"virtualStorageArchiveRepositoryCount,omitempty" xmlrpc:"virtualStorageArchiveRepositoryCount,omitempty"`
+
+ // An account's associated virtual server public storage repositories.
+	VirtualStoragePublicRepositories []Virtual_Storage_Repository `json:"virtualStoragePublicRepositories,omitempty" xmlrpc:"virtualStoragePublicRepositories,omitempty"`
+
+	// A count of an account's associated virtual server public storage repositories.
+	VirtualStoragePublicRepositoryCount *uint `json:"virtualStoragePublicRepositoryCount,omitempty" xmlrpc:"virtualStoragePublicRepositoryCount,omitempty"`
+}
+
+// An unfortunate facet of the hosting business is the necessity of dealing with legal and network abuse inquiries. As these types of inquiries frequently contain sensitive information, SoftLayer keeps a separate account contact email address for direct contact about legal and abuse matters, modeled by the SoftLayer_Account_AbuseEmail data type. SoftLayer will typically email an account's abuse email addresses in these types of cases, and an email is automatically sent to an account's abuse email addresses when a legal or abuse ticket is created or updated.
+type Account_AbuseEmail struct {
+	Entity
+
+	// The account associated with an abuse email address.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// A valid email address.
+	Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+}
+
+// The SoftLayer_Account_Address data type contains information on an address associated with a SoftLayer account.
+type Account_Address struct {
+	Entity
+
+	// The account to which this address belongs.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// no documentation yet
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// Line 1 of the address (normally the street address).
+	Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
+
+	// Line 2 of the address.
+	Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
+
+	// The city of the address.
+	City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
+
+	// The contact name (person, office) of the address.
+	ContactName *string `json:"contactName,omitempty" xmlrpc:"contactName,omitempty"`
+
+	// The country of the address.
+	Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
+
+	// The customer user who created this address.
+	CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"`
+
+	// The description of the address.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique id of the address.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Flag to show whether the address is active.
+	IsActive *int `json:"isActive,omitempty" xmlrpc:"isActive,omitempty"`
+
+	// The location of this address.
+	Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+	// The location id of the address.
+	LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"`
+
+	// The employee who last modified this address.
+	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
+
+	// The customer user who last modified this address.
+	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
+
+	// The postal (zip) code of the address.
+	PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
+
+	// The state of the address.
+	State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
+
+	// An account address' type.
+	Type *Account_Address_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// no documentation yet
+type Account_Address_Type struct {
+	Entity
+
+	// DEPRECATED
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// This service allows for a unique identifier to be associated with an existing customer account.
+type Account_Affiliation struct {
+	Entity
+
+	// The account that an affiliation belongs to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// A customer account's internal identifier.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// An affiliate identifier associated with the customer account.
+	AffiliateId *string `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"`
+
+	// The date an account affiliation was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// A customer affiliation's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The date an account affiliation was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+}
+
+// no documentation yet
+type Account_Agreement struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The type of agreement.
+	AgreementType *Account_Agreement_Type `json:"agreementType,omitempty" xmlrpc:"agreementType,omitempty"`
+
+	// The type of agreement identifier.
+	AgreementTypeId *int `json:"agreementTypeId,omitempty" xmlrpc:"agreementTypeId,omitempty"`
+
+	// A count of the files attached to an agreement.
+	AttachedBillingAgreementFileCount *uint `json:"attachedBillingAgreementFileCount,omitempty" xmlrpc:"attachedBillingAgreementFileCount,omitempty"`
+
+	// The files attached to an agreement.
+	AttachedBillingAgreementFiles []Account_MasterServiceAgreement `json:"attachedBillingAgreementFiles,omitempty" xmlrpc:"attachedBillingAgreementFiles,omitempty"`
+
+	// no documentation yet
+	AutoRenew *int `json:"autoRenew,omitempty" xmlrpc:"autoRenew,omitempty"`
+
+	// A count of the billing items associated with an agreement.
+	BillingItemCount *uint `json:"billingItemCount,omitempty" xmlrpc:"billingItemCount,omitempty"`
+
+	// The billing items associated with an agreement.
+	BillingItems []Billing_Item `json:"billingItems,omitempty" xmlrpc:"billingItems,omitempty"`
+
+	// no documentation yet
+	CancellationFee *int `json:"cancellationFee,omitempty" xmlrpc:"cancellationFee,omitempty"`
+
+	// The date an agreement was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The duration in months of an agreement.
+	DurationMonths *int `json:"durationMonths,omitempty" xmlrpc:"durationMonths,omitempty"`
+
+	// The end date of an agreement.
+	EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+	// An agreement's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The effective start date of an agreement.
+	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+	// The status of the agreement.
+	Status *Account_Agreement_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// The status identifier for an agreement.
+	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+	// The title of an agreement.
+	Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+
+	// A count of the top level billing items associated with an agreement.
+	TopLevelBillingItemCount *uint `json:"topLevelBillingItemCount,omitempty" xmlrpc:"topLevelBillingItemCount,omitempty"`
+
+	// The top level billing items associated with an agreement.
+	TopLevelBillingItems []Billing_Item `json:"topLevelBillingItems,omitempty" xmlrpc:"topLevelBillingItems,omitempty"`
+}
+
+// no documentation yet
+type Account_Agreement_Status struct {
+	Entity
+
+	// The name of the agreement status.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Account_Agreement_Type struct {
+	Entity
+
+	// The name of the agreement type.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// A SoftLayer_Account_Attachment_Employee models an assignment of a single [[SoftLayer_User_Employee|employee]] to a single [[SoftLayer_Account|account]].
+type Account_Attachment_Employee struct {
+	Entity
+
+	// An [[SoftLayer_Account|account]] that is assigned to an [[SoftLayer_User_Employee|employee]].
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// An [[SoftLayer_User_Employee|employee]] that is assigned to an [[SoftLayer_Account|account]].
+	Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"`
+
+	// The role of an [[SoftLayer_User_Employee|employee]] that is assigned to an [[SoftLayer_Account|account]].
+	EmployeeRole *Account_Attachment_Employee_Role `json:"employeeRole,omitempty" xmlrpc:"employeeRole,omitempty"`
+
+	// Role identifier.
+	RoleId *int `json:"roleId,omitempty" xmlrpc:"roleId,omitempty"`
+}
+
+// no documentation yet
+type Account_Attachment_Employee_Role struct {
+	Entity
+
+	// no documentation yet
+	Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Many SoftLayer customer accounts have individual attributes assigned to them that describe features or special options for that account, such as special pricing, account statuses, and ordering instructions. The SoftLayer_Account_Attribute data type contains information relating to a single SoftLayer_Account attribute.
+type Account_Attribute struct {
+	Entity
+
+	// The SoftLayer customer account that has an attribute.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The type of attribute assigned to a SoftLayer customer account.
+	AccountAttributeType *Account_Attribute_Type `json:"accountAttributeType,omitempty" xmlrpc:"accountAttributeType,omitempty"`
+
+	// The internal identifier of the type of attribute that a SoftLayer customer account attribute belongs to.
+	AccountAttributeTypeId *int `json:"accountAttributeTypeId,omitempty" xmlrpc:"accountAttributeTypeId,omitempty"`
+
+	// The internal identifier of the SoftLayer customer account that is assigned an account attribute.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// A SoftLayer customer account attribute's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A SoftLayer account attribute's value.
+	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// SoftLayer_Account_Attribute_Type models the type of attribute that can be assigned to a SoftLayer customer account.
+type Account_Attribute_Type struct {
+	Entity
+
+	// A brief description of a SoftLayer account attribute type.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A SoftLayer account attribute type's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A SoftLayer account attribute type's key name. This is typically a shorter version of an attribute type's name.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// A SoftLayer account attribute type's name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Account authentication has many different settings that can be set. This class allows the customer or employee to set these settings.
+type Account_Authentication_Attribute struct {
+	Entity
+
+	// The SoftLayer customer account.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The internal identifier of the SoftLayer customer account that is assigned an account authentication attribute.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// The SoftLayer account authentication that has an attribute.
+	AuthenticationRecord *Account_Authentication_Saml `json:"authenticationRecord,omitempty" xmlrpc:"authenticationRecord,omitempty"`
+
+	// A SoftLayer account authentication attribute's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The type of attribute assigned to a SoftLayer account authentication.
+	Type *Account_Authentication_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// The internal identifier of the type of attribute that a SoftLayer account authentication attribute belongs to.
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+	// A SoftLayer account authentication attribute's value.
+	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// SoftLayer_Account_Authentication_Attribute_Type models the type of attribute that can be assigned to a SoftLayer customer account authentication.
+type Account_Authentication_Attribute_Type struct {
+	Entity
+
+	// A brief description of a SoftLayer account authentication attribute type.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A SoftLayer account authentication attribute type's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A SoftLayer account authentication attribute type's key name. This is typically a shorter version of an attribute type's name.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// A SoftLayer account authentication attribute type's name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// An example of what you can put in as your value.
+	ValueExample *string `json:"valueExample,omitempty" xmlrpc:"valueExample,omitempty"`
+}
+
+// no documentation yet
+type Account_Authentication_OpenIdConnect_Option struct {
+	Entity
+
+	// no documentation yet
+	Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"`
+
+	// no documentation yet
+	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// no documentation yet
+type Account_Authentication_OpenIdConnect_RegistrationInformation struct {
+	Entity
+
+	// no documentation yet
+	ExistingBlueIdFlag *bool `json:"existingBlueIdFlag,omitempty" xmlrpc:"existingBlueIdFlag,omitempty"`
+
+	// no documentation yet
+	FederatedEmailDomainFlag *bool `json:"federatedEmailDomainFlag,omitempty" xmlrpc:"federatedEmailDomainFlag,omitempty"`
+
+	// no documentation yet
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+}
+
+// no documentation yet
+type Account_Authentication_Saml struct {
+	Entity
+
+	// The account associated with this SAML configuration.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The SAML account id.
+	AccountId *string `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// A count of the SAML attribute values for a SoftLayer customer account.
+	AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+	// The SAML attribute values for a SoftLayer customer account.
+	Attributes []Account_Authentication_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+	// The identity provider x509 certificate.
+	Certificate *string `json:"certificate,omitempty" xmlrpc:"certificate,omitempty"`
+
+	// The identity provider x509 certificate fingerprint.
+	CertificateFingerprint *string `json:"certificateFingerprint,omitempty" xmlrpc:"certificateFingerprint,omitempty"`
+
+	// The identity provider entity ID.
+	EntityId *string `json:"entityId,omitempty" xmlrpc:"entityId,omitempty"`
+
+	// The SAML internal identifying number.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The service provider x509 certificate.
+	ServiceProviderCertificate *string `json:"serviceProviderCertificate,omitempty" xmlrpc:"serviceProviderCertificate,omitempty"`
+
+	// The service provider entity ID.
+	ServiceProviderEntityId *string `json:"serviceProviderEntityId,omitempty" xmlrpc:"serviceProviderEntityId,omitempty"`
+
+	// The service provider public key.
+	ServiceProviderPublicKey *string `json:"serviceProviderPublicKey,omitempty" xmlrpc:"serviceProviderPublicKey,omitempty"`
+
+	// The service provider single logout encoding.
+	ServiceProviderSingleLogoutEncoding *string `json:"serviceProviderSingleLogoutEncoding,omitempty" xmlrpc:"serviceProviderSingleLogoutEncoding,omitempty"`
+
+	// The service provider single logout address.
+	ServiceProviderSingleLogoutUrl *string `json:"serviceProviderSingleLogoutUrl,omitempty" xmlrpc:"serviceProviderSingleLogoutUrl,omitempty"`
+
+	// The service provider single sign on encoding.
+	ServiceProviderSingleSignOnEncoding *string `json:"serviceProviderSingleSignOnEncoding,omitempty" xmlrpc:"serviceProviderSingleSignOnEncoding,omitempty"`
+
+	// The service provider single sign on address.
+	ServiceProviderSingleSignOnUrl *string `json:"serviceProviderSingleSignOnUrl,omitempty" xmlrpc:"serviceProviderSingleSignOnUrl,omitempty"`
+
+	// The identity provider single logout encoding.
+	SingleLogoutEncoding *string `json:"singleLogoutEncoding,omitempty" xmlrpc:"singleLogoutEncoding,omitempty"`
+
+	// The identity provider single logout address.
+	SingleLogoutUrl *string `json:"singleLogoutUrl,omitempty" xmlrpc:"singleLogoutUrl,omitempty"`
+
+	// The identity provider single sign on encoding.
+	SingleSignOnEncoding *string `json:"singleSignOnEncoding,omitempty" xmlrpc:"singleSignOnEncoding,omitempty"`
+
+	// The identity provider single sign on address.
+	SingleSignOnUrl *string `json:"singleSignOnUrl,omitempty" xmlrpc:"singleSignOnUrl,omitempty"`
+}
+
+// no documentation yet
+type Account_Classification_Group_Type struct {
+	Entity
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+}
+
+// no documentation yet
+type Account_Contact struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// no documentation yet
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
+
+	// no documentation yet
+	Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
+
+	// no documentation yet
+	AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"`
+
+	// no documentation yet
+	City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
+
+	// no documentation yet
+	CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
+
+	// no documentation yet
+	Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+	// no documentation yet
+	FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"`
+
+	// no documentation yet
+	FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	JobTitle *string `json:"jobTitle,omitempty" xmlrpc:"jobTitle,omitempty"`
+
+	// no documentation yet
+	LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"`
+
+	// no documentation yet
+	PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
+
+	// no documentation yet
+	ProfileName *string `json:"profileName,omitempty" xmlrpc:"profileName,omitempty"`
+
+	// no documentation yet
+	State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
+
+	// no documentation yet
+	Type *Account_Contact_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// no documentation yet
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+	// no documentation yet
+	Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"`
+}
+
+// no documentation yet
+type Account_Contact_Type struct {
+	Entity
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Account_Historical_Report struct {
+	Entity
+}
+
+// no documentation yet
+type Account_Link struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// no documentation yet
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	DestinationAccountAlphanumericId *string `json:"destinationAccountAlphanumericId,omitempty" xmlrpc:"destinationAccountAlphanumericId,omitempty"`
+
+	// no documentation yet
+	DestinationAccountId *int `json:"destinationAccountId,omitempty" xmlrpc:"destinationAccountId,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"`
+
+	// no documentation yet
+	ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"`
+}
+
+// no documentation yet
+type Account_Link_Bluemix struct {
+	Account_Link
+}
+
+// no documentation yet
+type Account_Link_OpenStack struct {
+	Account_Link
+
+	// Pseudonym for destinationAccountAlphanumericId
+	DomainId *string `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"`
+}
+
+// OpenStack domain creation details
+type Account_Link_OpenStack_DomainCreationDetails struct {
+	Entity
+
+	// Id for the domain this user was added to.
+	DomainId *string `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"`
+
+	// Id for the user given the Cloud Admin role for this domain.
+	UserId *string `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+
+	// Name for the user given the Cloud Admin role for this domain.
+	UserName *string `json:"userName,omitempty" xmlrpc:"userName,omitempty"`
+}
+
+// Details required for OpenStack link request
+type Account_Link_OpenStack_LinkRequest struct {
+	Entity
+
+	// Optional password
+	DesiredPassword *string `json:"desiredPassword,omitempty" xmlrpc:"desiredPassword,omitempty"`
+
+	// Optional projectName
+	DesiredProjectName *string `json:"desiredProjectName,omitempty" xmlrpc:"desiredProjectName,omitempty"`
+
+	// Required username
+	DesiredUsername *string `json:"desiredUsername,omitempty" xmlrpc:"desiredUsername,omitempty"`
+}
+
+// OpenStack project creation details
+type Account_Link_OpenStack_ProjectCreationDetails struct {
+	Entity
+
+	// Id for the domain this project was added to.
+	DomainId *string `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"`
+
+	// Id for this project.
+	ProjectId *string `json:"projectId,omitempty" xmlrpc:"projectId,omitempty"`
+
+	// Name for this project.
+	ProjectName *string `json:"projectName,omitempty" xmlrpc:"projectName,omitempty"`
+
+	// Id for the user given the Project Admin role for this project.
+	UserId *string `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+
+	// Name for the user given the Project Admin role for this project.
+ UserName *string `json:"userName,omitempty" xmlrpc:"userName,omitempty"` +} + +// OpenStack project details +type Account_Link_OpenStack_ProjectDetails struct { + Entity + + // Id for this project. + ProjectId *string `json:"projectId,omitempty" xmlrpc:"projectId,omitempty"` + + // Name for this project. + ProjectName *string `json:"projectName,omitempty" xmlrpc:"projectName,omitempty"` +} + +// no documentation yet +type Account_Link_ThePlanet struct { + Account_Link +} + +// no documentation yet +type Account_Link_Vendor struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Account_Lockdown_Request data type holds information on API requests from brand customers. +type Account_Lockdown_Request struct { + Entity + + // Account ID associated with this lockdown request. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Type of request. + Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"` + + // Timestamp when the lockdown request was initially made. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // ID of this lockdown request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Timestamp when the lockdown request was modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Status of the lockdown request denoting whether it's been completed. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// no documentation yet +type Account_MasterServiceAgreement struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Guid *string `json:"guid,omitempty" xmlrpc:"guid,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Account_Media data type contains information on a single piece of media associated with a Data Transfer Service request. +type Account_Media struct { + Entity + + // The account to which the media belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The customer user who created the media object. + CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"` + + // The datacenter where the media resides. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // The description of the media. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The unique id of the media. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The employee who last modified the media. + ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"` + + // The customer user who last modified the media. + ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"` + + // The request to which the media belongs. + Request *Account_Media_Data_Transfer_Request `json:"request,omitempty" xmlrpc:"request,omitempty"` + + // The request id of the media. 
+ RequestId *int `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"` + + // The manufacturer's serial number of the media. + SerialNumber *string `json:"serialNumber,omitempty" xmlrpc:"serialNumber,omitempty"` + + // The media's type. + Type *Account_Media_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The type id of the media. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // A guest's associated EVault network storage service account. + Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"` +} + +// The SoftLayer_Account_Media_Data_Transfer_Request data type contains information on a single Data Transfer Service request. Creation of these requests is limited to SoftLayer customers through the SoftLayer Customer Portal. +type Account_Media_Data_Transfer_Request struct { + Entity + + // The account to which the request belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account id of the request. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of the active tickets that are attached to the data transfer request. + ActiveTicketCount *uint `json:"activeTicketCount,omitempty" xmlrpc:"activeTicketCount,omitempty"` + + // The active tickets that are attached to the data transfer request. + ActiveTickets []Ticket `json:"activeTickets,omitempty" xmlrpc:"activeTickets,omitempty"` + + // The billing item for the original request. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The customer user who created the request. + CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"` + + // The create user id of the request. + CreateUserId *int `json:"createUserId,omitempty" xmlrpc:"createUserId,omitempty"` + + // The end date of the request. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The unique id of the request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The media of the request. + Media *Account_Media `json:"media,omitempty" xmlrpc:"media,omitempty"` + + // The employee who last modified the request. + ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"` + + // The customer user who last modified the request. + ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"` + + // The modify user id of the request. + ModifyUserId *int `json:"modifyUserId,omitempty" xmlrpc:"modifyUserId,omitempty"` + + // A count of the shipments of the request. + ShipmentCount *uint `json:"shipmentCount,omitempty" xmlrpc:"shipmentCount,omitempty"` + + // The shipments of the request. + Shipments []Account_Shipment `json:"shipments,omitempty" xmlrpc:"shipments,omitempty"` + + // The start date of the request. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // The status of the request. + Status *Account_Media_Data_Transfer_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The status id of the request. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // A count of all tickets that are attached to the data transfer request. + TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"` + + // All tickets that are attached to the data transfer request. 
+	Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"`
+}
+
+// The SoftLayer_Account_Media_Data_Transfer_Request_Status data type contains general information relating to the statuses to which a Data Transfer Request may be set.
+type Account_Media_Data_Transfer_Request_Status struct {
+	Entity
+
+	// The description of the request status.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique id of the request status.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The unique keyname of the request status.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The name of the request status.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Account_Media_Type data type contains general information relating to the different types of media devices that SoftLayer currently supports, as part of the Data Transfer Request Service. Devices such as USB hard drives and flash drives, as well as optical media such as CDs and DVDs, are currently supported.
+type Account_Media_Type struct {
+	Entity
+
+	// The description of the media type.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique id of the media type.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The unique keyname of the media type.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The name of the media type.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Account_Network_Vlan_Span data type exposes the setting which controls the automatic spanning of private VLANs attached to a given customer's account.
+type Account_Network_Vlan_Span struct {
+	Entity
+
+	// The SoftLayer customer account associated with a VLAN.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// Flag indicating whether the customer wishes to have all private network VLANs associated with the account automatically joined [0 or 1]
+	EnabledFlag *bool `json:"enabledFlag,omitempty" xmlrpc:"enabledFlag,omitempty"`
+
+	// The unique internal identifier of the SoftLayer_Account_Network_Vlan_Span object.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Timestamp of the last time the ACL for this account was applied.
+	LastAppliedDate *Time `json:"lastAppliedDate,omitempty" xmlrpc:"lastAppliedDate,omitempty"`
+
+	// Timestamp of the last time the subnet hash was verified for this VLAN span record.
+	LastVerifiedDate *Time `json:"lastVerifiedDate,omitempty" xmlrpc:"lastVerifiedDate,omitempty"`
+
+	// Timestamp of the last edit of the record.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+}
+
+// no documentation yet
+type Account_Note struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// no documentation yet
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
+
+	// no documentation yet
+	NoteHistory []Account_Note_History `json:"noteHistory,omitempty" xmlrpc:"noteHistory,omitempty"`
+
+	// A count of
+	NoteHistoryCount *uint `json:"noteHistoryCount,omitempty" xmlrpc:"noteHistoryCount,omitempty"`
+
+	// no documentation yet
+	NoteType *Account_Note_Type `json:"noteType,omitempty" xmlrpc:"noteType,omitempty"`
+
+	// no documentation yet
+	NoteTypeId *int `json:"noteTypeId,omitempty" xmlrpc:"noteTypeId,omitempty"`
+
+	// no documentation yet
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// no documentation yet
+type Account_Note_History struct {
+	Entity
+
+	// no documentation yet
+	AccountNote *Account_Note `json:"accountNote,omitempty" xmlrpc:"accountNote,omitempty"`
+
+	// no documentation yet
+	AccountNoteId *int `json:"accountNoteId,omitempty" xmlrpc:"accountNoteId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
+
+	// no documentation yet
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// no documentation yet
+type Account_Note_Type struct {
+	Entity
+
+	// no documentation yet
+	BrandId *int `json:"brandId,omitempty" xmlrpc:"brandId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// no documentation yet
+	ValueExpression *string `json:"valueExpression,omitempty" xmlrpc:"valueExpression,omitempty"`
+}
+
+// no documentation yet
+type Account_Partner_Referral_Prospect struct {
+	User_Customer_Prospect
+
+	// no documentation yet
+	CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
+
+	// no documentation yet
+	EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"`
+
+	// no documentation yet
+	FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+}
+
+// The SoftLayer_Account_Password data type contains usernames, passwords and notes for services that may be required by external applications, such as the Webcc interface for the EVault Storage service.
+type Account_Password struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The SoftLayer customer account id that a username/password combination is associated with.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// A username/password combination's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A simple description of a username/password combination. These notes don't affect portal functionality.
+	Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+	// The password portion of a username/password combination.
+	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+	// The service that an account/password combination is tied to.
+	Type *Account_Password_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// An identifier relating to a username/password combination's associated service.
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+	// The username portion of a username/password combination.
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// Every username and password combination associated with a SoftLayer customer account belongs to a service that SoftLayer provides. The relationship between a username/password and its service is provided by the SoftLayer_Account_Password_Type data type. Each username/password belongs to a single service type.
+type Account_Password_Type struct {
+	Entity
+
+	// A description of the use for the account username/password combination.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+}
+
+// no documentation yet
+type Account_Regional_Registry_Detail struct {
+	Entity
+
+	// The account that this detail object belongs to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The detail object's associated [[SoftLayer_Account|account]] id
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// The date and time the detail object was created
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// A count of references to the [[SoftLayer_Network_Subnet_Registration|registration objects]] that consume this detail object.
+	DetailCount *uint `json:"detailCount,omitempty" xmlrpc:"detailCount,omitempty"`
+
+	// The associated type of this detail object.
+	DetailType *Account_Regional_Registry_Detail_Type `json:"detailType,omitempty" xmlrpc:"detailType,omitempty"`
+
+	// The detail object's associated [[SoftLayer_Account_Regional_Registry_Detail_Type|type]] id
+	DetailTypeId *int `json:"detailTypeId,omitempty" xmlrpc:"detailTypeId,omitempty"`
+
+	// References to the [[SoftLayer_Network_Subnet_Registration|registration objects]] that consume this detail object.
+ Details []Network_Subnet_Registration_Details `json:"details,omitempty" xmlrpc:"details,omitempty"` + + // Unique ID of the detail object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date and time the detail object was last modified + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The individual properties that define this detail object's values. + Properties []Account_Regional_Registry_Detail_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // A count of the individual properties that define this detail object's values. + PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"` + + // The associated RWhois handle of this detail object. Used only when detailed reassignments are necessary. + RegionalInternetRegistryHandle *Account_Rwhois_Handle `json:"regionalInternetRegistryHandle,omitempty" xmlrpc:"regionalInternetRegistryHandle,omitempty"` + + // The detail object's associated [[SoftLayer_Account_Rwhois_Handle|RIR handle]] id + RegionalInternetRegistryHandleId *int `json:"regionalInternetRegistryHandleId,omitempty" xmlrpc:"regionalInternetRegistryHandleId,omitempty"` +} + +// Subnet registration properties are used to define various attributes of the [[SoftLayer_Account_Regional_Registry_Detail|detail objects]]. These properties are defined by the [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] objects, which describe the available value formats. +type Account_Regional_Registry_Detail_Property struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The [[SoftLayer_Account_Regional_Registry_Detail]] object this property belongs to + Detail *Account_Regional_Registry_Detail `json:"detail,omitempty" xmlrpc:"detail,omitempty"` + + // Unique ID of the property object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] object this property belongs to + PropertyType *Account_Regional_Registry_Detail_Property_Type `json:"propertyType,omitempty" xmlrpc:"propertyType,omitempty"` + + // The numeric ID of the related [[SoftLayer_Account_Regional_Registry_Detail_Property_Type|property type object]] + PropertyTypeId *int `json:"propertyTypeId,omitempty" xmlrpc:"propertyTypeId,omitempty"` + + // The numeric ID of the related [[SoftLayer_Account_Regional_Registry_Detail|detail object]] + RegistrationDetailId *int `json:"registrationDetailId,omitempty" xmlrpc:"registrationDetailId,omitempty"` + + // When multiple properties exist for a property type, defines the position in the sequence of those properties + SequencePosition *int `json:"sequencePosition,omitempty" xmlrpc:"sequencePosition,omitempty"` + + // The value of the property + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Subnet Registration Detail Property Type objects describe the nature of a [[SoftLayer_Account_Regional_Registry_Detail_Property]] object. These types use [http://php.net/pcre.pattern.php Perl-Compatible Regular Expressions] to validate the value of a property object. 
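+//
+// As a rough sketch of how a client might apply such an expression (the
+// pattern below is purely hypothetical, and note that Go's regexp package
+// implements RE2, which accepts most but not all Perl-compatible syntax):
+//
+//	expr := `^[A-Z][A-Z0-9]*$` // a hypothetical ValueExpression
+//	ok, err := regexp.MatchString(expr, *property.Value)
+//	if err != nil || !ok {
+//		// the value does not match the property type's required format
+//	}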
+type Account_Regional_Registry_Detail_Property_Type struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Unique numeric ID of the property type object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Code-friendly string name of the property type + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Human-readable name of the property type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A Perl-compatible regular expression used to describe the valid format of the property + ValueExpression *string `json:"valueExpression,omitempty" xmlrpc:"valueExpression,omitempty"` +} + +// Subnet Registration Detail Type objects describe the nature of a [[SoftLayer_Account_Regional_Registry_Detail]] object. +// +// The standard values for these objects are as follows:
+// * NETWORK - The detail object represents the information for a [[SoftLayer_Network_Subnet|subnet]]
+// * NETWORK6 - The detail object represents the information for an [[SoftLayer_Network_Subnet_Version6|IPv6 subnet]]
+// * PERSON - The detail object represents the information for a customer with the RIR
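+//
+// A minimal sketch of how client code might branch on these values via the
+// KeyName field below (the variable name is illustrative only):
+//
+//	switch *detailType.KeyName {
+//	case "NETWORK", "NETWORK6":
+//		// the detail describes an IPv4 or IPv6 subnet
+//	case "PERSON":
+//		// the detail describes a customer registered with the RIR
+//	}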
+type Account_Regional_Registry_Detail_Type struct {
+	Entity
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// Unique numeric ID of the detail type object
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Code-friendly string name of the detail type
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// Human-readable name of the detail type
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Account_Regional_Registry_Detail_Version4_Person_Default data type contains general information relating to a single SoftLayer RIR account. RIR account information in this type, such as names, addresses, and phone numbers, is assigned to the registry only and not to users belonging to the account.
+type Account_Regional_Registry_Detail_Version4_Person_Default struct {
+	Account_Regional_Registry_Detail
+}
+
+// no documentation yet
+type Account_Reports_Request struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// A request's corresponding external contact, if one exists.
+	AccountContact *Account_Contact `json:"accountContact,omitempty" xmlrpc:"accountContact,omitempty"`
+
+	// no documentation yet
+	AccountContactId *int `json:"accountContactId,omitempty" xmlrpc:"accountContactId,omitempty"`
+
+	// no documentation yet
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	ComplianceReportTypeId *string `json:"complianceReportTypeId,omitempty" xmlrpc:"complianceReportTypeId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	EmployeeRecordId *int `json:"employeeRecordId,omitempty" xmlrpc:"employeeRecordId,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	Nda *string `json:"nda,omitempty" xmlrpc:"nda,omitempty"`
+
+	// no documentation yet
+	Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+	// no documentation yet
+	Report *string `json:"report,omitempty" xmlrpc:"report,omitempty"`
+
+	// The type of report the customer is requesting.
+	ReportType *Compliance_Report_Type `json:"reportType,omitempty" xmlrpc:"reportType,omitempty"`
+
+	// no documentation yet
+	RequestKey *string `json:"requestKey,omitempty" xmlrpc:"requestKey,omitempty"`
+
+	// no documentation yet
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// no documentation yet
+	Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+	// no documentation yet
+	TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+
+	// The customer user that initiated a report request.
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+	// no documentation yet
+	UsrRecordId *int `json:"usrRecordId,omitempty" xmlrpc:"usrRecordId,omitempty"`
+}
+
+// Provides a means of tracking handle identifiers at the various regional internet registries (RIRs). These objects are used by the [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]] objects to identify a customer or organization when a subnet is registered.
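+//
+// Note that, as everywhere in this package, the fields are pointers so that
+// unset members can be omitted when (un)marshaling; a minimal sketch of
+// reading a handle defensively (variable names are illustrative only):
+//
+//	var handle Account_Rwhois_Handle // e.g. decoded from an API response
+//	if handle.Handle != nil {
+//		fmt.Println("RIR handle:", *handle.Handle)
+//	}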
+type Account_Rwhois_Handle struct {
+	Entity
+
+	// The account that this handle belongs to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The handle object's associated [[SoftLayer_Account|account]] id
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The handle object's unique identifier as assigned by the RIR.
+	Handle *string `json:"handle,omitempty" xmlrpc:"handle,omitempty"`
+
+	// Unique ID of the handle object
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+}
+
+// The SoftLayer_Account_Shipment data type contains information relating to a shipment. Basic information such as addresses, the shipment courier, and any tracking information for a shipment is accessible with this data type.
+type Account_Shipment struct {
+	Entity
+
+	// The account to which the shipment belongs.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The account id of the shipment.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// The courier handling the shipment.
+	Courier *Auxiliary_Shipping_Courier `json:"courier,omitempty" xmlrpc:"courier,omitempty"`
+
+	// The courier id of the shipment.
+	CourierId *int `json:"courierId,omitempty" xmlrpc:"courierId,omitempty"`
+
+	// The courier name of the shipment.
+	CourierName *string `json:"courierName,omitempty" xmlrpc:"courierName,omitempty"`
+
+	// The employee who created the shipment.
+	CreateEmployee *User_Employee `json:"createEmployee,omitempty" xmlrpc:"createEmployee,omitempty"`
+
+	// The customer user who created the shipment.
+	CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"`
+
+	// The create user id of the shipment.
+	CreateUserId *int `json:"createUserId,omitempty" xmlrpc:"createUserId,omitempty"`
+
+	// The address at which the shipment is received.
+	DestinationAddress *Account_Address `json:"destinationAddress,omitempty" xmlrpc:"destinationAddress,omitempty"`
+
+	// The destination address id of the shipment.
+	DestinationAddressId *int `json:"destinationAddressId,omitempty" xmlrpc:"destinationAddressId,omitempty"`
+
+	// The destination date of the shipment.
+	DestinationDate *Time `json:"destinationDate,omitempty" xmlrpc:"destinationDate,omitempty"`
+
+	// The unique id of the shipment.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The employee who last modified the shipment.
+	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
+
+	// The customer user who last modified the shipment.
+	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
+
+	// The modify user id of the shipment.
+	ModifyUserId *int `json:"modifyUserId,omitempty" xmlrpc:"modifyUserId,omitempty"`
+
+	// The shipment note (special handling instructions).
+	Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
+
+	// The address from which the shipment is sent.
+	OriginationAddress *Account_Address `json:"originationAddress,omitempty" xmlrpc:"originationAddress,omitempty"`
+
+	// The origination address id of the shipment.
+	OriginationAddressId *int `json:"originationAddressId,omitempty" xmlrpc:"originationAddressId,omitempty"`
+
+	// The origination date of the shipment.
+	OriginationDate *Time `json:"originationDate,omitempty" xmlrpc:"originationDate,omitempty"`
+
+	// A count of the items in the shipment.
+	ShipmentItemCount *uint `json:"shipmentItemCount,omitempty" xmlrpc:"shipmentItemCount,omitempty"`
+
+	// The items in the shipment.
+	ShipmentItems []Account_Shipment_Item `json:"shipmentItems,omitempty" xmlrpc:"shipmentItems,omitempty"`
+
+	// The status of the shipment.
+	Status *Account_Shipment_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// The status id of the shipment.
+	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+	// The tracking data for the shipment.
+	TrackingData []Account_Shipment_Tracking_Data `json:"trackingData,omitempty" xmlrpc:"trackingData,omitempty"`
+
+	// A count of the tracking data for the shipment.
+	TrackingDataCount *uint `json:"trackingDataCount,omitempty" xmlrpc:"trackingDataCount,omitempty"`
+
+	// The type of shipment (e.g. for Data Transfer Service or Colocation Service).
+	Type *Account_Shipment_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// The type id of the shipment.
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+}
+
+// The SoftLayer_Account_Shipment_Item data type contains information relating to a shipment's item. Basic information such as addresses, the shipment courier, and any tracking information for a shipment is accessible with this data type.
+type Account_Shipment_Item struct {
+	Entity
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The description of the shipping item.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique id of the shipping item.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The package id of the shipping item.
+	PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+
+	// The shipment to which this item belongs.
+	Shipment *Account_Shipment `json:"shipment,omitempty" xmlrpc:"shipment,omitempty"`
+
+	// The shipment id of the shipping item.
+	ShipmentId *int `json:"shipmentId,omitempty" xmlrpc:"shipmentId,omitempty"`
+
+	// The item id of the shipping item.
+	ShipmentItemId *int `json:"shipmentItemId,omitempty" xmlrpc:"shipmentItemId,omitempty"`
+
+	// The type of this shipment item.
+	ShipmentItemType *Account_Shipment_Item_Type `json:"shipmentItemType,omitempty" xmlrpc:"shipmentItemType,omitempty"`
+
+	// The item type id of the shipping item.
+	ShipmentItemTypeId *int `json:"shipmentItemTypeId,omitempty" xmlrpc:"shipmentItemTypeId,omitempty"`
+}
+
+// no documentation yet
+type Account_Shipment_Item_Type struct {
+	Entity
+
+	// DEPRECATED
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Account_Shipment_Resource_Type struct {
+	Entity
+}
+
+// no documentation yet
+type Account_Shipment_Status struct {
+	Entity
+
+	// DEPRECATED
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Account_Shipment_Tracking_Data data type contains information on a single piece of tracking information pertaining to a shipment. This tracking information includes the tracking numbers by which the shipment may be tracked through the shipping courier.
+type Account_Shipment_Tracking_Data struct {
+	Entity
+
+	// The employee who created the tracking datum.
+	CreateEmployee *User_Employee `json:"createEmployee,omitempty" xmlrpc:"createEmployee,omitempty"`
+
+	// The customer user who created the tracking datum.
+	CreateUser *User_Customer `json:"createUser,omitempty" xmlrpc:"createUser,omitempty"`
+
+	// The create user id of the tracking data.
+	CreateUserId *int `json:"createUserId,omitempty" xmlrpc:"createUserId,omitempty"`
+
+	// The unique id of the tracking data.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The employee who last modified the tracking datum.
+	ModifyEmployee *User_Employee `json:"modifyEmployee,omitempty" xmlrpc:"modifyEmployee,omitempty"`
+
+	// The customer user who last modified the tracking datum.
+	ModifyUser *User_Customer `json:"modifyUser,omitempty" xmlrpc:"modifyUser,omitempty"`
+
+	// The modify user id of the tracking data.
+	ModifyUserId *int `json:"modifyUserId,omitempty" xmlrpc:"modifyUserId,omitempty"`
+
+	// The package id of the tracking data.
+	PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+
+	// The sequence of the tracking data.
+	Sequence *int `json:"sequence,omitempty" xmlrpc:"sequence,omitempty"`
+
+	// The shipment of the tracking datum.
+	Shipment *Account_Shipment `json:"shipment,omitempty" xmlrpc:"shipment,omitempty"`
+
+	// The shipment id of the tracking data.
+	ShipmentId *int `json:"shipmentId,omitempty" xmlrpc:"shipmentId,omitempty"`
+
+	// The tracking data (tracking number/reference number).
+ TrackingData *string `json:"trackingData,omitempty" xmlrpc:"trackingData,omitempty"`
+}
+
+// no documentation yet
+type Account_Shipment_Type struct {
+ Entity
+
+ // DEPRECATED
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Account_Status struct {
+ Entity
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/auxiliary.go b/vendor/github.com/softlayer/softlayer-go/datatypes/auxiliary.go
new file mode 100644
index 0000000000..84f03a52e9
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/auxiliary.go
@@ -0,0 +1,342 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// no documentation yet
+type Auxiliary_Marketing_Event struct {
+ Entity
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ EnabledFlag *int `json:"enabledFlag,omitempty" xmlrpc:"enabledFlag,omitempty"`
+
+ // no documentation yet
+ EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+ // no documentation yet
+ Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // no documentation yet
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+ // no documentation yet
+ Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+
+ // no documentation yet
+ Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Network_Status struct {
+ Entity
+}
+
+// A SoftLayer_Auxiliary_Notification_Emergency data object represents a notification event being broadcast to the SoftLayer customer base. It is used to provide information regarding outages or current known issues.
+type Auxiliary_Notification_Emergency struct {
+ Entity
+
+ // The date this event was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The device (if any) affected by this event.
+ Device *string `json:"device,omitempty" xmlrpc:"device,omitempty"`
+
+ // The duration of this event.
+ Duration *string `json:"duration,omitempty" xmlrpc:"duration,omitempty"`
+
+ // The unique identifier of this event.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The location affected by this event.
+ Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // A message describing this event.
+ Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"`
+
+ // The last date this event was modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The service(s) (if any) affected by this event.
+ ServicesAffected *string `json:"servicesAffected,omitempty" xmlrpc:"servicesAffected,omitempty"`
+
+ // The signature of the SoftLayer employee department associated with this notification.
+ Signature *Auxiliary_Notification_Emergency_Signature `json:"signature,omitempty" xmlrpc:"signature,omitempty"`
+
+ // The date this event will start.
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+ // The status of this notification.
+ Status *Auxiliary_Notification_Emergency_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // Current status record for this event.
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+}
+
+// Every SoftLayer_Auxiliary_Notification_Emergency has a signatureId that references a SoftLayer_Auxiliary_Notification_Emergency_Signature data type. The signature is the user or group responsible for the current event.
+type Auxiliary_Notification_Emergency_Signature struct {
+ Entity
+
+ // The name or signature for the current Emergency Notification.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Every SoftLayer_Auxiliary_Notification_Emergency has a statusId that references a SoftLayer_Auxiliary_Notification_Emergency_Status data type. The status is used to determine the current state of the event.
+type Auxiliary_Notification_Emergency_Status struct {
+ Entity
+
+ // A name describing the status of the current Emergency Notification.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release struct {
+ Entity
+
+ // no documentation yet
+ About []Auxiliary_Press_Release_About_Press_Release `json:"about,omitempty" xmlrpc:"about,omitempty"`
+
+ // A count of
+ AboutCount *uint `json:"aboutCount,omitempty" xmlrpc:"aboutCount,omitempty"`
+
+ // A count of
+ ContactCount *uint `json:"contactCount,omitempty" xmlrpc:"contactCount,omitempty"`
+
+ // no documentation yet
+ Contacts []Auxiliary_Press_Release_Contact_Press_Release `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"`
+
+ // A press release's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of
+ MediaPartnerCount *uint `json:"mediaPartnerCount,omitempty" xmlrpc:"mediaPartnerCount,omitempty"`
+
+ // no documentation yet
+ MediaPartners []Auxiliary_Press_Release_Media_Partner_Press_Release `json:"mediaPartners,omitempty" xmlrpc:"mediaPartners,omitempty"`
+
+ // no documentation yet
+ PressReleaseContent *Auxiliary_Press_Release_Content `json:"pressReleaseContent,omitempty" xmlrpc:"pressReleaseContent,omitempty"`
+
+ // The date a press release was published.
+ PublishDate *Time `json:"publishDate,omitempty" xmlrpc:"publishDate,omitempty"`
+
+ // A press release's location.
+ ReleaseLocation *string `json:"releaseLocation,omitempty" xmlrpc:"releaseLocation,omitempty"`
+
+ // A press release's sub-title.
+ SubTitle *string `json:"subTitle,omitempty" xmlrpc:"subTitle,omitempty"`
+
+ // A press release's title.
+ Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+
+ // Whether or not a press release is highlighted on the SoftLayer Website.
+ WebsiteHighlightFlag *bool `json:"websiteHighlightFlag,omitempty" xmlrpc:"websiteHighlightFlag,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_About struct {
+ Entity
+
+ // A press release about's content.
+ Content *string `json:"content,omitempty" xmlrpc:"content,omitempty"`
+
+ // A press release about's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A press release about's title.
+ Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_About_Press_Release struct {
+ Entity
+
+ // A count of
+ AboutParagraphCount *uint `json:"aboutParagraphCount,omitempty" xmlrpc:"aboutParagraphCount,omitempty"`
+
+ // no documentation yet
+ AboutParagraphs []Auxiliary_Press_Release_About `json:"aboutParagraphs,omitempty" xmlrpc:"aboutParagraphs,omitempty"`
+
+ // A press release about cross reference's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A press release about's internal identifier.
+ PressReleaseAboutId *int `json:"pressReleaseAboutId,omitempty" xmlrpc:"pressReleaseAboutId,omitempty"`
+
+ // A count of
+ PressReleaseCount *uint `json:"pressReleaseCount,omitempty" xmlrpc:"pressReleaseCount,omitempty"`
+
+ // A press release internal identifier.
+ PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"`
+
+ // no documentation yet
+ PressReleases []Auxiliary_Press_Release `json:"pressReleases,omitempty" xmlrpc:"pressReleases,omitempty"`
+
+ // The number that associates an about paragraph with a press release, used for sorting.
+ SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Contact struct {
+ Entity
+
+ // A press release contact's email address.
+ Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+ // A press release contact's first name.
+ FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+ // A press release contact's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A press release contact's last name.
+ LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+ // A press release contact's phone number.
+ Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"`
+
+ // A press release contact's professional title.
+ ProfessionalTitle *string `json:"professionalTitle,omitempty" xmlrpc:"professionalTitle,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Contact_Press_Release struct {
+ Entity
+
+ // A count of
+ ContactCount *uint `json:"contactCount,omitempty" xmlrpc:"contactCount,omitempty"`
+
+ // no documentation yet
+ Contacts []Auxiliary_Press_Release_Contact `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"`
+
+ // A press release contact cross reference's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A press release contact's internal identifier.
+ PressReleaseContactId *int `json:"pressReleaseContactId,omitempty" xmlrpc:"pressReleaseContactId,omitempty"`
+
+ // A count of
+ PressReleaseCount *uint `json:"pressReleaseCount,omitempty" xmlrpc:"pressReleaseCount,omitempty"`
+
+ // A press release internal identifier.
+ PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"`
+
+ // no documentation yet
+ PressReleases []Auxiliary_Press_Release `json:"pressReleases,omitempty" xmlrpc:"pressReleases,omitempty"`
+
+ // The number that associates a contact with a press release, used for sorting.
+ SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Content struct {
+ Entity
+
+ // the id of a single press release
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // the press release id that the content belongs to
+ PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"`
+
+ // the content of a press release
+ Text *string `json:"text,omitempty" xmlrpc:"text,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Media_Partner struct {
+ Entity
+
+ // A press release media partner's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A press release media partner's name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Media_Partner_Press_Release struct {
+ Entity
+
+ // A press release media partner cross reference's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of
+ MediaPartnerCount *uint `json:"mediaPartnerCount,omitempty" xmlrpc:"mediaPartnerCount,omitempty"`
+
+ // A press release media partner's internal identifier.
+ MediaPartnerId *int `json:"mediaPartnerId,omitempty" xmlrpc:"mediaPartnerId,omitempty"`
+
+ // no documentation yet
+ MediaPartners []Auxiliary_Press_Release_Media_Partner `json:"mediaPartners,omitempty" xmlrpc:"mediaPartners,omitempty"`
+
+ // A count of
+ PressReleaseCount *uint `json:"pressReleaseCount,omitempty" xmlrpc:"pressReleaseCount,omitempty"`
+
+ // A press release internal identifier.
+ PressReleaseId *int `json:"pressReleaseId,omitempty" xmlrpc:"pressReleaseId,omitempty"`
+
+ // no documentation yet
+ PressReleases []Auxiliary_Press_Release `json:"pressReleases,omitempty" xmlrpc:"pressReleases,omitempty"`
+}
+
+// The SoftLayer_Auxiliary_Shipping_Courier data type contains general information relating to the different (major) couriers that SoftLayer may use for shipping.
+type Auxiliary_Shipping_Courier struct {
+ Entity
+
+ // The unique id of the shipping courier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The unique keyname of the shipping courier.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // The name of the shipping courier.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The url to the shipping courier's website.
+ Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"` +} + +// no documentation yet +type Auxiliary_Shipping_Courier_Type struct { + Entity + + // no documentation yet + Courier []Auxiliary_Shipping_Courier `json:"courier,omitempty" xmlrpc:"courier,omitempty"` + + // A count of + CourierCount *uint `json:"courierCount,omitempty" xmlrpc:"courierCount,omitempty"` + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/billing.go b/vendor/github.com/softlayer/softlayer-go/datatypes/billing.go new file mode 100644 index 0000000000..eb88c54912 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/billing.go @@ -0,0 +1,2660 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Billing_Currency struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Billing_Currency_Country data type maps what currencies are valid for specific countries. US Dollars are valid from any country, but other currencies are only available to customers in certain countries. +type Billing_Currency_Country struct { + Entity + + // A unique identifier for the related country. + CountryId *int `json:"countryId,omitempty" xmlrpc:"countryId,omitempty"` + + // A unique identifier for the related currency. + CurrencyId *int `json:"currencyId,omitempty" xmlrpc:"currencyId,omitempty"` + + // A unique identifier for a map between a country and currency. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Billing_Currency_ExchangeRate struct { + Entity + + // no documentation yet + EffectiveDate *Time `json:"effectiveDate,omitempty" xmlrpc:"effectiveDate,omitempty"` + + // no documentation yet + ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"` + + // no documentation yet + FundingCurrency *Billing_Currency `json:"fundingCurrency,omitempty" xmlrpc:"fundingCurrency,omitempty"` + + // The id of the exchange rate record. 
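+
+ // Illustrative sketch (not part of the generated SDK): applying an exchange
+ // rate record to convert an amount from the funding currency to the local
+ // currency. Assumes Float64 is a float64-based type, as defined elsewhere
+ // in this package.
+ //
+ //	func toLocalCurrency(amount float64, rate Billing_Currency_ExchangeRate) (float64, bool) {
+ //		if rate.Rate == nil {
+ //			return 0, false
+ //		}
+ //		return amount * float64(*rate.Rate), true
+ //	}
+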
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ LocalCurrency *Billing_Currency `json:"localCurrency,omitempty" xmlrpc:"localCurrency,omitempty"`
+
+ // no documentation yet
+ Rate *Float64 `json:"rate,omitempty" xmlrpc:"rate,omitempty"`
+}
+
+// Every SoftLayer customer account has billing specific information which is kept in the SoftLayer_Billing_Info data type. This information is used by the SoftLayer accounting group when sending invoices and making billing inquiries.
+type Billing_Info struct {
+ Entity
+
+ // The SoftLayer customer account associated with this billing information.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // A SoftLayer account's identifier.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // no documentation yet
+ AchInformation []Billing_Info_Ach `json:"achInformation,omitempty" xmlrpc:"achInformation,omitempty"`
+
+ // A count of
+ AchInformationCount *uint `json:"achInformationCount,omitempty" xmlrpc:"achInformationCount,omitempty"`
+
+ // The day of the month that a SoftLayer customer is billed.
+ AnniversaryDayOfMonth *int `json:"anniversaryDayOfMonth,omitempty" xmlrpc:"anniversaryDayOfMonth,omitempty"`
+
+ // This value doesn't persist to this object. It's used as part of the account creation process only.
+ CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"`
+
+ // the expiration month of the credit card on file
+ CardExpirationMonth *int `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"`
+
+ // the expiration year of the credit card on file
+ CardExpirationYear *int `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"`
+
+ // no documentation yet
+ CardNickname *string `json:"cardNickname,omitempty" xmlrpc:"cardNickname,omitempty"`
+
+ // the type of the credit card on file
+ CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"`
+
+ // This value doesn't persist to this object. It's used as part of the account creation process only.
+ CardVerificationNumber *string `json:"cardVerificationNumber,omitempty" xmlrpc:"cardVerificationNumber,omitempty"`
+
+ // The date a customer's billing information was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // Currency to be used by this customer account.
+ Currency *Billing_Currency `json:"currency,omitempty" xmlrpc:"currency,omitempty"`
+
+ // Information related to an account's current and previous billing cycles.
+ CurrentBillingCycle *Billing_Info_Cycle `json:"currentBillingCycle,omitempty" xmlrpc:"currentBillingCycle,omitempty"`
+
+ // A SoftLayer customer's billing information identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The date on which an account was last billed.
+ LastBillDate *Time `json:"lastBillDate,omitempty" xmlrpc:"lastBillDate,omitempty"`
+
+ // The last four digits of the credit card currently on the account. This is the only portion of the card that we store. For PayPal customers, this value will be empty.
+ LastFourPaymentCardDigits *int `json:"lastFourPaymentCardDigits,omitempty" xmlrpc:"lastFourPaymentCardDigits,omitempty"`
+
+ // The date of the last payment received by SoftLayer from the account holder.
+ LastPaymentDate *Time `json:"lastPaymentDate,omitempty" xmlrpc:"lastPaymentDate,omitempty"`
+
+ // The date a customer's billing information was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The date on which an account will be billed next. + NextBillDate *Time `json:"nextBillDate,omitempty" xmlrpc:"nextBillDate,omitempty"` + + // The payment terms for an account. + PaymentTerms *int `json:"paymentTerms,omitempty" xmlrpc:"paymentTerms,omitempty"` + + // The percentage discount received on all one-time charges on a customer's monthly bill. + PercentDiscountOnetime *int `json:"percentDiscountOnetime,omitempty" xmlrpc:"percentDiscountOnetime,omitempty"` + + // The percentage discount received on all recurring charges on a customer's monthly bill. + PercentDiscountRecurring *int `json:"percentDiscountRecurring,omitempty" xmlrpc:"percentDiscountRecurring,omitempty"` + + // The total recurring fee amount for servers that are in the spare pool status. + SparePoolAmount *int `json:"sparePoolAmount,omitempty" xmlrpc:"sparePoolAmount,omitempty"` + + // no documentation yet + VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"` +} + +// no documentation yet +type Billing_Info_Ach struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AccountNumber *string `json:"accountNumber,omitempty" xmlrpc:"accountNumber,omitempty"` + + // no documentation yet + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // no documentation yet + BankTransitNumber *string `json:"bankTransitNumber,omitempty" xmlrpc:"bankTransitNumber,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // no documentation yet + Postalcode *string `json:"postalcode,omitempty" xmlrpc:"postalcode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // no documentation yet + Street1 *string `json:"street1,omitempty" xmlrpc:"street1,omitempty"` + + // no documentation yet + Street2 *string `json:"street2,omitempty" xmlrpc:"street2,omitempty"` + + // no documentation yet + VerifiedDate *Time `json:"verifiedDate,omitempty" xmlrpc:"verifiedDate,omitempty"` +} + +// The SoftLayer_Billing_Info_Cycle data type models basic information concerning a SoftLayer account's previous and current billing cycles. The information in this class is only populated for SoftLayer customers who are billed monthly. +type Billing_Info_Cycle struct { + Entity + + // The account that a current billing cycle is associated with. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The ending date of an account's current billing cycle. + CurrentCycleEndDate *Time `json:"currentCycleEndDate,omitempty" xmlrpc:"currentCycleEndDate,omitempty"` + + // The starting date of an account's current billing cycle. 
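+
+ // Illustrative sketch (not part of the generated SDK): how much of the
+ // current billing cycle remains. Assumes the package's Time type embeds
+ // time.Time, as it does elsewhere in this SDK.
+ //
+ //	func timeLeftInCycle(cycle Billing_Info_Cycle, now time.Time) (time.Duration, bool) {
+ //		if cycle.CurrentCycleEndDate == nil {
+ //			return 0, false
+ //		}
+ //		return cycle.CurrentCycleEndDate.Sub(now), true
+ //	}
+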
+ CurrentCycleStartDate *Time `json:"currentCycleStartDate,omitempty" xmlrpc:"currentCycleStartDate,omitempty"`
+
+ // The start date of an account's next billing cycle.
+ NextCycleStartDate *Time `json:"nextCycleStartDate,omitempty" xmlrpc:"nextCycleStartDate,omitempty"`
+
+ // The ending date of an account's previous billing cycle.
+ PreviousCycleEndDate *Time `json:"previousCycleEndDate,omitempty" xmlrpc:"previousCycleEndDate,omitempty"`
+
+ // The starting date of an account's previous billing cycle.
+ PreviousCycleStartDate *Time `json:"previousCycleStartDate,omitempty" xmlrpc:"previousCycleStartDate,omitempty"`
+}
+
+// The SoftLayer_Billing_Invoice data type contains general information relating to an individual invoice applied to a SoftLayer customer account. Personal information in this type such as names, addresses, and phone numbers is taken from the account's contact information at the time the invoice is generated.
+type Billing_Invoice struct {
+ Entity
+
+ // The account that an invoice belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The SoftLayer customer account that an invoice belongs to.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The first line of an address belonging to an account at the time an invoice is created.
+ Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
+
+ // The second line of an address belonging to an account at the time an invoice is created.
+ Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
+
+ // This is the amount of this invoice.
+ Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+ // no documentation yet
+ BrandAtInvoiceCreation *Brand `json:"brandAtInvoiceCreation,omitempty" xmlrpc:"brandAtInvoiceCreation,omitempty"`
+
+ // The city portion of an address belonging to an account at the time an invoice is created.
+ City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
+
+ // Whether an account was exempt from taxes on their invoices at the time an invoice is created.
+ ClaimedTaxExemptTxFlag *bool `json:"claimedTaxExemptTxFlag,omitempty" xmlrpc:"claimedTaxExemptTxFlag,omitempty"`
+
+ // The date an invoice was closed. Open invoices have a null closed date.
+ ClosedDate *Time `json:"closedDate,omitempty" xmlrpc:"closedDate,omitempty"`
+
+ // The company name belonging to an account at the time an invoice is created.
+ CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
+
+ // A two-letter abbreviation of the country portion of an address belonging to an account at the time an invoice is created.
+ Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
+
+ // The date an invoice was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A flag that will reflect whether the detailed version of the pdf has been generated.
+ DetailedPdfGeneratedFlag *bool `json:"detailedPdfGeneratedFlag,omitempty" xmlrpc:"detailedPdfGeneratedFlag,omitempty"`
+
+ // no documentation yet
+ DocumentsGeneratedFlag *bool `json:"documentsGeneratedFlag,omitempty" xmlrpc:"documentsGeneratedFlag,omitempty"`
+
+ // The email address belonging to an account at the time an invoice is created.
+ Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+ // A SoftLayer account's balance at the time an invoice is closed. This value is measured in US Dollar ($USD) currency.
+ EndingBalance *Float64 `json:"endingBalance,omitempty" xmlrpc:"endingBalance,omitempty"`
+
+ // The fax telephone number belonging to an account at the time an invoice is created.
+ FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"`
+
+ // The first name of the account holder at the time an invoice is created.
+ FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+ // An invoice's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of a list of top-level invoice items that are on the currently pending invoice.
+ InvoiceTopLevelItemCount *uint `json:"invoiceTopLevelItemCount,omitempty" xmlrpc:"invoiceTopLevelItemCount,omitempty"`
+
+ // A list of top-level invoice items that are on the currently pending invoice.
+ InvoiceTopLevelItems []Billing_Invoice_Item `json:"invoiceTopLevelItems,omitempty" xmlrpc:"invoiceTopLevelItems,omitempty"`
+
+ // The total amount of this invoice.
+ InvoiceTotalAmount *Float64 `json:"invoiceTotalAmount,omitempty" xmlrpc:"invoiceTotalAmount,omitempty"`
+
+ // The total one-time charges for this invoice. This is the sum of one-time charges + setup fees + labor fees. This does not include taxes.
+ InvoiceTotalOneTimeAmount *Float64 `json:"invoiceTotalOneTimeAmount,omitempty" xmlrpc:"invoiceTotalOneTimeAmount,omitempty"`
+
+ // A sum of all the taxes related to one-time charges for this invoice.
+ InvoiceTotalOneTimeTaxAmount *Float64 `json:"invoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"invoiceTotalOneTimeTaxAmount,omitempty"`
+
+ // The total amount of this invoice. This does not include taxes.
+ InvoiceTotalPreTaxAmount *Float64 `json:"invoiceTotalPreTaxAmount,omitempty" xmlrpc:"invoiceTotalPreTaxAmount,omitempty"`
+
+ // The total recurring amount of this invoice. This amount does not include taxes or one-time charges.
+ InvoiceTotalRecurringAmount *Float64 `json:"invoiceTotalRecurringAmount,omitempty" xmlrpc:"invoiceTotalRecurringAmount,omitempty"`
+
+ // The total amount of the recurring taxes on this invoice.
+ InvoiceTotalRecurringTaxAmount *Float64 `json:"invoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"invoiceTotalRecurringTaxAmount,omitempty"`
+
+ // A count of the items that belong to this invoice.
+ ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"`
+
+ // The items that belong to this invoice.
+ Items []Billing_Invoice_Item `json:"items,omitempty" xmlrpc:"items,omitempty"`
+
+ // The last name of the account holder at the time an invoice is created.
+ LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+ // The date an invoice was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The telephone number belonging to an account at the time an invoice is created.
+ OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"`
+
+ // This is the total payment made on this invoice.
+ Payment *Float64 `json:"payment,omitempty" xmlrpc:"payment,omitempty"`
+
+ // A count of the payments for the invoice.
+ PaymentCount *uint `json:"paymentCount,omitempty" xmlrpc:"paymentCount,omitempty"`
+
+ // The payments for the invoice.
+ Payments []Billing_Invoice_Receivable_Payment `json:"payments,omitempty" xmlrpc:"payments,omitempty"`
+
+ // The postal code portion of an address belonging to an account at the time an invoice is created.
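+
+ // Illustrative sketch (not part of the generated SDK): the slice relations
+ // above ("items", "invoiceTopLevelItems", "payments") each carry a matching
+ // "...Count" property, but once the slice itself is populated it can simply
+ // be iterated. A hypothetical recurring-fee subtotal over an invoice's items:
+ //
+ //	func recurringSubtotal(invoice Billing_Invoice) float64 {
+ //		var total float64
+ //		for _, item := range invoice.Items {
+ //			if item.RecurringFee != nil {
+ //				total += float64(*item.RecurringFee)
+ //			}
+ //		}
+ //		return total
+ //	}
+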
+ PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
+
+ // no documentation yet
+ PurchaseOrderNumber *string `json:"purchaseOrderNumber,omitempty" xmlrpc:"purchaseOrderNumber,omitempty"`
+
+ // This is the seller's tax registration.
+ SellerRegistration *string `json:"sellerRegistration,omitempty" xmlrpc:"sellerRegistration,omitempty"`
+
+ // A SoftLayer account's balance at the time an invoice is created. This value is measured in US Dollar ($USD) currency.
+ StartingBalance *Float64 `json:"startingBalance,omitempty" xmlrpc:"startingBalance,omitempty"`
+
+ // A two-letter abbreviation of the state portion of an address belonging to an account at the time an invoice is created. If the account that the invoice was generated for resides outside a province then this is set to "other".
+ State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
+
+ // An invoice's status. The "OPEN" status means SoftLayer has not yet received payment for this invoice. "CLOSED" status means that SoftLayer has received payment and closed the invoice. The "CLOSED_FAILED" status code means SoftLayer closed the invoice without receiving a payment. Invoices are usually set to CLOSED_FAILED status in cases where customer accounts are terminated for non-payment.
+ StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"`
+
+ // This is the tax information that applies to tax auditing. This is the official tax record for this invoice.
+ TaxInfo *Billing_Invoice_Tax_Info `json:"taxInfo,omitempty" xmlrpc:"taxInfo,omitempty"`
+
+ // This is the set of tax information for any tax calculation for this invoice. Note that not all of these are necessarily official, so use the taxInfo key to get the final information.
+ TaxInfoHistory []Billing_Invoice_Tax_Info `json:"taxInfoHistory,omitempty" xmlrpc:"taxInfoHistory,omitempty"`
+
+ // A count of this is the set of tax information for any tax calculation for this invoice. Note that not all of these are necessarily official, so use the taxInfo key to get the final information.
+ TaxInfoHistoryCount *uint `json:"taxInfoHistoryCount,omitempty" xmlrpc:"taxInfoHistoryCount,omitempty"`
+
+ // This is a message explaining the tax treatment for this invoice.
+ TaxMessage *string `json:"taxMessage,omitempty" xmlrpc:"taxMessage,omitempty"`
+
+ // no documentation yet
+ TaxStatusId *int `json:"taxStatusId,omitempty" xmlrpc:"taxStatusId,omitempty"`
+
+ // This is the strategy used to calculate tax on this invoice.
+ TaxType *Billing_Invoice_Tax_Type `json:"taxType,omitempty" xmlrpc:"taxType,omitempty"`
+
+ // no documentation yet
+ TaxTypeId *int `json:"taxTypeId,omitempty" xmlrpc:"taxTypeId,omitempty"`
+
+ // An invoice's type. SoftLayer invoices and service credits are differentiated by their type. The "NEW" type code signifies an invoice for new service. A SoftLayer customer's first invoice has the NEW type code. "RECURRING" invoices are generated on a SoftLayer customer's anniversary billing date for monthly services. "ONE-TIME-CHARGE" invoices are generated when one-time charges are applied to an account. "CREDIT" invoices are generated whenever SoftLayer applies a credit against an account's balance. There are two special types of service credits. "REFUND" type credits are applied against a customer's account balance along with the receivables on their account. "MANUAL_PAYMENT_CREDIT" invoice credits are generated whenever a customer makes an unscheduled payment.
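+
+ // Illustrative sketch (not part of the generated SDK): the status codes
+ // documented above reduce to a simple check when deciding whether an
+ // invoice still awaits payment:
+ //
+ //	func awaitingPayment(invoice Billing_Invoice) bool {
+ //		// "CLOSED" and "CLOSED_FAILED" invoices are settled either way.
+ //		return invoice.StatusCode != nil && *invoice.StatusCode == "OPEN"
+ //	}
+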
+ TypeCode *string `json:"typeCode,omitempty" xmlrpc:"typeCode,omitempty"` +} + +// Each billing invoice item makes up a record within an invoice. This provides you with a detailed record of everything related to an invoice item. When you are billed, our system takes active billing items and creates an invoice. These invoice items are a copy of your active billing items, and make up the contents of your invoice. +type Billing_Invoice_Item struct { + Entity + + // An Invoice Item's associated child invoice items. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. + AssociatedChildren []Billing_Invoice_Item `json:"associatedChildren,omitempty" xmlrpc:"associatedChildren,omitempty"` + + // A count of an Invoice Item's associated child invoice items. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. + AssociatedChildrenCount *uint `json:"associatedChildrenCount,omitempty" xmlrpc:"associatedChildrenCount,omitempty"` + + // An Invoice Item's associated invoice item. If this is populated, it means this is an orphaned invoice item, but logically belongs to the associated invoice item. + AssociatedInvoiceItem *Billing_Invoice_Item `json:"associatedInvoiceItem,omitempty" xmlrpc:"associatedInvoiceItem,omitempty"` + + // The associated invoice Item ID. + AssociatedInvoiceItemId *int `json:"associatedInvoiceItemId,omitempty" xmlrpc:"associatedInvoiceItemId,omitempty"` + + // An Invoice Item's billing item, from which this item was generated. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The billing item from which this invoice item was generated. + BillingItemId *int `json:"billingItemId,omitempty" xmlrpc:"billingItemId,omitempty"` + + // This invoice item's "item category". + Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // The item category of the invoice item being invoiced. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // An Invoice Item's child invoice items. Only parent invoice items have children. For instance, a server invoice item will have children. + Children []Billing_Invoice_Item `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of an Invoice Item's child invoice items. Only parent invoice items have children. For instance, a server invoice item will have children. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // The date the invoice item was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The item description for this invoice item. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The domain name of the invoiced item. This is only used on invoice items whose category is "server". + DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"` + + // An Invoice Item's associated child invoice items, excluding some items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. + FilteredAssociatedChildren []Billing_Invoice_Item `json:"filteredAssociatedChildren,omitempty" xmlrpc:"filteredAssociatedChildren,omitempty"` + + // A count of an Invoice Item's associated child invoice items, excluding some items with a $0.00 recurring fee. 
Only parent invoice items have associated children. For instance, a server invoice item may have associated children.
+ FilteredAssociatedChildrenCount *uint `json:"filteredAssociatedChildrenCount,omitempty" xmlrpc:"filteredAssociatedChildrenCount,omitempty"`
+
+ // The Host name of the invoiced item. This is only used on invoice items whose category is "server".
+ HostName *string `json:"hostName,omitempty" xmlrpc:"hostName,omitempty"`
+
+ // Indicates whether this invoice item is billed on an hourly basis.
+ HourlyFlag *bool `json:"hourlyFlag,omitempty" xmlrpc:"hourlyFlag,omitempty"`
+
+ // The hourly recurring fee of the invoice item represented by a floating point decimal in US Dollars ($USD)
+ HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"`
+
+ // The ID of the invoice item.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The invoice to which this item belongs.
+ Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"`
+
+ // The invoice to which this invoice item belongs.
+ InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"`
+
+ // An invoice item's labor fee total after taxes. This does not include any child invoice items.
+ LaborAfterTaxAmount *Float64 `json:"laborAfterTaxAmount,omitempty" xmlrpc:"laborAfterTaxAmount,omitempty"`
+
+ // This is also a one-time fee of a special type.
+ LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"`
+
+ // The tax rate at which the labor fee is taxed.
+ LaborFeeTaxRate *Float64 `json:"laborFeeTaxRate,omitempty" xmlrpc:"laborFeeTaxRate,omitempty"`
+
+ // An invoice item's labor tax amount. This does not include any child invoice items.
+ LaborTaxAmount *Float64 `json:"laborTaxAmount,omitempty" xmlrpc:"laborTaxAmount,omitempty"`
+
+ // An invoice item's location, if one exists.
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // An Invoice Item's associated child invoice items, excluding ALL items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children.
+ NonZeroAssociatedChildren []Billing_Invoice_Item `json:"nonZeroAssociatedChildren,omitempty" xmlrpc:"nonZeroAssociatedChildren,omitempty"`
+
+ // A count of an Invoice Item's associated child invoice items, excluding ALL items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children.
+ NonZeroAssociatedChildrenCount *uint `json:"nonZeroAssociatedChildrenCount,omitempty" xmlrpc:"nonZeroAssociatedChildrenCount,omitempty"`
+
+ // A note to help describe more about the item. This normally holds usernames, or some other bit of extra information.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // An invoice item's one-time fee total after taxes. This does not include any child invoice items.
+ OneTimeAfterTaxAmount *Float64 `json:"oneTimeAfterTaxAmount,omitempty" xmlrpc:"oneTimeAfterTaxAmount,omitempty"`
+
+ // If there are any one-time charges assessed, it will show up here represented by a floating point decimal in US Dollars ($USD)
+ OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"`
+
+ // The rate at which the one-time fee is taxed.
+ OneTimeFeeTaxRate *Float64 `json:"oneTimeFeeTaxRate,omitempty" xmlrpc:"oneTimeFeeTaxRate,omitempty"`
+
+ // An invoice item's one-time tax amount.
This does not include any child invoice items.
+ OneTimeTaxAmount *Float64 `json:"oneTimeTaxAmount,omitempty" xmlrpc:"oneTimeTaxAmount,omitempty"`
+
+ // Every item tied to a server should have a parent invoice item which is the server line item. This is how we associate items to a server.
+ Parent *Billing_Invoice_Item `json:"parent,omitempty" xmlrpc:"parent,omitempty"`
+
+ // The parent invoice item, usually the server invoice item.
+ ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+
+ // The entry in the product catalog that an invoice item is based upon.
+ Product *Product_Item `json:"product,omitempty" xmlrpc:"product,omitempty"`
+
+ // The entry in the product catalog that an invoice item is based upon.
+ ProductItemId *int `json:"productItemId,omitempty" xmlrpc:"productItemId,omitempty"`
+
+ // An invoice item's recurring fee total after taxes. This does not include any child invoice items.
+ RecurringAfterTaxAmount *Float64 `json:"recurringAfterTaxAmount,omitempty" xmlrpc:"recurringAfterTaxAmount,omitempty"`
+
+ // The recurring fee of the invoice item represented by a floating point decimal in US Dollars ($USD)
+ RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"`
+
+ // the rate at which the recurring fee is taxed.
+ RecurringFeeTaxRate *Float64 `json:"recurringFeeTaxRate,omitempty" xmlrpc:"recurringFeeTaxRate,omitempty"`
+
+ // An invoice item's recurring tax amount. This does not include any child invoice items.
+ RecurringTaxAmount *Float64 `json:"recurringTaxAmount,omitempty" xmlrpc:"recurringTaxAmount,omitempty"`
+
+ // A unique identifier for a SoftLayer Service that is associated to an invoice item.
+ ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+
+ // The service provider for the invoice item.
+ ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"`
+
+ // An invoice item's setup fee total after taxes. This does not include any child invoice items.
+ SetupAfterTaxAmount *Float64 `json:"setupAfterTaxAmount,omitempty" xmlrpc:"setupAfterTaxAmount,omitempty"`
+
+ // If there were any setup fees they will show up here. These are normally a one-time fee.
+ SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"`
+
+ // The tax rate at which the setup fee is taxed.
+ SetupFeeTaxRate *Float64 `json:"setupFeeTaxRate,omitempty" xmlrpc:"setupFeeTaxRate,omitempty"`
+
+ // An invoice item's setup tax amount. This does not include any child invoice items.
+ SetupTaxAmount *Float64 `json:"setupTaxAmount,omitempty" xmlrpc:"setupTaxAmount,omitempty"`
+
+ // A string representing the name of the parent level product group of an invoice item.
+ TopLevelProductGroupName *string `json:"topLevelProductGroupName,omitempty" xmlrpc:"topLevelProductGroupName,omitempty"`
+
+ // An invoice Item's total, including any child invoice items if they exist.
+ TotalOneTimeAmount *Float64 `json:"totalOneTimeAmount,omitempty" xmlrpc:"totalOneTimeAmount,omitempty"`
+
+ // An invoice Item's total, including any child invoice items if they exist.
+ TotalOneTimeTaxAmount *Float64 `json:"totalOneTimeTaxAmount,omitempty" xmlrpc:"totalOneTimeTaxAmount,omitempty"`
+
+ // An invoice Item's total, including any child invoice items if they exist.
+ TotalRecurringAmount *Float64 `json:"totalRecurringAmount,omitempty" xmlrpc:"totalRecurringAmount,omitempty"`
+
+ // A Billing Item's total, including any child billing items if they exist.
+ TotalRecurringTaxAmount *Float64 `json:"totalRecurringTaxAmount,omitempty" xmlrpc:"totalRecurringTaxAmount,omitempty"`
+}
+
+// The SoftLayer_Billing_Invoice_Item_Hardware data type contains a "resource". This resource is a link to the hardware tied to a SoftLayer_Billing_item whose category code is "server".
+type Billing_Invoice_Item_Hardware struct {
+ Billing_Invoice_Item
+
+ // The resource for a server invoice item.
+ Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// Information about the tax rates that apply to a particular invoice item.
+type Billing_Invoice_Item_Tax_Info struct {
+ Entity
+
+ // The date and time the tax information was recorded.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The invoice description with special information about the invoice.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The tax rate that can be multiplied by the subtotal to get the tax amount.
+ EffectiveTaxRate *Float64 `json:"effectiveTaxRate,omitempty" xmlrpc:"effectiveTaxRate,omitempty"`
+
+ // The amount that is exempt from tax.
+ ExemptAmount *Float64 `json:"exemptAmount,omitempty" xmlrpc:"exemptAmount,omitempty"`
+
+ // The type of fee being tracked for this particular set of tax information.
+ FeeProperty *string `json:"feeProperty,omitempty" xmlrpc:"feeProperty,omitempty"`
+
+ // An invoice item's tax information internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ InvoiceItem *Billing_Invoice_Item `json:"invoiceItem,omitempty" xmlrpc:"invoiceItem,omitempty"`
+
+ // A reference to the related invoice item.
+ InvoiceItemId *int `json:"invoiceItemId,omitempty" xmlrpc:"invoiceItemId,omitempty"`
+
+ // no documentation yet
+ InvoiceTaxInfo *Billing_Invoice_Tax_Info `json:"invoiceTaxInfo,omitempty" xmlrpc:"invoiceTaxInfo,omitempty"`
+
+ // A reference to the tax information for the parent invoice.
+ InvoiceTaxInfoId *int `json:"invoiceTaxInfoId,omitempty" xmlrpc:"invoiceTaxInfoId,omitempty"`
+
+ // The date and time the tax information was modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The amount that is exempt from tax.
+ NonTaxableBasis *Float64 `json:"nonTaxableBasis,omitempty" xmlrpc:"nonTaxableBasis,omitempty"`
+
+ // A flag to indicate whether this is the official record for this invoice item.
+ ReportedFlag *bool `json:"reportedFlag,omitempty" xmlrpc:"reportedFlag,omitempty"`
+
+ // The registration that the seller will use to report the invoice.
+ SellerRegistration *string `json:"sellerRegistration,omitempty" xmlrpc:"sellerRegistration,omitempty"`
+
+ // The tax amount associated with this line item.
+ TaxAmount *Float64 `json:"taxAmount,omitempty" xmlrpc:"taxAmount,omitempty"`
+
+ // The tax amount (converted to the 'to' currency) associated with this line item.
+ TaxAmountToCurrency *Float64 `json:"taxAmountToCurrency,omitempty" xmlrpc:"taxAmountToCurrency,omitempty"`
+
+ // The tax rate used. Note that this might apply to only part of the invoice item.
+ TaxRate *Float64 `json:"taxRate,omitempty" xmlrpc:"taxRate,omitempty"`
+
+ // The amount that is subject to tax.
+ TaxableBasis *Float64 `json:"taxableBasis,omitempty" xmlrpc:"taxableBasis,omitempty"`
+
+ // This is the currency the invoice will be converted to.
+ ToCurrency *Billing_Currency `json:"toCurrency,omitempty" xmlrpc:"toCurrency,omitempty"`
+
+ // The currency code that the invoice is being converted to.
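+
+ // Illustrative sketch (not part of the generated SDK): per the field notes
+ // above, multiplying the effective tax rate by the amount subject to tax
+ // should approximate the recorded tax amount:
+ //
+ //	func expectedTax(info Billing_Invoice_Item_Tax_Info) (float64, bool) {
+ //		if info.TaxableBasis == nil || info.EffectiveTaxRate == nil {
+ //			return 0, false
+ //		}
+ //		return float64(*info.TaxableBasis) * float64(*info.EffectiveTaxRate), true
+ //	}
+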
+ ToCurrencyId *int `json:"toCurrencyId,omitempty" xmlrpc:"toCurrencyId,omitempty"`
+}
+
+// no documentation yet
+type Billing_Invoice_Next struct {
+ Entity
+}
+
+// The SoftLayer_Billing_Invoice_Receivable_Payment data type contains general information relating to payments made against invoices.
+type Billing_Invoice_Receivable_Payment struct {
+ Entity
+
+ // no documentation yet
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The amount of the payment.
+ Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+ // The date of the payment.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ CreditCardLastFourDigits *int `json:"creditCardLastFourDigits,omitempty" xmlrpc:"creditCardLastFourDigits,omitempty"`
+
+ // no documentation yet
+ CreditCardRequestId *string `json:"creditCardRequestId,omitempty" xmlrpc:"creditCardRequestId,omitempty"`
+
+ // no documentation yet
+ CreditCardTransaction *Billing_Payment_Card_Transaction `json:"creditCardTransaction,omitempty" xmlrpc:"creditCardTransaction,omitempty"`
+
+ // no documentation yet
+ ExchangeRate *Billing_Currency_ExchangeRate `json:"exchangeRate,omitempty" xmlrpc:"exchangeRate,omitempty"`
+
+ // no documentation yet
+ Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"`
+
+ // The invoice that the payment is for.
+ InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"`
+
+ // no documentation yet
+ PaypalTransaction *Billing_Payment_PayPal_Transaction `json:"paypalTransaction,omitempty" xmlrpc:"paypalTransaction,omitempty"`
+
+ // The type of payment.
+ TypeCode *string `json:"typeCode,omitempty" xmlrpc:"typeCode,omitempty"`
+}
+
+// Invoice tax information contains top-level information about the taxes recorded for a particular invoice.
+type Billing_Invoice_Tax_Info struct {
+ Entity
+
+ // The date and time this tax information was recorded.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // This is the currency used for the invoice.
+ Currency *Billing_Currency `json:"currency,omitempty" xmlrpc:"currency,omitempty"`
+
+ // The currency code that the invoice should be recorded in.
+ CurrencyId *int `json:"currencyId,omitempty" xmlrpc:"currencyId,omitempty"`
+
+ // This is the functional currency used for the invoice.
+ FunctionalCurrency *Billing_Currency `json:"functionalCurrency,omitempty" xmlrpc:"functionalCurrency,omitempty"`
+
+ // The internal identifier for this invoice tax information.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // This is the related invoice for this tax-related information.
+ Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"`
+
+ // A reference to the related invoice.
+ InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"`
+
+ // A count of this is the collection of tax information for each of the related invoice items.
+ ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"`
+
+ // This is the tax information on the invoice item that includes currency details.
+ ItemWithCurrencyInfo *Billing_Invoice_Item_Tax_Info `json:"itemWithCurrencyInfo,omitempty" xmlrpc:"itemWithCurrencyInfo,omitempty"`
+
+ // This is the collection of tax information for each of the related invoice items.
+ Items []Billing_Invoice_Item_Tax_Info `json:"items,omitempty" xmlrpc:"items,omitempty"`
+
+ // The date and time this tax information was updated.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A flag to indicate whether the invoice will be auditable.
+ ReportedFlag *bool `json:"reportedFlag,omitempty" xmlrpc:"reportedFlag,omitempty"`
+
+ // This is the total tax amount (converted to the 'to' currency) for the invoice.
+ TotalTaxAmountToCurrency *Float64 `json:"totalTaxAmountToCurrency,omitempty" xmlrpc:"totalTaxAmountToCurrency,omitempty"`
+}
+
+// The invoice tax status data type models a single status or state that an invoice can reflect in regard to an integration with a third-party tax calculation service.
+type Billing_Invoice_Tax_Status struct {
+ Entity
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The invoice tax type data type models a single strategy for handling tax calculations.
+type Billing_Invoice_Tax_Type struct {
+ Entity
+
+ // A tax type's internal identifier. Each type of tax calculation strategy has a unique ID value.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A unique string that identifies each strategy and is guaranteed to be stable over time.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // A human-readable label for each tax strategy.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Every individual item that a SoftLayer customer is billed for is recorded in the SoftLayer_Billing_Item data type. Billing items range from server chassis to hard drives to control panels, bandwidth quota upgrades and port upgrade charges. SoftLayer [[SoftLayer_Billing_Invoice|invoices]] are generated from the cost of a customer's billing items. Billing items are copied from the product catalog as they're ordered by customers to create a reference between an account and the billable items they own.
+//
+// Billing items exist in a tree relationship. Items are associated with each other by parent/child relationships. Component items such as CPU's, RAM, and software each have a parent billing item for the server chassis they're associated with. Billing Items with a null parent item do not have an associated parent item.
+type Billing_Item struct {
+ Entity
+
+ // The account that a billing item belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // no documentation yet
+ ActiveAgreement *Account_Agreement `json:"activeAgreement,omitempty" xmlrpc:"activeAgreement,omitempty"`
+
+ // A flag indicating that the billing item is under an active agreement.
+ ActiveAgreementFlag *Account_Agreement `json:"activeAgreementFlag,omitempty" xmlrpc:"activeAgreementFlag,omitempty"`
+
+ // A billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item.
+ ActiveAssociatedChildren []Billing_Item `json:"activeAssociatedChildren,omitempty" xmlrpc:"activeAssociatedChildren,omitempty"`
+
+ // A count of a billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item.
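+
+ // Illustrative sketch (not part of the generated SDK): because billing items
+ // form a parent/child tree, a recursive walk over the active children visits
+ // every component hanging off a server's billing item:
+ //
+ //	func walkActive(item Billing_Item, visit func(Billing_Item)) {
+ //		visit(item)
+ //		for _, child := range item.ActiveChildren {
+ //			walkActive(child, visit)
+ //		}
+ //	}
+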
+	// A count of a billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item.
+	ActiveAssociatedChildrenCount *uint `json:"activeAssociatedChildrenCount,omitempty" xmlrpc:"activeAssociatedChildrenCount,omitempty"`
+
+	// A count of a billing item's active associated guest disk billing items.
+	ActiveAssociatedGuestDiskBillingItemCount *uint `json:"activeAssociatedGuestDiskBillingItemCount,omitempty" xmlrpc:"activeAssociatedGuestDiskBillingItemCount,omitempty"`
+
+	// no documentation yet
+	ActiveAssociatedGuestDiskBillingItems []Billing_Item `json:"activeAssociatedGuestDiskBillingItems,omitempty" xmlrpc:"activeAssociatedGuestDiskBillingItems,omitempty"`
+
+	// A count of a Billing Item's active bundled billing items.
+	ActiveBundledItemCount *uint `json:"activeBundledItemCount,omitempty" xmlrpc:"activeBundledItemCount,omitempty"`
+
+	// A Billing Item's active bundled billing items.
+	ActiveBundledItems []Billing_Item `json:"activeBundledItems,omitempty" xmlrpc:"activeBundledItems,omitempty"`
+
+	// A service cancellation request item that corresponds to the billing item.
+	ActiveCancellationItem *Billing_Item_Cancellation_Request_Item `json:"activeCancellationItem,omitempty" xmlrpc:"activeCancellationItem,omitempty"`
+
+	// A Billing Item's active child billing items.
+	ActiveChildren []Billing_Item `json:"activeChildren,omitempty" xmlrpc:"activeChildren,omitempty"`
+
+	// A count of a Billing Item's active child billing items.
+	ActiveChildrenCount *uint `json:"activeChildrenCount,omitempty" xmlrpc:"activeChildrenCount,omitempty"`
+
+	// no documentation yet
+	ActiveFlag *bool `json:"activeFlag,omitempty" xmlrpc:"activeFlag,omitempty"`
+
+	// A count of a billing item's active spare pool associated guest disk billing items.
+	ActiveSparePoolAssociatedGuestDiskBillingItemCount *uint `json:"activeSparePoolAssociatedGuestDiskBillingItemCount,omitempty" xmlrpc:"activeSparePoolAssociatedGuestDiskBillingItemCount,omitempty"`
+
+	// no documentation yet
+	ActiveSparePoolAssociatedGuestDiskBillingItems []Billing_Item `json:"activeSparePoolAssociatedGuestDiskBillingItems,omitempty" xmlrpc:"activeSparePoolAssociatedGuestDiskBillingItems,omitempty"`
+
+	// A count of a Billing Item's spare pool bundled billing items.
+	ActiveSparePoolBundledItemCount *uint `json:"activeSparePoolBundledItemCount,omitempty" xmlrpc:"activeSparePoolBundledItemCount,omitempty"`
+
+	// A Billing Item's spare pool bundled billing items.
+	ActiveSparePoolBundledItems []Billing_Item `json:"activeSparePoolBundledItems,omitempty" xmlrpc:"activeSparePoolBundledItems,omitempty"`
+
+	// Flag to check if a billing item can be cancelled. 1 = yes. 0 = no.
+	AllowCancellationFlag *int `json:"allowCancellationFlag,omitempty" xmlrpc:"allowCancellationFlag,omitempty"`
+
+	// A billing item's associated parent. This is to be used for billing items that are "floating", and therefore are not child items of any parent billing item. To associate one item with another, populate this with the SoftLayer_Billing_Item ID of the associated parent item.
+	AssociatedBillingItem *Billing_Item `json:"associatedBillingItem,omitempty" xmlrpc:"associatedBillingItem,omitempty"`
+
+	// A history of billing items which a billing item has been associated with.
+	AssociatedBillingItemHistory []Billing_Item_Association_History `json:"associatedBillingItemHistory,omitempty" xmlrpc:"associatedBillingItemHistory,omitempty"`
+
+	// A count of a history of billing items which a billing item has been associated with.
+	AssociatedBillingItemHistoryCount *uint `json:"associatedBillingItemHistoryCount,omitempty" xmlrpc:"associatedBillingItemHistoryCount,omitempty"`
+
+	// This is sometimes populated for orphan billing items that are not attached to servers. Billing items like secondary portable IP addresses fit into this category. A user may set an association by calling [[SoftLayer_Billing_Item::setAssociationId]]. This will cause this orphan item to appear under its associated server billing item on future invoices. You may only attach orphaned billing items to server billing items without cancellation dates set.
+	AssociatedBillingItemId *string `json:"associatedBillingItemId,omitempty" xmlrpc:"associatedBillingItemId,omitempty"`
+
+	// A Billing Item's associated child billing items. This includes "floating" items that are not necessarily child billing items of this billing item.
+	AssociatedChildren []Billing_Item `json:"associatedChildren,omitempty" xmlrpc:"associatedChildren,omitempty"`
+
+	// A count of a Billing Item's associated child billing items. This includes "floating" items that are not necessarily child billing items of this billing item.
+	AssociatedChildrenCount *uint `json:"associatedChildrenCount,omitempty" xmlrpc:"associatedChildrenCount,omitempty"`
+
+	// A billing item's associated parent billing item. This object will be the same as the parent billing item if parentId is set.
+	AssociatedParent []Billing_Item `json:"associatedParent,omitempty" xmlrpc:"associatedParent,omitempty"`
+
+	// A count of a billing item's associated parent billing item. This object will be the same as the parent billing item if parentId is set.
+	AssociatedParentCount *uint `json:"associatedParentCount,omitempty" xmlrpc:"associatedParentCount,omitempty"`
+
+	// A count of the available matching VLANs.
+	AvailableMatchingVlanCount *uint `json:"availableMatchingVlanCount,omitempty" xmlrpc:"availableMatchingVlanCount,omitempty"`
+
+	// no documentation yet
+	AvailableMatchingVlans []Network_Vlan `json:"availableMatchingVlans,omitempty" xmlrpc:"availableMatchingVlans,omitempty"`
+
+	// The bandwidth allocation for a billing item.
+	BandwidthAllocation *Network_Bandwidth_Version1_Allocation `json:"bandwidthAllocation,omitempty" xmlrpc:"bandwidthAllocation,omitempty"`
+
+	// A billing item's recurring child items that have been billed at least once and are scheduled to be billed in the future.
+	BillableChildren []Billing_Item `json:"billableChildren,omitempty" xmlrpc:"billableChildren,omitempty"`
+
+	// A count of a billing item's recurring child items that have been billed at least once and are scheduled to be billed in the future.
+	BillableChildrenCount *uint `json:"billableChildrenCount,omitempty" xmlrpc:"billableChildrenCount,omitempty"`
+
+	// A count of a Billing Item's bundled billing items.
+	BundleItemCount *uint `json:"bundleItemCount,omitempty" xmlrpc:"bundleItemCount,omitempty"`
+
+	// A Billing Item's bundled billing items.
+	BundleItems []Product_Item_Bundles `json:"bundleItems,omitempty" xmlrpc:"bundleItems,omitempty"`
+
+	// A count of a Billing Item's bundled billing items.
+	BundledItemCount *uint `json:"bundledItemCount,omitempty" xmlrpc:"bundledItemCount,omitempty"`
+
+	// A Billing Item's bundled billing items.
+	BundledItems []Billing_Item `json:"bundledItems,omitempty" xmlrpc:"bundledItems,omitempty"`
+
+	// A Billing Item's canceled child billing items.
+	CanceledChildren []Billing_Item `json:"canceledChildren,omitempty" xmlrpc:"canceledChildren,omitempty"`
+
+	// A count of a Billing Item's canceled child billing items.
+	CanceledChildrenCount *uint `json:"canceledChildrenCount,omitempty" xmlrpc:"canceledChildrenCount,omitempty"`
+
+	// A billing item's cancellation date. A billing item with a cancellation date in the past is not charged on your SoftLayer invoice. Cancellation dates in the future indicate the current billing item is active, but will be cancelled and not charged for in the future. A billing item with a null cancellation date is also considered an active billing item and is charged once every billing cycle.
+	CancellationDate *Time `json:"cancellationDate,omitempty" xmlrpc:"cancellationDate,omitempty"`
+
+	// The billing item's cancellation reason.
+	CancellationReason *Billing_Item_Cancellation_Reason `json:"cancellationReason,omitempty" xmlrpc:"cancellationReason,omitempty"`
+
+	// A count of the cancellation requests that are associated with this billing item.
+	CancellationRequestCount *uint `json:"cancellationRequestCount,omitempty" xmlrpc:"cancellationRequestCount,omitempty"`
+
+	// This will return any cancellation requests that are associated with this billing item.
+	CancellationRequests []Billing_Item_Cancellation_Request `json:"cancellationRequests,omitempty" xmlrpc:"cancellationRequests,omitempty"`
+
+	// The item category to which the billing item's item belongs.
+	Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"`
+
+	// The category code of this billing item. It is used to distinguish, for instance, a primary disk from a secondary disk.
+	CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"`
+
+	// A Billing Item's child billing items.
+	Children []Billing_Item `json:"children,omitempty" xmlrpc:"children,omitempty"`
+
+	// A count of a Billing Item's child billing items.
+	ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"`
+
+	// A Billing Item's child billing items that are under an active agreement.
+	ChildrenWithActiveAgreement []Billing_Item `json:"childrenWithActiveAgreement,omitempty" xmlrpc:"childrenWithActiveAgreement,omitempty"`
+
+	// A count of a Billing Item's child billing items that are under an active agreement.
+	ChildrenWithActiveAgreementCount *uint `json:"childrenWithActiveAgreementCount,omitempty" xmlrpc:"childrenWithActiveAgreementCount,omitempty"`
+
+	// The date the billing item was created. You can see this date on the invoice.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// This is the total hourly charge for this billing item, calculated as hourlyRecurringFee * hoursUsed (see the sketch below).
+	CurrentHourlyCharge *string `json:"currentHourlyCharge,omitempty" xmlrpc:"currentHourlyCharge,omitempty"`
+
+	// The last time this billing item was charged.
+	CycleStartDate *Time `json:"cycleStartDate,omitempty" xmlrpc:"cycleStartDate,omitempty"`
+
+	// A brief description of a billing item.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The domain name is provided for server billing items.
+	DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"`
+
+	// A count of the product items in this item's downgrade path, if one is defined.
+	DowngradeItemCount *uint `json:"downgradeItemCount,omitempty" xmlrpc:"downgradeItemCount,omitempty"`
+
+	// For product items which have a downgrade path defined, this will return those product items.
+	DowngradeItems []Product_Item `json:"downgradeItems,omitempty" xmlrpc:"downgradeItems,omitempty"`
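+
+	// Editor's example (hypothetical, not part of the generated bindings): the
+	// current hourly charge noted above is the product of the hourly recurring
+	// fee and the hours used. Both fields are pointers and HoursUsed arrives
+	// as a string, so they must be checked and parsed before use (assumes the
+	// strconv package is imported).
+	//
+	//	func estimateHourlyCharge(item Billing_Item) float64 {
+	//		if item.HourlyRecurringFee == nil || item.HoursUsed == nil {
+	//			return 0
+	//		}
+	//		hours, err := strconv.ParseFloat(*item.HoursUsed, 64)
+	//		if err != nil {
+	//			return 0
+	//		}
+	//		return float64(*item.HourlyRecurringFee) * hours
+	//	}
+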
+	// A Billing Item's associated child billing items, excluding some items with a $0.00 recurring fee.
+	FilteredNextInvoiceChildren []Billing_Item `json:"filteredNextInvoiceChildren,omitempty" xmlrpc:"filteredNextInvoiceChildren,omitempty"`
+
+	// A count of a Billing Item's associated child billing items, excluding some items with a $0.00 recurring fee.
+	FilteredNextInvoiceChildrenCount *uint `json:"filteredNextInvoiceChildrenCount,omitempty" xmlrpc:"filteredNextInvoiceChildrenCount,omitempty"`
+
+	// The hostname is provided for server billing items.
+	HostName *string `json:"hostName,omitempty" xmlrpc:"hostName,omitempty"`
+
+	// A flag that reflects whether this billing item is billed on an hourly basis.
+	HourlyFlag *bool `json:"hourlyFlag,omitempty" xmlrpc:"hourlyFlag,omitempty"`
+
+	// The amount of money charged per hour for a billing item, if applicable. hourlyRecurringFee is measured in US Dollars ($USD).
+	HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"`
+
+	// This is the number of hours the hourly billing item has been in use this billing period. For virtual servers, this means running, paused or stopped.
+	HoursUsed *string `json:"hoursUsed,omitempty" xmlrpc:"hoursUsed,omitempty"`
+
+	// The unique identifier for this billing item.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The invoice item associated with this billing item.
+	InvoiceItem *Billing_Invoice_Item `json:"invoiceItem,omitempty" xmlrpc:"invoiceItem,omitempty"`
+
+	// A count of all invoice items associated with the billing item.
+	InvoiceItemCount *uint `json:"invoiceItemCount,omitempty" xmlrpc:"invoiceItemCount,omitempty"`
+
+	// All invoice items associated with the billing item.
+	InvoiceItems []Billing_Invoice_Item `json:"invoiceItems,omitempty" xmlrpc:"invoiceItems,omitempty"`
+
+	// The entry in the SoftLayer product catalog that a billing item is based upon.
+	Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"`
+
+	// The labor fee, if any. This is a one-time charge.
+	LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"`
+
+	// The rate at which labor fees are taxed if you are a taxable customer.
+	LaborFeeTaxRate *Float64 `json:"laborFeeTaxRate,omitempty" xmlrpc:"laborFeeTaxRate,omitempty"`
+
+	// The last time this billing item was charged.
+	LastBillDate *Time `json:"lastBillDate,omitempty" xmlrpc:"lastBillDate,omitempty"`
+
+	// The location of the billing item. Some billing items have physical properties such as the server itself. For items such as these, we provide location information.
+	Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+	// The date that a billing item was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// The date on which your account will be charged for this billing item.
+	NextBillDate *Time `json:"nextBillDate,omitempty" xmlrpc:"nextBillDate,omitempty"`
+
+	// A Billing Item's child billing items and associated items.
+	NextInvoiceChildren []Billing_Item `json:"nextInvoiceChildren,omitempty" xmlrpc:"nextInvoiceChildren,omitempty"`
+
+	// A count of a Billing Item's child billing items and associated items.
+	NextInvoiceChildrenCount *uint `json:"nextInvoiceChildrenCount,omitempty" xmlrpc:"nextInvoiceChildrenCount,omitempty"`
+
+	// A Billing Item's total one-time amount, including any child billing items if they exist.
+	NextInvoiceTotalOneTimeAmount *Float64 `json:"nextInvoiceTotalOneTimeAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeAmount,omitempty"`
+
+	// A Billing Item's total one-time tax amount, including any child billing items if they exist.
+	NextInvoiceTotalOneTimeTaxAmount *Float64 `json:"nextInvoiceTotalOneTimeTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalOneTimeTaxAmount,omitempty"`
+
+	// A Billing Item's total recurring amount, including any child billing items and associated billing items if they exist.
+	NextInvoiceTotalRecurringAmount *Float64 `json:"nextInvoiceTotalRecurringAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringAmount,omitempty"`
+
+	// This is deprecated and will always be zero. Because tax is calculated in real-time, previewing the next recurring invoice is pre-tax only.
+	NextInvoiceTotalRecurringTaxAmount *Float64 `json:"nextInvoiceTotalRecurringTaxAmount,omitempty" xmlrpc:"nextInvoiceTotalRecurringTaxAmount,omitempty"`
+
+	// A Billing Item's associated child billing items, excluding ALL items with a $0.00 recurring fee.
+	NonZeroNextInvoiceChildren []Billing_Item `json:"nonZeroNextInvoiceChildren,omitempty" xmlrpc:"nonZeroNextInvoiceChildren,omitempty"`
+
+	// A count of a Billing Item's associated child billing items, excluding ALL items with a $0.00 recurring fee.
+	NonZeroNextInvoiceChildrenCount *uint `json:"nonZeroNextInvoiceChildrenCount,omitempty" xmlrpc:"nonZeroNextInvoiceChildrenCount,omitempty"`
+
+	// Extra information provided to help you identify this billing item. This is often a username or something to help identify items that customers have more than one of.
+	Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+	// The amount of money charged as a one-time charge for a billing item, if applicable. oneTimeFee is measured in US Dollars ($USD).
+	OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"`
+
+	// The rate at which one time fees are taxed if you are a taxable customer.
+	OneTimeFeeTaxRate *Float64 `json:"oneTimeFeeTaxRate,omitempty" xmlrpc:"oneTimeFeeTaxRate,omitempty"`
+
+	// A billing item's original order item. Simply a reference to the original order from which this billing item was created.
+	OrderItem *Billing_Order_Item `json:"orderItem,omitempty" xmlrpc:"orderItem,omitempty"`
+
+	// The SoftLayer_Billing_Order_Item ID. This is a reference to the original order item from which this billing item was created.
+	OrderItemId *int `json:"orderItemId,omitempty" xmlrpc:"orderItemId,omitempty"`
+
+	// The original physical location for this billing item. It may differ from the current location.
+	OriginalLocation *Location `json:"originalLocation,omitempty" xmlrpc:"originalLocation,omitempty"`
+
+	// The package under which this billing item was sold. A Package is the general grouping of products as seen on our order forms.
+	Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+	// A billing item's parent item. If a billing item has no parent item then this value is null.
+	Parent *Billing_Item `json:"parent,omitempty" xmlrpc:"parent,omitempty"`
+
+	// The unique identifier of the parent of this billing item.
+	ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+
+	// A billing item's parent virtual guest billing item. If a billing item has no parent virtual guest billing item then this value is null.
+	ParentVirtualGuestBillingItem *Billing_Item_Virtual_Guest `json:"parentVirtualGuestBillingItem,omitempty" xmlrpc:"parentVirtualGuestBillingItem,omitempty"`
+
+	// This flag indicates whether a billing item is scheduled to be canceled or not.
+	PendingCancellationFlag *bool `json:"pendingCancellationFlag,omitempty" xmlrpc:"pendingCancellationFlag,omitempty"`
+
+	// The new order item that will replace this billing item.
+	PendingOrderItem *Billing_Order_Item `json:"pendingOrderItem,omitempty" xmlrpc:"pendingOrderItem,omitempty"`
+
+	// Provisioning transaction for this billing item.
+	ProvisionTransaction *Provisioning_Version1_Transaction `json:"provisionTransaction,omitempty" xmlrpc:"provisionTransaction,omitempty"`
+
+	// The amount of money charged per month for a billing item, if applicable. recurringFee is measured in US Dollars ($USD).
+	RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"`
+
+	// The rate at which recurring fees are taxed if you are a taxable customer.
+	RecurringFeeTaxRate *Float64 `json:"recurringFeeTaxRate,omitempty" xmlrpc:"recurringFeeTaxRate,omitempty"`
+
+	// The number of months in which the recurring fees will be incurred.
+	RecurringMonths *int `json:"recurringMonths,omitempty" xmlrpc:"recurringMonths,omitempty"`
+
+	// The internal identifier of the service provider for this billing item.
+	ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"`
+
+	// The setup fee, if any. This is a one-time charge.
+	SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"`
+
+	// The rate at which setup fees are taxed if you are a taxable customer.
+	SetupFeeTaxRate *Float64 `json:"setupFeeTaxRate,omitempty" xmlrpc:"setupFeeTaxRate,omitempty"`
+
+	// A friendly description of the software component.
+	SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"`
+
+	// Billing items whose product item has an upgrade path defined in our system will return the next product item in the upgrade path.
+	UpgradeItem *Product_Item `json:"upgradeItem,omitempty" xmlrpc:"upgradeItem,omitempty"`
+
+	// A count of the product items in the upgrade path, for billing items whose product item has an upgrade path defined in our system.
+	UpgradeItemCount *uint `json:"upgradeItemCount,omitempty" xmlrpc:"upgradeItemCount,omitempty"`
+
+	// Billing items whose product item has an upgrade path defined in our system will return all the product items in the upgrade path.
+	UpgradeItems []Product_Item `json:"upgradeItems,omitempty" xmlrpc:"upgradeItems,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Account_Media_Data_Transfer_Request data type contains general information relating to a single SoftLayer billing item for a data transfer request.
+type Billing_Item_Account_Media_Data_Transfer_Request struct {
+	Billing_Item
+
+	// The data transfer request to which the billing item points.
+	Resource *Account_Media_Data_Transfer_Request `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Association_History type keeps a record of which server billing items an "orphan" item has been associated with. Orphan billing items are billable items for secondary portable services (such as secondary subnets and StorageLayer accounts) that are not associated with a server and appear at the bottom of a SoftLayer invoice. The [[SoftLayer_Billing_Item::setAssociationId]] method allows you to associate these kinds of items with servers, making them appear as a child item of the server on your invoice. A SoftLayer_Billing_Item_Association_History record is created every time one of these associations is set.
+type Billing_Item_Association_History struct {
+	Entity
+
+	// The server billing item that an orphaned billing item was associated with.
+	AssociatedBillingItem *Billing_Item `json:"associatedBillingItem,omitempty" xmlrpc:"associatedBillingItem,omitempty"`
+
+	// The internal identifier of the server billing item that an orphaned billing item was associated with.
+	AssociatedBillingItemId *int `json:"associatedBillingItemId,omitempty" xmlrpc:"associatedBillingItemId,omitempty"`
+
+	// The billing item that was associated with a server billing item.
+	BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+	// The internal identifier of the billing item that was associated with a server billing item.
+	BillingItemId *int `json:"billingItemId,omitempty" xmlrpc:"billingItemId,omitempty"`
+
+	// The date that a billing item association was last changed.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// A billing item association history's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Cancellation_Reason data type contains cancellation reasons.
+type Billing_Item_Cancellation_Reason struct {
+	Entity
+
+	// A cancel reason category internal identifier.
+	BillingCancelReasonCategoryId *int `json:"billingCancelReasonCategoryId,omitempty" xmlrpc:"billingCancelReasonCategoryId,omitempty"`
+
+	// A billing cancellation reason category.
+	BillingCancellationReasonCategory *Billing_Item_Cancellation_Reason_Category `json:"billingCancellationReasonCategory,omitempty" xmlrpc:"billingCancellationReasonCategory,omitempty"`
+
+	// A count of the corresponding billing items having the specific cancellation reason.
+	BillingItemCount *uint `json:"billingItemCount,omitempty" xmlrpc:"billingItemCount,omitempty"`
+
+	// The corresponding billing items having the specific cancellation reason.
+	BillingItems []Billing_Item `json:"billingItems,omitempty" xmlrpc:"billingItems,omitempty"`
+
+	// A reason internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A standardized reason internal identifier.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The description of the reason.
+	Reason *string `json:"reason,omitempty" xmlrpc:"reason,omitempty"`
+
+	// no documentation yet
+	TranslatedReason *string `json:"translatedReason,omitempty" xmlrpc:"translatedReason,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Cancellation_Reason_Category data type contains cancellation reason categories.
+type Billing_Item_Cancellation_Reason_Category struct {
+	Entity
+
+	// A count of the corresponding billing cancellation reasons having the specific billing cancellation reason category.
+	BillingCancellationReasonCount *uint `json:"billingCancellationReasonCount,omitempty" xmlrpc:"billingCancellationReasonCount,omitempty"`
+
+	// The corresponding billing cancellation reasons having the specific billing cancellation reason category.
+	BillingCancellationReasons []Billing_Item_Cancellation_Reason `json:"billingCancellationReasons,omitempty" xmlrpc:"billingCancellationReasons,omitempty"`
+
+	// A category internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The description of the category.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Cancellation_Request data type is used to cancel service billing items.
+type Billing_Item_Cancellation_Request struct {
+	Entity
+
+	// The SoftLayer account that a service cancellation request belongs to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The internal identifier of the customer account that a service cancellation record belongs to.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// The internal identifier of the cancellation reason.
+	BillingCancelReasonId *int `json:"billingCancelReasonId,omitempty" xmlrpc:"billingCancelReasonId,omitempty"`
+
+	// The date that a cancellation request was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// A cancellation record's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A count of a collection of service cancellation items.
+	ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"`
+
+	// A collection of service cancellation items.
+	Items []Billing_Item_Cancellation_Request_Item `json:"items,omitempty" xmlrpc:"items,omitempty"`
+
+	// The last modified date.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// Brief cancellation note.
+	Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+	// The status of a service cancellation request.
+	Status *Billing_Item_Cancellation_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// An internal identifier of the service cancellation status that this request is associated with. When a service cancellation is submitted, it will be in "Pending" status until the SoftLayer Sales team reviews it. The status of a cancellation request will be updated to "Approved" or "Voided" by SoftLayer Sales.
+	//
+	// It will be updated to "Complete" when all services are reclaimed.
+	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+	// The ticket that is associated with the service cancellation request.
+	Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+	// An internal identifier of the ticket that is associated with a service cancellation request. When a service cancellation is submitted, a support ticket will be created. This ticket contains the details of your service cancellation, and the SoftLayer Sales team will use it to communicate with you.
+	TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+
+	// The user that initiated a service cancellation request.
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Cancellation_Request_Item data type contains a billing item for cancellation. This data type ties billing items to the associated service.
+type Billing_Item_Cancellation_Request_Item struct {
+	Entity
+
+	// The billing item for cancellation.
+	BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+	// The internal identifier of a billing item.
+	BillingItemId *int `json:"billingItemId,omitempty" xmlrpc:"billingItemId,omitempty"`
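+
+	// Editor's example (hypothetical, not part of the generated bindings):
+	// building a cancellation request item for an existing billing item. The
+	// field names come from this struct; submitting the request still happens
+	// through the SoftLayer API service layer, which is out of scope here.
+	//
+	//	func newCancellationItem(billingItemId int, immediate bool) Billing_Item_Cancellation_Request_Item {
+	//		return Billing_Item_Cancellation_Request_Item{
+	//			BillingItemId:             &billingItemId,
+	//			ImmediateCancellationFlag: &immediate,
+	//		}
+	//	}
+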
+	// The service cancellation request that a cancellation item belongs to.
+	CancellationRequest *Billing_Item_Cancellation_Request `json:"cancellationRequest,omitempty" xmlrpc:"cancellationRequest,omitempty"`
+
+	// A cancellation request's internal identifier.
+	CancellationRequestId *int `json:"cancellationRequestId,omitempty" xmlrpc:"cancellationRequestId,omitempty"`
+
+	// A cancellation request item's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// This flag indicates whether a billing item should be canceled immediately. Set this flag to true when creating a cancellation request.
+	ImmediateCancellationFlag *bool `json:"immediateCancellationFlag,omitempty" xmlrpc:"immediateCancellationFlag,omitempty"`
+
+	// The scheduled cancellation date.
+	ScheduledCancellationDate *Time `json:"scheduledCancellationDate,omitempty" xmlrpc:"scheduledCancellationDate,omitempty"`
+
+	// The reclaim status of a service.
+	ServiceReclaimStatusCode *string `json:"serviceReclaimStatusCode,omitempty" xmlrpc:"serviceReclaimStatusCode,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Cancellation_Request_Status data type represents the status of a service cancellation request.
+type Billing_Item_Cancellation_Request_Status struct {
+	Entity
+
+	// The short description of a cancellation request status.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The internal identifier of a cancellation request status.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The status key name.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The status name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Ctc_Account data type contains general information relating to a single SoftLayer billing item for a CTC client account creation.
+type Billing_Item_Ctc_Account struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Gateway_Appliance_Cluster data type contains general information relating to a single SoftLayer billing item for a gateway appliance cluster.
+type Billing_Item_Gateway_Appliance_Cluster struct {
+	Billing_Item
+
+	// The resource for a resource group billing item.
+	Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Hardware data type contains general information relating to a single SoftLayer billing item for hardware.
+type Billing_Item_Hardware struct {
+	Billing_Item
+
+	// The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+	BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"`
+
+	// A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+	BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"`
+
+	// The raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"`
+
+	// A count of the raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"`
+
+	// The total private inbound bandwidth for this hardware for the current billing cycle.
+	BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"`
+
+	// The total private outbound bandwidth for this hardware for the current billing cycle.
+	BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"`
+
+	// The total private bandwidth for this hardware for the current billing cycle.
+	BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"`
+
+	// The raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"`
+
+	// A count of the raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsageCount *uint `json:"billingCyclePublicBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePublicBandwidthUsageCount,omitempty"`
+
+	// The total public inbound bandwidth for this hardware for the current billing cycle.
+	BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"`
+
+	// The total public outbound bandwidth for this hardware for the current billing cycle.
+	BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"`
+
+	// The total public bandwidth for this hardware for the current billing cycle.
+	BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"`
+
+	// A lockbox account associated with a server.
+	LockboxNetworkStorage *Billing_Item_Network_Storage `json:"lockboxNetworkStorage,omitempty" xmlrpc:"lockboxNetworkStorage,omitempty"`
+
+	// A count of the monitoring billing items.
+	MonitoringBillingItemCount *uint `json:"monitoringBillingItemCount,omitempty" xmlrpc:"monitoringBillingItemCount,omitempty"`
+
+	// no documentation yet
+	MonitoringBillingItems []Billing_Item `json:"monitoringBillingItems,omitempty" xmlrpc:"monitoringBillingItems,omitempty"`
+
+	// The resource for a server billing item.
+	Resource *Hardware_Server `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a server billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Hardware_Colocation data type contains general information relating to a single SoftLayer billing item for colocated hardware.
+type Billing_Item_Hardware_Colocation struct {
+	Billing_Item_Hardware
+}
+
+// The SoftLayer_Billing_Item_Hardware_Component data type contains general information relating to a single SoftLayer billing item for hardware components.
+type Billing_Item_Hardware_Component struct {
+	Billing_Item
+
+	// The hardware component that this billing item points to.
+	Resource []Hardware_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// A count of the hardware component that this billing item points to.
+	ResourceCount *uint `json:"resourceCount,omitempty" xmlrpc:"resourceCount,omitempty"`
+
+	// The resource (unique identifier) for a server billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Hardware_Security_Module data type contains general information relating to a single SoftLayer billing item for a hardware security module.
+type Billing_Item_Hardware_Security_Module struct {
+	Billing_Item_Hardware
+}
+
+// The SoftLayer_Billing_Item_Hardware_Server data type contains billing information about a bare metal server and its relationship to a particular customer account.
+type Billing_Item_Hardware_Server struct {
+	Billing_Item_Hardware
+}
+
+// no documentation yet
+type Billing_Item_Link_ThePlanet struct {
+	Entity
+
+	// no documentation yet
+	BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+	// no documentation yet
+	ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Application_Delivery_Controller data type describes the billing item related to a NetScaler VPX.
+type Billing_Item_Network_Application_Delivery_Controller struct {
+	Billing_Item
+
+	// The bandwidth allotment detail for a billing item.
+	BandwidthAllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"bandwidthAllotmentDetail,omitempty" xmlrpc:"bandwidthAllotmentDetail,omitempty"`
+
+	// The network application controller that a billing item is associated with.
+	Resource *Network_Application_Delivery_Controller `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// A SoftLayer_Billing_Item_Network_Application_Delivery_Controller_LoadBalancer represents the [[SoftLayer_Billing_Item|billing item]] related to a single [[SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress|load balancer]] instance.
+type Billing_Item_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress struct {
+	Billing_Item
+
+	// The load balancer that a load balancer billing item is associated with.
+	Resource *Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Bandwidth data type contains general information relating to a single SoftLayer billing item for network bandwidth.
+type Billing_Item_Network_Bandwidth struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Network_Firewall data type contains general information relating to a single SoftLayer billing item whose item category code is 'firewall'.
+type Billing_Item_Network_Firewall struct {
+	Billing_Item
+
+	// The VLAN firewall that a VLAN firewall billing item is associated with.
+	Resource *Network_Component_Firewall `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Firewall_Module_Context data type describes the billing items related to VLAN Firewalls.
+type Billing_Item_Network_Firewall_Module_Context struct {
+	Billing_Item
+}
+
+// A SoftLayer_Billing_Item_Network_Interconnect represents the [[SoftLayer_Billing_Item|billing item]] related to a network interconnect instance.
+type Billing_Item_Network_Interconnect struct {
+	Billing_Item
+}
+
+// A SoftLayer_Billing_Item_Network_LoadBalancer represents the [[SoftLayer_Billing_Item|billing item]] related to a single [[SoftLayer_Network_LoadBalancer|load balancer]] instance.
+type Billing_Item_Network_LoadBalancer struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Network_LoadBalancer_Global data type contains general information relating to a single SoftLayer billing item whose item category code is 'global_load_balancer'.
+type Billing_Item_Network_LoadBalancer_Global struct {
+	Billing_Item
+
+	// The resource for a global load balancer billing item.
+	Resource *Network_LoadBalancer_Global_Account `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// A SoftLayer_Billing_Item_Network_LoadBalancer_VirtualIpAddress represents the [[SoftLayer_Billing_Item|billing item]] related to a single [[SoftLayer_Network_LoadBalancer_VirtualIpAddress|load balancer]] instance.
+type Billing_Item_Network_LoadBalancer_VirtualIpAddress struct {
+	Billing_Item
+
+	// The load balancer's virtual IP address that the billing item is associated with.
+	Resource *Network_LoadBalancer_VirtualIpAddress `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Message_Delivery data type describes the related billing item.
+type Billing_Item_Network_Message_Delivery struct {
+	Billing_Item
+
+	// The object this billing item is associated with.
+	Resource *Network_Message_Delivery `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Message_Queue data type describes the related billing item.
+type Billing_Item_Network_Message_Queue struct {
+	Billing_Item
+
+	// The object this billing item is associated with.
+	Resource *Network_Message_Queue `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Message_Queue_Delivery data type describes the related billing item.
+type Billing_Item_Network_Message_Queue_Delivery struct {
+	Billing_Item_Network_Message_Queue
+}
+
+// The SoftLayer_Billing_Item_Network_PerformanceStorage_Iscsi data type contains general information relating to a single SoftLayer billing item whose item category code is 'performance_storage_iscsi'.
+type Billing_Item_Network_PerformanceStorage_Iscsi struct {
+	Billing_Item_Network_Storage
+}
+
+// The SoftLayer_Billing_Item_Network_PerformanceStorage_Nfs data type contains general information relating to a single SoftLayer billing item whose item category code is 'performance_storage_nfs'.
+type Billing_Item_Network_PerformanceStorage_Nfs struct {
+	Billing_Item_Network_Storage
+}
+
+// The SoftLayer_Billing_Item_Network_Storage data type describes the billing items related to StorageLayer accounts.
+type Billing_Item_Network_Storage struct {
+	Billing_Item
+
+	// The StorageLayer account that a network storage billing item is associated with.
+	Resource *Network_Storage `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Storage_Hub models all billing items related to hub-based StorageLayer offerings, such as CloudLayer storage.
+type Billing_Item_Network_Storage_Hub struct {
+	Billing_Item_Network_Storage
+}
+
+// The SoftLayer_Billing_Item_Network_Storage_Hub_Bandwidth data type models the billing items created when a CloudLayer storage account generates a bandwidth overage charge.
+type Billing_Item_Network_Storage_Hub_Bandwidth struct {
+	Billing_Item_Network_Storage
+}
+
+// The SoftLayer_Billing_Item_Network_Subnet data type contains general information relating to a single SoftLayer billing item whose item category code is one of the following:
+// * pri_ip_address
+// * static_sec_ip_addresses (static secondary)
+// * sov_sec_ip_addresses (secondary on vlan, also known as "portable ips")
+// * sov_sec_ip_addresses_pub (sov_sec_ip_addresses public only)
+// * sov_sec_ip_addresses_priv (sov_sec_ip_addresses private only)
+// * sec_ip_addresses (old style, secondary ip addresses)
+//
+//
+// These item categories denote that the billing item has subnet information attached.
+type Billing_Item_Network_Subnet struct {
+	Billing_Item
+
+	// The resource for a subnet-related billing item.
+	Resource *Network_Subnet `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource name for a subnet billing item.
+	ResourceName *string `json:"resourceName,omitempty" xmlrpc:"resourceName,omitempty"`
+
+	// The resource (unique identifier) for a server billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Subnet_IpAddress_Global data type contains general information relating to a single SoftLayer billing item whose item category code is one of the following:
+// * global_ipv4
+// * global_ipv6
+//
+//
+// These item categories denote that the billing item has subnet information attached.
+type Billing_Item_Network_Subnet_IpAddress_Global struct {
+	Billing_Item_Network_Subnet
+}
+
+// The SoftLayer_Billing_Item_Network_Tunnel data type describes the billing items related to IPsec network tunnels.
+type Billing_Item_Network_Tunnel struct {
+	Billing_Item
+
+	// The IPsec VPN that a network tunnel billing item is associated with.
+	Resource *Network_Tunnel_Module_Context `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Network_Vlan data type contains general information relating to a single SoftLayer billing item whose item category code is one of the following:
+// * network_vlan
+//
+//
+// These item categories denote that the billing item has network vlan information attached.
+type Billing_Item_Network_Vlan struct {
+	Billing_Item
+
+	// The resource for a network vlan related billing item.
+	Resource *Network_Vlan `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// no documentation yet
+type Billing_Item_NewCustomerSetup struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Private_Cloud data type contains general information relating to a single billing item for a private cloud.
+type Billing_Item_Private_Cloud struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component data type contains general information relating to a single SoftLayer billing item for software components.
+type Billing_Item_Software_Component struct {
+	Billing_Item
+
+	// The software component that this billing item points to.
+	Resource *Software_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a software component billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
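+
+// Editor's example (hypothetical, not part of the generated bindings): the
+// subnet, VLAN, and IP address billing item types above are distinguished by
+// the item category codes listed in their doc comments. A caller holding a
+// plain Billing_Item can test CategoryCode against those codes, for instance:
+//
+//	func isSubnetBillingItem(item Billing_Item) bool {
+//		if item.CategoryCode == nil {
+//			return false
+//		}
+//		switch *item.CategoryCode {
+//		case "pri_ip_address", "static_sec_ip_addresses",
+//			"sov_sec_ip_addresses", "sov_sec_ip_addresses_pub",
+//			"sov_sec_ip_addresses_priv", "sec_ip_addresses":
+//			return true
+//		}
+//		return false
+//	}
+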
+// The SoftLayer_Billing_Item_Software_Component_Analytics_Urchin data type contains general information relating to a single SoftLayer billing item for Urchin software components.
+type Billing_Item_Software_Component_Analytics_Urchin struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component_ControlPanel data type contains general information relating to a single SoftLayer billing item for control panel software components.
+type Billing_Item_Software_Component_ControlPanel struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component_ControlPanel_Parallels_Plesk_Billing data type contains general information relating to a single SoftLayer billing item for control panel software components.
+type Billing_Item_Software_Component_ControlPanel_Parallels_Plesk_Billing struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component_OperatingSystem_Addon data type contains general information relating to a single SoftLayer billing item for operating system add-on software components.
+type Billing_Item_Software_Component_OperatingSystem_Addon struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component_OperatingSystem_Addon_Citrix_Essentials data type contains general information relating to a single SoftLayer billing item for Citrix Essentials software components.
+type Billing_Item_Software_Component_OperatingSystem_Addon_Citrix_Essentials struct {
+	Billing_Item_Software_Component_OperatingSystem_Addon
+
+	// The Citrix Essentials software component that a billing item is associated with.
+	Resource *Software_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Software_Component_Virtual_OperatingSystem data type contains general information relating to a single SoftLayer billing item for operating system software components on virtual machines.
+type Billing_Item_Software_Component_Virtual_OperatingSystem struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_Software_Component_Virtual_OperatingSystem_Microsoft data type contains general information relating to a single SoftLayer billing item for Microsoft operating system software components on virtual machines.
+type Billing_Item_Software_Component_Virtual_OperatingSystem_Microsoft struct {
+	Billing_Item_Software_Component_Virtual_OperatingSystem
+
+	// The software virtual license to which this billing item points.
+	Resource *Software_VirtualLicense `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a software virtual license billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Software_Component_Virtual_OperatingSystem_Redhat data type contains general information relating to a single SoftLayer billing item for Red Hat operating system software components on virtual machines.
+type Billing_Item_Software_Component_Virtual_OperatingSystem_Redhat struct {
+	Billing_Item_Software_Component_Virtual_OperatingSystem
+
+	// The software component to which this billing item points.
+	Resource *Software_Component `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a software component billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Software_License data type contains general information relating to a single SoftLayer billing item for a software license.
+type Billing_Item_Software_License struct {
+	Billing_Item
+
+	// The resource for a software license billing item.
+	Resource *Software_AccountLicense `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Support data type contains general information relating to a premium support offering.
+type Billing_Item_Support struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Item_User_Customer_External_Binding data type describes the billing item related to an external authentication binding.
+type Billing_Item_User_Customer_External_Binding struct {
+	Billing_Item
+
+	// The external authentication binding that a billing item is associated with.
+	Resource *User_Customer_External_Binding `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// no documentation yet
+type Billing_Item_Virtual_DedicatedHost struct {
+	Billing_Item
+
+	// The resource for a virtual dedicated host billing item.
+	Resource *Virtual_DedicatedHost `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a server billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Virtual_Dedicated_Rack data type models the billing information for a single bandwidth pool. Bandwidth pool members share their public bandwidth allocations, and overage charges are incurred by the pool rather than by individual rack members. A virtual rack billing item is the parent item for all of its rack membership billing items (a usage sketch appears in the comments below).
+type Billing_Item_Virtual_Dedicated_Rack struct {
+	Billing_Item
+
+	// The raw bandwidth usage data for the current billing cycle. One object is returned for each network a virtual rack is attached to.
+	BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"`
+
+	// A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network a virtual rack is attached to.
+	BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"`
+
+	// The raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"`
+
+	// A count of the raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"`
+
+	// The total private network inbound bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"`
+
+	// The total private network outbound bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"`
+
+	// The total private network bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"`
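+
+	// Editor's example (hypothetical, not part of the generated bindings):
+	// summing a virtual rack's public bandwidth for the current billing cycle
+	// from the fields below. The inbound/outbound totals are Float64 wrappers
+	// and may be nil if they were not requested in the object mask.
+	//
+	//	func publicUsageTotal(rack Billing_Item_Virtual_Dedicated_Rack) float64 {
+	//		var total float64
+	//		if rack.BillingCyclePublicUsageIn != nil {
+	//			total += float64(*rack.BillingCyclePublicUsageIn)
+	//		}
+	//		if rack.BillingCyclePublicUsageOut != nil {
+	//			total += float64(*rack.BillingCyclePublicUsageOut)
+	//		}
+	//		return total
+	//	}
+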
+	// The raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"`
+
+	// A count of the raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsageCount *uint `json:"billingCyclePublicBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePublicBandwidthUsageCount,omitempty"`
+
+	// The total public inbound bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"`
+
+	// The total public outbound bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"`
+
+	// The total public bandwidth for this virtual rack for the current billing cycle.
+	BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"`
+
+	// The virtual rack that a virtual rack billing item is associated with.
+	Resource *Network_Bandwidth_Version1_Allotment `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Virtual_Disk_Image data type contains general information relating to a single SoftLayer billing item for disk images.
+type Billing_Item_Virtual_Disk_Image struct {
+	Billing_Item
+
+	// The disk image to which the billing item points.
+	Resource *Virtual_Disk_Image `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a disk image billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Virtual_Guest data type contains general information relating to a single SoftLayer billing item for guests.
+type Billing_Item_Virtual_Guest struct {
+	Billing_Item
+
+	// The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+	BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"`
+
+	// A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+	BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"`
+
+	// The raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"`
+
+	// A count of the raw private bandwidth usage data for the current billing cycle.
+	BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"`
+
+	// The total private inbound bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"`
+
+	// The total private outbound bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"`
+
+	// The total private bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"`
+
+	// The raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"`
+
+	// A count of the raw public bandwidth usage data for the current billing cycle.
+	BillingCyclePublicBandwidthUsageCount *uint `json:"billingCyclePublicBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePublicBandwidthUsageCount,omitempty"`
+
+	// The total public inbound bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"`
+
+	// The total public outbound bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"`
+
+	// The total public bandwidth for this virtual server for the current billing cycle.
+	BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"`
+
+	// A count of the monitoring billing items.
+	MonitoringBillingItemCount *uint `json:"monitoringBillingItemCount,omitempty" xmlrpc:"monitoringBillingItemCount,omitempty"`
+
+	// no documentation yet
+	MonitoringBillingItems []Billing_Item `json:"monitoringBillingItems,omitempty" xmlrpc:"monitoringBillingItems,omitempty"`
+
+	// The resource for a cloud server billing item.
+	Resource *Virtual_Guest `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a server billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Virtual_Host_Usage data type contains general information relating to a single SoftLayer billing item for virtual machine peak usage.
+type Billing_Item_Virtual_Host_Usage struct {
+	Billing_Item
+
+	// The resource for a peak virtual machine usage billing item.
+	Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+	// The resource (unique identifier) for a server billing item.
+	ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// The SoftLayer_Billing_Item_Workspace data type contains general information relating to a single SoftLayer billing item whose item category code is 'workspace'.
+type Billing_Item_Workspace struct {
+	Billing_Item
+}
+
+// The SoftLayer_Billing_Order data type contains general information relating to an individual order applied to a SoftLayer customer account or to a new customer. Personal information in this type, such as names, addresses, and phone numbers, is taken from the account's contact information at the time the order is generated for an existing SoftLayer customer.
+type Billing_Order struct {
+	Entity
+
+	// The [[SoftLayer_Account|account]] to which an order belongs.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
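+
+	// Editor's example (hypothetical, not part of the generated bindings): a
+	// rough view of what an order will cost, using the one-time and recurring
+	// totals defined further down in this struct. Both fields may be nil when
+	// not requested via an object mask (assumes the fmt package is imported).
+	//
+	//	func describeOrderCost(order Billing_Order) string {
+	//		oneTime, recurring := 0.0, 0.0
+	//		if order.OrderTotalOneTime != nil {
+	//			oneTime = float64(*order.OrderTotalOneTime)
+	//		}
+	//		if order.OrderTotalRecurring != nil {
+	//			recurring = float64(*order.OrderTotalRecurring)
+	//		}
+	//		return fmt.Sprintf("$%.2f now, $%.2f per cycle", oneTime, recurring)
+	//	}
+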
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"` + + // A cart is similar to a quote, except that it can be continually modified by the customer and does not have locked-in prices. Not all orders will have a cart associated with them. See [[SoftLayer_Billing_Order_Cart]] for more information. + Cart *Billing_Order_Cart `json:"cart,omitempty" xmlrpc:"cart,omitempty"` + + // A count of the [[SoftLayer_Billing_Order_Item (type)|order items]] that are core restricted + CoreRestrictedItemCount *uint `json:"coreRestrictedItemCount,omitempty" xmlrpc:"coreRestrictedItemCount,omitempty"` + + // The [[SoftLayer_Billing_Order_Item (type)|order items]] that are core restricted + CoreRestrictedItems []Billing_Order_Item `json:"coreRestrictedItems,omitempty" xmlrpc:"coreRestrictedItems,omitempty"` + + // The point in time at which a billing item was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of all credit card transactions associated with this order. If this order was not placed with a credit card, this will be empty. + CreditCardTransactionCount *uint `json:"creditCardTransactionCount,omitempty" xmlrpc:"creditCardTransactionCount,omitempty"` + + // All credit card transactions associated with this order. If this order was not placed with a credit card, this will be empty. + CreditCardTransactions []Billing_Payment_Card_Transaction `json:"creditCardTransactions,omitempty" xmlrpc:"creditCardTransactions,omitempty"` + + // no documentation yet + ExchangeRate *Billing_Currency_ExchangeRate `json:"exchangeRate,omitempty" xmlrpc:"exchangeRate,omitempty"` + + // * + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The SoftLayer_User_Customer id of the portal or API user who impersonated the user which submitted an order. + ImpersonatingUserRecordId *int `json:"impersonatingUserRecordId,omitempty" xmlrpc:"impersonatingUserRecordId,omitempty"` + + // no documentation yet + InitialInvoice *Billing_Invoice `json:"initialInvoice,omitempty" xmlrpc:"initialInvoice,omitempty"` + + // A count of the SoftLayer_Billing_Order_items included in an order. + ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"` + + // The SoftLayer_Billing_Order_items included in an order. + Items []Billing_Order_Item `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // The last time an order was updated. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + OrderApprovalDate *Time `json:"orderApprovalDate,omitempty" xmlrpc:"orderApprovalDate,omitempty"` + + // An order's non-server items total monthly fee. + OrderNonServerMonthlyAmount *Float64 `json:"orderNonServerMonthlyAmount,omitempty" xmlrpc:"orderNonServerMonthlyAmount,omitempty"` + + // The SoftLayer_Billing_Order_Quote id of the quote's user who finalized an order. + OrderQuoteId *int `json:"orderQuoteId,omitempty" xmlrpc:"orderQuoteId,omitempty"` + + // An order's server items total monthly fee. + OrderServerMonthlyAmount *Float64 `json:"orderServerMonthlyAmount,omitempty" xmlrpc:"orderServerMonthlyAmount,omitempty"` + + // A count of an order's top level items. This normally includes the server line item and any non-server additional services such as NAS or ISCSI. + OrderTopLevelItemCount *uint `json:"orderTopLevelItemCount,omitempty" xmlrpc:"orderTopLevelItemCount,omitempty"` + + // An order's top level items. 
+ OrderTopLevelItems []Billing_Order_Item `json:"orderTopLevelItems,omitempty" xmlrpc:"orderTopLevelItems,omitempty"`
+
+ // This amount represents the order's initial charge including setup fees and taxes.
+ OrderTotalAmount *Float64 `json:"orderTotalAmount,omitempty" xmlrpc:"orderTotalAmount,omitempty"`
+
+ // An order's total one time amount, summing all the setup fees, the labor fees and the one time fees. Taxes will be applied for non-tax-exempt accounts. This amount represents the initial fees that will be charged.
+ OrderTotalOneTime *Float64 `json:"orderTotalOneTime,omitempty" xmlrpc:"orderTotalOneTime,omitempty"`
+
+ // An order's total one time amount. This amount represents the initial fees before tax.
+ OrderTotalOneTimeAmount *Float64 `json:"orderTotalOneTimeAmount,omitempty" xmlrpc:"orderTotalOneTimeAmount,omitempty"`
+
+ // An order's total one time tax amount. This amount represents the tax that will be applied to the total charge, if the SoftLayer_Account tied to a SoftLayer_Billing_Order is a taxable account.
+ OrderTotalOneTimeTaxAmount *Float64 `json:"orderTotalOneTimeTaxAmount,omitempty" xmlrpc:"orderTotalOneTimeTaxAmount,omitempty"`
+
+ // An order's total recurring amount. Taxes will be applied for non-tax-exempt accounts. This amount represents the fees that will be charged on a recurring (usually monthly) basis.
+ OrderTotalRecurring *Float64 `json:"orderTotalRecurring,omitempty" xmlrpc:"orderTotalRecurring,omitempty"`
+
+ // An order's total recurring amount. This amount represents the fees that will be charged on a recurring (usually monthly) basis.
+ OrderTotalRecurringAmount *Float64 `json:"orderTotalRecurringAmount,omitempty" xmlrpc:"orderTotalRecurringAmount,omitempty"`
+
+ // The total tax amount of the recurring fees, if the SoftLayer_Account tied to a SoftLayer_Billing_Order is a taxable account.
+ OrderTotalRecurringTaxAmount *Float64 `json:"orderTotalRecurringTaxAmount,omitempty" xmlrpc:"orderTotalRecurringTaxAmount,omitempty"`
+
+ // An order's total setup fee.
+ OrderTotalSetupAmount *Float64 `json:"orderTotalSetupAmount,omitempty" xmlrpc:"orderTotalSetupAmount,omitempty"`
+
+ // The type of an order. This lets you know where this order was generated from.
+ OrderType *Billing_Order_Type `json:"orderType,omitempty" xmlrpc:"orderType,omitempty"`
+
+ // The SoftLayer_Billing_Order_Type id of the order.
+ OrderTypeId *int `json:"orderTypeId,omitempty" xmlrpc:"orderTypeId,omitempty"`
+
+ // A count of all PayPal transactions associated with this order. If this order was not placed with PayPal, this will be empty.
+ PaypalTransactionCount *uint `json:"paypalTransactionCount,omitempty" xmlrpc:"paypalTransactionCount,omitempty"`
+
+ // All PayPal transactions associated with this order. If this order was not placed with PayPal, this will be empty.
+ PaypalTransactions []Billing_Payment_PayPal_Transaction `json:"paypalTransactions,omitempty" xmlrpc:"paypalTransactions,omitempty"`
+
+ // no documentation yet
+ PresaleEvent *Sales_Presale_Event `json:"presaleEvent,omitempty" xmlrpc:"presaleEvent,omitempty"`
+
+ // no documentation yet
+ PresaleEventId *int `json:"presaleEventId,omitempty" xmlrpc:"presaleEventId,omitempty"`
+
+ // Flag indicating a private cloud solution order (Deprecated)
+ PrivateCloudOrderFlag *bool `json:"privateCloudOrderFlag,omitempty" xmlrpc:"privateCloudOrderFlag,omitempty"`
+
+ // The quote of an order. This quote holds information about its expiration date, creation date, name and status. This information is tied to an order having the status 'QUOTE'.
+ Quote *Billing_Order_Quote `json:"quote,omitempty" xmlrpc:"quote,omitempty"`
+
+ // The Referral Partner who referred this order. (Only necessary for new customer orders)
+ ReferralPartner *Account `json:"referralPartner,omitempty" xmlrpc:"referralPartner,omitempty"`
+
+ // The purchaser's current status, e.g. Approved or Pending_Approval.
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // This flag indicates an order is an upgrade.
+ UpgradeRequestFlag *bool `json:"upgradeRequestFlag,omitempty" xmlrpc:"upgradeRequestFlag,omitempty"`
+
+ // The SoftLayer_User_Customer object tied to an order.
+ UserRecord *User_Customer `json:"userRecord,omitempty" xmlrpc:"userRecord,omitempty"`
+
+ // The SoftLayer_User_Customer id of the portal or API user who submitted an order.
+ UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"`
+}
+
+// no documentation yet
+type Billing_Order_Cart struct {
+ Billing_Order_Quote
+}
+
+// Every individual item that a SoftLayer customer is billed for is recorded in the SoftLayer_Billing_Item data type. Billing items range from server chassis to hard drives to control panels, bandwidth quota upgrades and port upgrade charges. SoftLayer [[SoftLayer_Billing_Invoice|invoices]] are generated from the cost of a customer's billing items. Billing items are copied from the product catalog as they're ordered by customers to create a reference between an account and the billable items they own.
+//
+// Billing items exist in a tree relationship. Items are associated with each other by parent/child relationships. Component items such as CPUs, RAM, and software each have a parent billing item for the server chassis they're associated with. Billing items with a null parent item have no parent and sit at the top of the tree.
+type Billing_Order_Item struct {
+ Entity
+
+ // The SoftLayer_Billing_Item tied to the order item.
+ BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // A count of the other items included with an ordered item.
+ BundledItemCount *uint `json:"bundledItemCount,omitempty" xmlrpc:"bundledItemCount,omitempty"`
+
+ // The other items included with an ordered item.
+ BundledItems []Billing_Order_Item `json:"bundledItems,omitempty" xmlrpc:"bundledItems,omitempty"`
+
+ // The item category tied to an order item.
+ Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"`
+
+ // The category code for the order item.
+ CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"`
+
+ // The child order items for an order item. All server order items should have children. These children are considered a part of the server.
+ Children []Billing_Order_Item `json:"children,omitempty" xmlrpc:"children,omitempty"`
+
+ // A count of the child order items for an order item. All server order items should have children. These children are considered a part of the server.
+ ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"`
+
+ // A friendly description of the purchased item.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
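+
+// Example (an illustrative sketch, not part of the generated API): one
+// plausible way to fetch an order and walk the parent/child order-item tree
+// described above, using the library's companion services and session
+// packages. The credentials, the order id, and the exact object-mask string
+// are assumptions made for the example.
+//
+//    import (
+//        "fmt"
+//
+//        "github.com/softlayer/softlayer-go/services"
+//        "github.com/softlayer/softlayer-go/session"
+//    )
+//
+//    func printOrderTree(username, apiKey string, orderId int) error {
+//        sess := session.New(username, apiKey)
+//        svc := services.GetBillingOrderService(sess)
+//
+//        // Request only what is needed; the nested mask mirrors the
+//        // top-level-item -> children relationship.
+//        order, err := svc.Id(orderId).
+//            Mask("id;orderTopLevelItems[description,children[description]]").
+//            GetObject()
+//        if err != nil {
+//            return err
+//        }
+//        for _, item := range order.OrderTopLevelItems {
+//            if item.Description != nil {
+//                fmt.Println(*item.Description)
+//            }
+//            for _, child := range item.Children {
+//                if child.Description != nil {
+//                    fmt.Println("  -", *child.Description)
+//                }
+//            }
+//        }
+//        return nil
+//    }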
+
+ // The domain name of the server as designated by the purchaser at the time of order placement.
+ DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"`
+
+ // A hardware's universally unique identifier.
+ GlobalIdentifier *string `json:"globalIdentifier,omitempty" xmlrpc:"globalIdentifier,omitempty"`
+
+ // The component type tied to an order item. All hardware-specific items should have a generic hardware component.
+ HardwareGenericComponent *Hardware_Component_Model_Generic `json:"hardwareGenericComponent,omitempty" xmlrpc:"hardwareGenericComponent,omitempty"`
+
+ // The hostname of the server as designated by the purchaser at the time of order placement.
+ HostName *string `json:"hostName,omitempty" xmlrpc:"hostName,omitempty"`
+
+ // The amount of money charged per hour for an order item, if applicable, and only if it was ordered this day. hourlyRecurringFee is measured in US Dollars ($USD).
+ HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The SoftLayer_Product_Item tied to an order item. The item is the actual definition of the product being sold.
+ Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"`
+
+ // A count of an item's category answers.
+ ItemCategoryAnswerCount *uint `json:"itemCategoryAnswerCount,omitempty" xmlrpc:"itemCategoryAnswerCount,omitempty"`
+
+ // These are an item's category answers.
+ ItemCategoryAnswers []Billing_Order_Item_Category_Answer `json:"itemCategoryAnswers,omitempty" xmlrpc:"itemCategoryAnswers,omitempty"`
+
+ // The SoftLayer_Product_Item ID for this order item.
+ ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"`
+
+ // The SoftLayer_Product_Item_Price tied to an order item. The item price object describes the cost of an item.
+ ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"`
+
+ // The item price id (SoftLayer_Product_Item_Price->id) of the ordered item.
+ ItemPriceId *Float64 `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"`
+
+ // An order item's labor fee total after taxes. This does not include any child invoice items.
+ LaborAfterTaxAmount *Float64 `json:"laborAfterTaxAmount,omitempty" xmlrpc:"laborAfterTaxAmount,omitempty"`
+
+ // The labor fee, if any. This is a one time charge.
+ LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"`
+
+ // The rate at which labor fees are taxed if you are a taxable customer.
+ LaborFeeTaxRate *Float64 `json:"laborFeeTaxRate,omitempty" xmlrpc:"laborFeeTaxRate,omitempty"`
+
+ // An order item's labor tax amount. This does not include any child invoice items.
+ LaborTaxAmount *Float64 `json:"laborTaxAmount,omitempty" xmlrpc:"laborTaxAmount,omitempty"`
+
+ // The location of an ordered item. This is usually the same as the server it is being ordered with. Otherwise it describes the location of the additional service being ordered.
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // no documentation yet
+ NextOrderChildren []Billing_Order_Item `json:"nextOrderChildren,omitempty" xmlrpc:"nextOrderChildren,omitempty"`
+
+ // A count of
+ NextOrderChildrenCount *uint `json:"nextOrderChildrenCount,omitempty" xmlrpc:"nextOrderChildrenCount,omitempty"`
+
+ // This is only populated when an upgrade order is placed. The old billing item represents what the billing was before the upgrade happened.
+ OldBillingItem *Billing_Item `json:"oldBillingItem,omitempty" xmlrpc:"oldBillingItem,omitempty"`
+
+ // An order item's one-time fee total after taxes. This does not include any child invoice items.
+ OneTimeAfterTaxAmount *Float64 `json:"oneTimeAfterTaxAmount,omitempty" xmlrpc:"oneTimeAfterTaxAmount,omitempty"`
+
+ // The amount of money charged as a one-time charge for an order item, if applicable. oneTimeFee is measured in US Dollars ($USD).
+ OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"`
+
+ // The rate at which one time fees are taxed if you are a taxable customer.
+ OneTimeFeeTaxRate *Float64 `json:"oneTimeFeeTaxRate,omitempty" xmlrpc:"oneTimeFeeTaxRate,omitempty"`
+
+ // An order item's one-time tax amount. This does not include any child invoice items.
+ OneTimeTaxAmount *Float64 `json:"oneTimeTaxAmount,omitempty" xmlrpc:"oneTimeTaxAmount,omitempty"`
+
+ // The order to which this item belongs. The order contains all the information related to the items included in an order.
+ Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+
+ // no documentation yet
+ OrderApprovalDate *Time `json:"orderApprovalDate,omitempty" xmlrpc:"orderApprovalDate,omitempty"`
+
+ // The SoftLayer_Product_Package an order item is a part of.
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // The parent order item ID for an item. Items that are associated with a server will have a parent. The parent will be the server item itself.
+ Parent *Billing_Order_Item `json:"parent,omitempty" xmlrpc:"parent,omitempty"`
+
+ // no documentation yet
+ ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+
+ // no documentation yet
+ PromoCodeId *int `json:"promoCodeId,omitempty" xmlrpc:"promoCodeId,omitempty"`
+
+ // The quantity of the ordered item in a quote.
+ Quantity *int `json:"quantity,omitempty" xmlrpc:"quantity,omitempty"`
+
+ // An order item's recurring fee total after taxes. This does not include any child invoice items.
+ RecurringAfterTaxAmount *Float64 `json:"recurringAfterTaxAmount,omitempty" xmlrpc:"recurringAfterTaxAmount,omitempty"`
+
+ // The amount of money charged per month for an order item, if applicable. recurringFee is measured in US Dollars ($USD).
+ RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"`
+
+ // An order item's recurring tax amount. This does not include any child invoice items.
+ RecurringTaxAmount *Float64 `json:"recurringTaxAmount,omitempty" xmlrpc:"recurringTaxAmount,omitempty"`
+
+ // A count of power supplies contained within this SoftLayer_Billing_Order
+ RedundantPowerSupplyCount *uint `json:"redundantPowerSupplyCount,omitempty" xmlrpc:"redundantPowerSupplyCount,omitempty"`
+
+ // An order item's setup fee total after taxes. This does not include any child invoice items.
+ SetupAfterTaxAmount *Float64 `json:"setupAfterTaxAmount,omitempty" xmlrpc:"setupAfterTaxAmount,omitempty"`
+
+ // The setup fee, if any. This is a one time charge.
+ SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"`
+
+ // The number of months the setup fee is deferred.
+ SetupFeeDeferralMonths *int `json:"setupFeeDeferralMonths,omitempty" xmlrpc:"setupFeeDeferralMonths,omitempty"`
+
+ // The rate at which setup fees are taxed if you are a taxable customer.
+ SetupFeeTaxRate *Float64 `json:"setupFeeTaxRate,omitempty" xmlrpc:"setupFeeTaxRate,omitempty"`
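+
+// Example (an illustrative sketch): the fee and tax fields above use the
+// package's Float64 type rather than the builtin float64 because the
+// SoftLayer API commonly returns decimal amounts as quoted strings, which
+// the custom type is expected to tolerate when decoding JSON. The payload
+// literal below is an assumption for illustration; encoding/json, fmt and a
+// consumer-side import of this datatypes package are implied.
+//
+//    var item datatypes.Billing_Order_Item
+//    raw := []byte(`{"recurringFee": "15.50", "quantity": 2}`)
+//    if err := json.Unmarshal(raw, &item); err == nil && item.RecurringFee != nil && item.Quantity != nil {
+//        total := float64(*item.RecurringFee) * float64(*item.Quantity)
+//        fmt.Printf("recurring total: $%.2f USD\n", total) // recurring total: $31.00 USD
+//    }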
+
+ // An order item's setup tax amount. This does not include any child invoice items.
+ SetupTaxAmount *Float64 `json:"setupTaxAmount,omitempty" xmlrpc:"setupTaxAmount,omitempty"`
+
+ // For ordered items that are software items, a full description of that software can be found with this property.
+ SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"`
+
+ // A count of the drive storage groups that are attached to this billing order item.
+ StorageGroupCount *uint `json:"storageGroupCount,omitempty" xmlrpc:"storageGroupCount,omitempty"`
+
+ // The drive storage groups that are attached to this billing order item.
+ StorageGroups []Configuration_Storage_Group_Order `json:"storageGroups,omitempty" xmlrpc:"storageGroups,omitempty"`
+
+ // The recurring fee of an ordered item. This amount represents the fees that will be charged on a recurring (usually monthly) basis.
+ TotalRecurringAmount *Float64 `json:"totalRecurringAmount,omitempty" xmlrpc:"totalRecurringAmount,omitempty"`
+
+ // The next SoftLayer_Product_Item in the upgrade path for this order item.
+ UpgradeItem *Product_Item `json:"upgradeItem,omitempty" xmlrpc:"upgradeItem,omitempty"`
+}
+
+// The SoftLayer_Billing_Order_Item_Category_Answer data type represents a single answer to an item category question.
+type Billing_Order_Item_Category_Answer struct {
+ Entity
+
+ // The answer to the question.
+ Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"`
+
+ // The date that the answer was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The billing order item that the answer is for.
+ OrderItem *Billing_Order_Item `json:"orderItem,omitempty" xmlrpc:"orderItem,omitempty"`
+
+ // The question that is being answered.
+ Question *Product_Item_Category_Question `json:"question,omitempty" xmlrpc:"question,omitempty"`
+
+ // The identifier for the question that the answer belongs to.
+ QuestionId *int `json:"questionId,omitempty" xmlrpc:"questionId,omitempty"`
+}
+
+// no documentation yet
+type Billing_Order_Note struct {
+ Entity
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"`
+
+ // no documentation yet
+ Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+}
+
+// The SoftLayer_Billing_Order_Quote data type contains general information relating to an individual order applied to a SoftLayer customer account or to a new customer. Personal information in this type, such as names, addresses, and phone numbers, is taken from the account's contact information at the time the quote is generated for an existing SoftLayer customer.
+type Billing_Order_Quote struct {
+ Entity
+
+ // A quote's corresponding account.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // Identification Number of the account record tied to the quote
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
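+
+// Example (an illustrative sketch): every field in these generated types is
+// a pointer marked omitempty, so an unset field can be told apart from a
+// genuine zero value. Writers typically take addresses of literals via the
+// companion sl helper package, and readers nil-check before dereferencing;
+// the field values below are assumptions, and imports of this datatypes
+// package and github.com/softlayer/softlayer-go/sl are implied.
+//
+//    quote := datatypes.Billing_Order_Quote{
+//        Name:   sl.String("q4-expansion"),
+//        Status: sl.String("PENDING"),
+//    }
+//    if quote.ExpirationDate == nil {
+//        fmt.Println("no expiration date set on", *quote.Name)
+//    }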
+
+ // Identification Number of the order record tied to the quote.
+ CompletedPurchaseDataId *int `json:"completedPurchaseDataId,omitempty" xmlrpc:"completedPurchaseDataId,omitempty"`
+
+ // Holds the date the quote record was created
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // This property holds the expiration date of a quote; after that date, the quote is deemed expired
+ ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"`
+
+ // The id used to identify a quote.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Holds the date the quote record was last modified
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The name given to the quote by the initiator
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // This order contains the records for which products were selected for this quote.
+ Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+
+ // These are all the orders that were created from this quote.
+ OrdersFromQuote []Billing_Order `json:"ordersFromQuote,omitempty" xmlrpc:"ordersFromQuote,omitempty"`
+
+ // A count of these are all the orders that were created from this quote.
+ OrdersFromQuoteCount *uint `json:"ordersFromQuoteCount,omitempty" xmlrpc:"ordersFromQuoteCount,omitempty"`
+
+ // This property holds system-generated notes. For example, if a quote is tied to an order where one of the order items has an inactive promotion code, the quote will be considered invalid.
+ PublicNote *string `json:"publicNote,omitempty" xmlrpc:"publicNote,omitempty"`
+
+ // Holds the system-generated hash password for the quote
+ QuoteKey *string `json:"quoteKey,omitempty" xmlrpc:"quoteKey,omitempty"`
+
+ // This property holds the current status of a quote: pending, expired, saved or deleted
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+}
+
+// The SoftLayer_Billing_Order_Type data type contains general information relating to all the different types of orders that exist. This data pertains only to where an order was generated from: any of the SoftLayer websites with ordering interfaces, or directly through the SoftLayer API.
+type Billing_Order_Type struct {
+ Entity
+
+ // A brief description of where a SoftLayer order originated from.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // A SoftLayer order type's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A simple keyname stating where a SoftLayer order originated from.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// The SoftLayer_Billing_Payment_Card_ChangeRequest data type contains general information relating to attempted credit card information changes.
+type Billing_Payment_Card_ChangeRequest struct {
+ Entity
+
+ // no documentation yet
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The account ID with which the credit card and billing information is associated.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The total amount of the attempted transaction, represented in decimal format as US Dollars ($USD).
+ Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+ // The SoftLayer_Billing_Payment_Card_Transaction tied to the authorization performed as part of this change request.
+ AuthorizedCreditCardTransaction *Billing_Payment_Card_Transaction `json:"authorizedCreditCardTransaction,omitempty" xmlrpc:"authorizedCreditCardTransaction,omitempty"`
+
+ // The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 2.
+ BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"`
+
+ // The second line in the address. Information such as suite number goes here.
+ BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"`
+
+ // The city in which a customer's account resides.
+ BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"`
+
+ // The 2-character Country code for an account's address. (e.g. US)
+ BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"`
+
+ // The email address associated with a customer account.
+ BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"`
+
+ // The company name for an account.
+ BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"`
+
+ // The first name of the customer account owner.
+ BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"`
+
+ // The last name of the customer account owner.
+ BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"`
+
+ // The fax number associated with a customer account.
+ BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"`
+
+ // The phone number associated with a customer account.
+ BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"`
+
+ // The Zip or Postal Code for the billing address on an account.
+ BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"`
+
+ // The State for the account.
+ BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"`
+
+ // The SoftLayer_Billing_Payment_Card_Transaction tied to the capture of funds performed as part of this change request.
+ CaptureCreditCardTransaction *Billing_Payment_Card_Transaction `json:"captureCreditCardTransaction,omitempty" xmlrpc:"captureCreditCardTransaction,omitempty"`
+
+ // The last 4 digits of a customer's credit card.
+ CardAccountLast4 *string `json:"cardAccountLast4,omitempty" xmlrpc:"cardAccountLast4,omitempty"`
+
+ // The card number submitted in the change request.
+ CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"`
+
+ // The month (MM) in which a customer's payment card will expire.
+ CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"`
+
+ // The year (YYYY) in which a customer's payment card will expire.
+ CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"`
+
+ // no documentation yet
+ CardNickname *string `json:"cardNickname,omitempty" xmlrpc:"cardNickname,omitempty"`
+
+ // The type of payment card a customer has. (e.g. Visa, MasterCard, American Express).
+ CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"`
+
+ // The credit card verification number submitted in the change request.
+ CreditCardVerificationNumber *string `json:"creditCardVerificationNumber,omitempty" xmlrpc:"creditCardVerificationNumber,omitempty"`
+
+ // Describes the currency selected for payment
+ CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"`
+
+ // Device Fingerprint Identifier - Used internally and can safely be ignored.
+ DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"`
+
+ // The unique identifier for a single change request.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The notes stored about a customer's payment card.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // no documentation yet
+ PaymentRoleId *int `json:"paymentRoleId,omitempty" xmlrpc:"paymentRoleId,omitempty"`
+
+ // The description of the type of payment sent in a change transaction.
+ PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"`
+
+ // A count of these are tickets tied to a credit card change request.
+ TicketAttachmentReferenceCount *uint `json:"ticketAttachmentReferenceCount,omitempty" xmlrpc:"ticketAttachmentReferenceCount,omitempty"`
+
+ // These are tickets tied to a credit card change request.
+ TicketAttachmentReferences []Ticket_Attachment `json:"ticketAttachmentReferences,omitempty" xmlrpc:"ticketAttachmentReferences,omitempty"`
+
+ // Unique identifier for a ticket discussing the switch between payment methods.
+ TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+}
+
+// The SoftLayer_Billing_Payment_Card_ManualPayment data type contains general information relating to attempted manual payments.
+type Billing_Payment_Card_ManualPayment struct {
+ Entity
+
+ // no documentation yet
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The account ID with which the credit card and billing information is associated.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The total amount of the attempted transaction, represented in decimal format as US Dollars ($USD).
+ Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+ // This is the credit card transaction data tied to a credit card manual payment.
+ AuthorizedCreditCardTransaction *Billing_Payment_Card_Transaction `json:"authorizedCreditCardTransaction,omitempty" xmlrpc:"authorizedCreditCardTransaction,omitempty"`
+
+ // The unique identifier of an attempted credit card transaction.
+ AuthorizedCreditCardTransactionId *int `json:"authorizedCreditCardTransactionId,omitempty" xmlrpc:"authorizedCreditCardTransactionId,omitempty"`
+
+ // This is the PayPal transaction data tied to a PayPal manual payment.
+ AuthorizedPayPalTransaction *Billing_Payment_PayPal_Transaction `json:"authorizedPayPalTransaction,omitempty" xmlrpc:"authorizedPayPalTransaction,omitempty"`
+
+ // The unique identifier of an attempted PayPal transaction.
+ AuthorizedPayPalTransactionId *int `json:"authorizedPayPalTransactionId,omitempty" xmlrpc:"authorizedPayPalTransactionId,omitempty"`
+
+ // The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 2.
+ BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"`
+
+ // The second line in the address. Information such as suite number goes here.
+ BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"`
+
+ // The city in which a customer's account resides.
+ BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"`
+
+ // The 2-character Country code for an account's address. (e.g. US)
+ BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"`
+
+ // The email address associated with a customer account.
+ BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"`
+
+ // The company name for an account.
+ BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"`
+
+ // The first name of the customer account owner.
+ BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"`
+
+ // The last name of the customer account owner.
+ BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"`
+
+ // The fax number associated with a customer account.
+ BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"`
+
+ // The phone number associated with a customer account.
+ BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"`
+
+ // The Zip or Postal Code for the billing address on an account.
+ BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"`
+
+ // The State for the account.
+ BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"`
+
+ // The cancel URL is the page to which PayPal redirects if payment is not approved.
+ CancelUrl *string `json:"cancelUrl,omitempty" xmlrpc:"cancelUrl,omitempty"`
+
+ // The SoftLayer_Billing_Payment_Card_Transaction tied to the capture performed as part of this manual payment. This will only exist if the manual payment was performed with a credit card.
+ CaptureCreditCardTransaction *Billing_Payment_Card_Transaction `json:"captureCreditCardTransaction,omitempty" xmlrpc:"captureCreditCardTransaction,omitempty"`
+
+ // The SoftLayer_Billing_Payment_PayPal_Transaction tied to the capture performed as part of this manual payment. This will only exist if the manual payment was performed via PayPal.
+ CapturePayPalTransaction *Billing_Payment_PayPal_Transaction `json:"capturePayPalTransaction,omitempty" xmlrpc:"capturePayPalTransaction,omitempty"`
+
+ // A hash value of the credit card number.
+ CardAccountHash *string `json:"cardAccountHash,omitempty" xmlrpc:"cardAccountHash,omitempty"`
+
+ // The last 4 digits of a customer's credit card.
+ CardAccountLast4 *string `json:"cardAccountLast4,omitempty" xmlrpc:"cardAccountLast4,omitempty"`
+
+ // The card number submitted in the change request.
+ CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"`
+
+ // The month (MM) in which a customer's payment card will expire.
+ CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"`
+
+ // The year (YYYY) in which a customer's payment card will expire.
+ CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"`
+
+ // The method key of the type of payment issued (Visa - 001, Mastercard - 002, American Express - 003, Discover - 004, PayPal - paypal).
+ CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"`
+
+ // The credit card verification number submitted in the change request.
+ CreditCardVerificationNumber *string `json:"creditCardVerificationNumber,omitempty" xmlrpc:"creditCardVerificationNumber,omitempty"`
+
+ // Describes the currency selected for payment
+ CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"`
+
+ // Device Fingerprint Identifier - Used internally and can safely be ignored.
+ DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"`
+
+ // The IP address from which the transaction originates.
+ FromIpAddress *string `json:"fromIpAddress,omitempty" xmlrpc:"fromIpAddress,omitempty"`
+
+ // The unique identifier for a single manual payment request.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Notes generated as a result of the payment request.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // The description of the type of payment sent in a change transaction.
+ PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"`
+
+ // The return URL is the page to which PayPal redirects after payment is approved.
+ ReturnUrl *string `json:"returnUrl,omitempty" xmlrpc:"returnUrl,omitempty"`
+
+ // A count of these are tickets tied to a credit card manual payment.
+ TicketAttachmentReferenceCount *uint `json:"ticketAttachmentReferenceCount,omitempty" xmlrpc:"ticketAttachmentReferenceCount,omitempty"`
+
+ // These are tickets tied to a credit card manual payment.
+ TicketAttachmentReferences []Ticket_Attachment `json:"ticketAttachmentReferences,omitempty" xmlrpc:"ticketAttachmentReferences,omitempty"`
+
+ // Describes the type of manual payment (PAYPAL or CREDIT_CARD).
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// The SoftLayer_Billing_Payment_Card_Transaction data type contains general information relating to attempted credit card transactions.
+type Billing_Payment_Card_Transaction struct {
+ Billing_Payment_Transaction
+
+ // The account to which a transaction belongs.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The account ID with which the credit card and billing information is associated.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The total amount of the attempted transaction, represented in decimal format as US Dollars ($USD).
+ Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"`
+
+ // The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 2.
+ BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"`
+
+ // The second line in the address. Information such as suite number goes here.
+ BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"`
+
+ // The city in which a customer's account resides.
+ BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"`
+
+ // The 2-character Country code for an account's address. (e.g. US)
+ BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"`
+
+ // The email address associated with a customer account.
+ BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"`
+
+ // The company name for an account.
+ BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"`
+
+ // The first name of the customer account owner.
+ BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"`
+
+ // The last name of the customer account owner.
+ BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"`
+
+ // The fax number associated with a customer account.
+ BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"`
+
+ // The phone number associated with a customer account.
+ BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"`
+
+ // The Zip or Postal Code for the billing address on an account.
+ BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"`
+
+ // The State for the account.
+ BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"`
+
+ // The last 4 digits of a customer's credit card.
+ CardAccountLast4 *int `json:"cardAccountLast4,omitempty" xmlrpc:"cardAccountLast4,omitempty"`
+
+ // The month (MM) in which a customer's payment card will expire.
+ CardExpirationMonth *int `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"`
+
+ // The year (YYYY) in which a customer's payment card will expire.
+ CardExpirationYear *int `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"`
+
+ // The type of payment issued (e.g. Visa, MasterCard, American Express).
+ CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"`
+
+ // The date that the transaction was attempted.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The unique identifier for a single credit card transaction request.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Unique identifier of the invoice to which funds will be applied.
+ InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"`
+
+ // The date that the transaction was modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+
+ // The IP address from which the transaction originates.
+ OrderFromIpAddress *string `json:"orderFromIpAddress,omitempty" xmlrpc:"orderFromIpAddress,omitempty"`
+
+ // A code used by the financial institution to refer to the requested transaction.
+ ReferenceCode *string `json:"referenceCode,omitempty" xmlrpc:"referenceCode,omitempty"`
+
+ // The unique identifier of the request submitted to the financial institution.
+ RequestId *string `json:"requestId,omitempty" xmlrpc:"requestId,omitempty"`
+
+ // The status code returned from the financial institution.
+ ReturnStatus *int `json:"returnStatus,omitempty" xmlrpc:"returnStatus,omitempty"`
+
+ // A serialized, delimited string of the transaction reply received from the financial institution.
+ SerializedReply *string `json:"serializedReply,omitempty" xmlrpc:"serializedReply,omitempty"`
+
+ // A serialized, delimited string of the transaction request sent to the financial institution.
+ SerializedRequest *string `json:"serializedRequest,omitempty" xmlrpc:"serializedRequest,omitempty"`
+}
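+
+// Example (an illustrative sketch): relational properties such as an order's
+// creditCardTransactions are normally read through generated getters on the
+// matching service type. The GetBillingOrderService/GetCreditCardTransactions
+// names follow the generator's usual convention but are assumptions here, as
+// are the sess and orderId values.
+//
+//    if txns, err := services.GetBillingOrderService(sess).Id(orderId).GetCreditCardTransactions(); err == nil {
+//        for _, t := range txns {
+//            if t.ReferenceCode != nil && t.ReturnStatus != nil {
+//                fmt.Println(*t.ReferenceCode, *t.ReturnStatus)
+//            }
+//        }
+//    }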
+
+// The SoftLayer_Billing_Payment_PayPal_Transaction data type contains general information relating to attempted PayPal transactions.
+type Billing_Payment_PayPal_Transaction struct {
+ Billing_Payment_Transaction
+
+ // The account to which a transaction belongs.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The account ID with which the PayPal and billing information is associated.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // City given in the address of the PayPal user.
+ AddressCityName *string `json:"addressCityName,omitempty" xmlrpc:"addressCityName,omitempty"`
+
+ // Country given in the named address of the PayPal user.
+ AddressCountry *string `json:"addressCountry,omitempty" xmlrpc:"addressCountry,omitempty"`
+
+ // Name given to the address provided for the PayPal user.
+ AddressName *string `json:"addressName,omitempty" xmlrpc:"addressName,omitempty"`
+
+ // Postal Code of the address of the PayPal user.
+ AddressPostalCode *string `json:"addressPostalCode,omitempty" xmlrpc:"addressPostalCode,omitempty"`
+
+ // State or Province in the address of the PayPal user.
+ AddressStateProvence *string `json:"addressStateProvence,omitempty" xmlrpc:"addressStateProvence,omitempty"`
+
+ // PayPal defined status of the address of the PayPal user.
+ AddressStatus *string `json:"addressStatus,omitempty" xmlrpc:"addressStatus,omitempty"`
+
+ // First line of the street address of the PayPal user.
+ AddressStreet1 *string `json:"addressStreet1,omitempty" xmlrpc:"addressStreet1,omitempty"`
+
+ // Second line of the street address of the PayPal user.
+ AddressStreet2 *string `json:"addressStreet2,omitempty" xmlrpc:"addressStreet2,omitempty"`
+
+ // Phone number provided for the PayPal user.
+ ContactPhone *string `json:"contactPhone,omitempty" xmlrpc:"contactPhone,omitempty"`
+
+ // The date that the transaction was attempted.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // Exchange rate imposed on the payment amount.
+ ExchangeRate *string `json:"exchangeRate,omitempty" xmlrpc:"exchangeRate,omitempty"`
+
+ // PayPal fee applied to the payment.
+ FeeAmount *Float64 `json:"feeAmount,omitempty" xmlrpc:"feeAmount,omitempty"`
+
+ // The total amount of the payment executed by PayPal, represented in decimal format as US Dollars ($USD).
+ GrossAmount *Float64 `json:"grossAmount,omitempty" xmlrpc:"grossAmount,omitempty"`
+
+ // The unique identifier for a single PayPal transaction request.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Unique identifier of the invoice to which funds will be applied.
+ InvoiceId *int `json:"invoiceId,omitempty" xmlrpc:"invoiceId,omitempty"`
+
+ // The name of the command issued to PayPal with regard to the attempted transaction.
+ LastPaypalCommand *string `json:"lastPaypalCommand,omitempty" xmlrpc:"lastPaypalCommand,omitempty"`
+
+ // The date that the transaction was modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+
+ // The IP address from where the PayPal payment request originated.
+ OrderFromIpAddress *string `json:"orderFromIpAddress,omitempty" xmlrpc:"orderFromIpAddress,omitempty"`
+
+ // The amount of the payment submitted through the SoftLayer interface, represented in decimal format as US Dollars ($USD).
+ OrderTotal *Float64 `json:"orderTotal,omitempty" xmlrpc:"orderTotal,omitempty"`
+
+ // The PayPal user account name (email address) associated with the customer account.
+ Payer *string `json:"payer,omitempty" xmlrpc:"payer,omitempty"` + + // The name of the business associated with the PayPal user. + PayerBusiness *string `json:"payerBusiness,omitempty" xmlrpc:"payerBusiness,omitempty"` + + // Country given in the address of the PayPal user. + PayerCountry *string `json:"payerCountry,omitempty" xmlrpc:"payerCountry,omitempty"` + + // First name of the PayPal user. + PayerFirstName *string `json:"payerFirstName,omitempty" xmlrpc:"payerFirstName,omitempty"` + + // Unique PayPal user account identifier. + PayerId *string `json:"payerId,omitempty" xmlrpc:"payerId,omitempty"` + + // Last name of the PayPal user. + PayerLastName *string `json:"payerLastName,omitempty" xmlrpc:"payerLastName,omitempty"` + + // Current PayPal status associated with the user account. + PayerStatus *string `json:"payerStatus,omitempty" xmlrpc:"payerStatus,omitempty"` + + // Date that the payment was confirmed in PayPal by the user. + PaymentDate *Time `json:"paymentDate,omitempty" xmlrpc:"paymentDate,omitempty"` + + // PayPal defined status of the attempted payment. + PaymentStatus *string `json:"paymentStatus,omitempty" xmlrpc:"paymentStatus,omitempty"` + + // PayPal defined code used to identify the type of payment. Provided in a PayPal response. + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // Reason provided by PayPal for a payment given a pending status. + PendingReason *string `json:"pendingReason,omitempty" xmlrpc:"pendingReason,omitempty"` + + // A serialized, delimited string of the reply received from PayPal. + SerializedReply *string `json:"serializedReply,omitempty" xmlrpc:"serializedReply,omitempty"` + + // A serialized, delimited string of the request submitted to PayPal. + SerializedRequest *string `json:"serializedRequest,omitempty" xmlrpc:"serializedRequest,omitempty"` + + // PayPal defined fee. + SettleAmount *Float64 `json:"settleAmount,omitempty" xmlrpc:"settleAmount,omitempty"` + + // Tax applied by PayPal to the payment amount. + TaxAmount *Float64 `json:"taxAmount,omitempty" xmlrpc:"taxAmount,omitempty"` + + // Value issued by PayPal for referencing the attempted transaction. + Token *string `json:"token,omitempty" xmlrpc:"token,omitempty"` + + // Unique transaction ID provided in a PayPal response. + TransactionId *string `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"` + + // PayPal defined code used to identify the type of transaction. Provided in a PayPal response. 
+ TransactionType *string `json:"transactionType,omitempty" xmlrpc:"transactionType,omitempty"` +} + +// no documentation yet +type Billing_Payment_Processor struct { + Entity + + // A count of + BrandAssignmentCount *uint `json:"brandAssignmentCount,omitempty" xmlrpc:"brandAssignmentCount,omitempty"` + + // no documentation yet + BrandAssignments []Brand_Payment_Processor `json:"brandAssignments,omitempty" xmlrpc:"brandAssignments,omitempty"` + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + OwnerAccount *Account `json:"ownerAccount,omitempty" xmlrpc:"ownerAccount,omitempty"` + + // A count of + PaymentMethodCount *uint `json:"paymentMethodCount,omitempty" xmlrpc:"paymentMethodCount,omitempty"` + + // no documentation yet + PaymentMethods []Billing_Payment_Processor_Method `json:"paymentMethods,omitempty" xmlrpc:"paymentMethods,omitempty"` + + // no documentation yet + Type *Billing_Payment_Processor_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Billing_Payment_Processor_Method struct { + Entity + + // no documentation yet + MethodKey *string `json:"methodKey,omitempty" xmlrpc:"methodKey,omitempty"` + + // no documentation yet + MultipleCurrencyFlag *bool `json:"multipleCurrencyFlag,omitempty" xmlrpc:"multipleCurrencyFlag,omitempty"` + + // no documentation yet + PaymentProcessor *Billing_Payment_Processor `json:"paymentProcessor,omitempty" xmlrpc:"paymentProcessor,omitempty"` + + // no documentation yet + PaymentType *Billing_Payment_Type `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` +} + +// no documentation yet +type Billing_Payment_Processor_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of + PaymentProcessorCount *uint `json:"paymentProcessorCount,omitempty" xmlrpc:"paymentProcessorCount,omitempty"` + + // no documentation yet + PaymentProcessors []Billing_Payment_Processor `json:"paymentProcessors,omitempty" xmlrpc:"paymentProcessors,omitempty"` +} + +// Implementation for payment transactions. +type Billing_Payment_Transaction struct { + Entity +} + +// no documentation yet +type Billing_Payment_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/brand.go b/vendor/github.com/softlayer/softlayer-go/datatypes/brand.go new file mode 100644 index 0000000000..aec747cf01 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/brand.go @@ -0,0 +1,227 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// The SoftLayer_Brand data type contains brand information relating to a single SoftLayer customer account.
+//
+// SoftLayer customers are unable to change their brand information in the portal or the API.
+type Brand struct {
+ Entity
+
+ // no documentation yet
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // A count of all accounts owned by the brand.
+ AllOwnedAccountCount *uint `json:"allOwnedAccountCount,omitempty" xmlrpc:"allOwnedAccountCount,omitempty"`
+
+ // All accounts owned by the brand.
+ AllOwnedAccounts []Account `json:"allOwnedAccounts,omitempty" xmlrpc:"allOwnedAccounts,omitempty"`
+
+ // This flag indicates if creation of accounts is allowed.
+ AllowAccountCreationFlag *bool `json:"allowAccountCreationFlag,omitempty" xmlrpc:"allowAccountCreationFlag,omitempty"`
+
+ // The Product Catalog for the Brand
+ Catalog *Product_Catalog `json:"catalog,omitempty" xmlrpc:"catalog,omitempty"`
+
+ // ID of the Catalog used by this Brand
+ CatalogId *int `json:"catalogId,omitempty" xmlrpc:"catalogId,omitempty"`
+
+ // A count of the contacts for the brand.
+ ContactCount *uint `json:"contactCount,omitempty" xmlrpc:"contactCount,omitempty"`
+
+ // The contacts for the brand.
+ Contacts []Brand_Contact `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"`
+
+ // A count of the relationships between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain.
+ CustomerCountryLocationRestrictionCount *uint `json:"customerCountryLocationRestrictionCount,omitempty" xmlrpc:"customerCountryLocationRestrictionCount,omitempty"`
+
+ // This references the relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain.
+ CustomerCountryLocationRestrictions []Brand_Restriction_Location_CustomerCountry `json:"customerCountryLocationRestrictions,omitempty" xmlrpc:"customerCountryLocationRestrictions,omitempty"`
+
+ // no documentation yet
+ Distributor *Brand `json:"distributor,omitempty" xmlrpc:"distributor,omitempty"`
+
+ // no documentation yet
+ DistributorChildFlag *bool `json:"distributorChildFlag,omitempty" xmlrpc:"distributorChildFlag,omitempty"`
+
+ // no documentation yet
+ DistributorFlag *string `json:"distributorFlag,omitempty" xmlrpc:"distributorFlag,omitempty"`
+
+ // An account's associated hardware objects.
+ Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // A count of an account's associated hardware objects.
+ HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"`
+
+ // no documentation yet
+ HasAgentSupportFlag *bool `json:"hasAgentSupportFlag,omitempty" xmlrpc:"hasAgentSupportFlag,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The brand key name.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // The brand long name.
+ LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"`
+
+ // The brand name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A count of
+ OpenTicketCount *uint `json:"openTicketCount,omitempty" xmlrpc:"openTicketCount,omitempty"`
+
+ // no documentation yet
+ OpenTickets []Ticket `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"`
+
+ // A count of active accounts owned by the brand.
+ OwnedAccountCount *uint `json:"ownedAccountCount,omitempty" xmlrpc:"ownedAccountCount,omitempty"`
+
+ // Active accounts owned by the brand.
+ OwnedAccounts []Account `json:"ownedAccounts,omitempty" xmlrpc:"ownedAccounts,omitempty"`
+
+ // A count of
+ TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"`
+
+ // A count of
+ TicketGroupCount *uint `json:"ticketGroupCount,omitempty" xmlrpc:"ticketGroupCount,omitempty"`
+
+ // no documentation yet
+ TicketGroups []Ticket_Group `json:"ticketGroups,omitempty" xmlrpc:"ticketGroups,omitempty"`
+
+ // no documentation yet
+ Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"`
+
+ // A count of
+ UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"`
+
+ // no documentation yet
+ Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"`
+
+ // A count of an account's associated virtual guest objects.
+ VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"`
+
+ // An account's associated virtual guest objects.
+ VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"`
+}
+
+// no documentation yet
+type Brand_Attribute struct {
+ Entity
+
+ // no documentation yet
+ Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"`
+}
+
+// SoftLayer_Brand_Contact contains the contact information for the brand such as Corporate or Support contact information
+type Brand_Contact struct {
+ Entity
+
+ // The contact's address 1.
+ Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
+
+ // The contact's address 2.
+ Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
+
+ // The contact's alternate phone number.
+ AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"`
+
+ // no documentation yet
+ Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"`
+
+ // no documentation yet
+ BrandContactType *Brand_Contact_Type `json:"brandContactType,omitempty" xmlrpc:"brandContactType,omitempty"`
+
+ // The contact's type identifier.
+ BrandContactTypeId *int `json:"brandContactTypeId,omitempty" xmlrpc:"brandContactTypeId,omitempty"`
+
+ // The contact's city.
+ City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
+
+ // The contact's country.
+ Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
+
+ // The contact's email address.
+ Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+ // The contact's fax number.
+ FaxPhone *string `json:"faxPhone,omitempty" xmlrpc:"faxPhone,omitempty"`
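+
+// Example (an illustrative sketch): brand data and its relational properties
+// can be read through the generated SoftLayer_Brand service. GetBrandService,
+// the GetOwnedAccounts getter, and the sess/brandId values are assumptions
+// based on the generator's usual conventions.
+//
+//    if accounts, err := services.GetBrandService(sess).Id(brandId).Mask("id;companyName").GetOwnedAccounts(); err == nil {
+//        fmt.Printf("brand owns %d active accounts\n", len(accounts))
+//    }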
+
+ // The contact's first name.
+ FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+ // The contact's last name.
+ LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+ // The contact's phone number.
+ OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"`
+
+ // The contact's postal code.
+ PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
+
+ // The contact's state.
+ State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
+}
+
+// SoftLayer_Brand_Contact_Type contains the contact type information for the brand contacts such as Corporate or Support contact type
+type Brand_Contact_Type struct {
+ Entity
+
+ // Contact type description.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Contact type key name.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // Contact type name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Brand_Payment_Processor struct {
+ Entity
+
+ // no documentation yet
+ Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"`
+
+ // no documentation yet
+ PaymentProcessor *Billing_Payment_Processor `json:"paymentProcessor,omitempty" xmlrpc:"paymentProcessor,omitempty"`
+}
+
+// The [[SoftLayer_Brand_Restriction_Location_CustomerCountry]] data type defines the relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on the SoftLayer US brand for customers that live in Great Britain.
+type Brand_Restriction_Location_CustomerCountry struct {
+ Entity
+
+ // This references the brand that has a brand-location-country restriction setup.
+ Brand *Brand `json:"brand,omitempty" xmlrpc:"brand,omitempty"`
+
+ // The brand associated with the customer's account.
+ BrandId *int `json:"brandId,omitempty" xmlrpc:"brandId,omitempty"`
+
+ // Country code associated with the customer's account.
+ CustomerCountryCode *string `json:"customerCountryCode,omitempty" xmlrpc:"customerCountryCode,omitempty"`
+
+ // This references the datacenter that has a brand-location-country restriction setup. For example, if a datacenter is listed with a restriction for Canada, a Canadian customer may not be eligible to order services at that location.
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // The id for the datacenter location.
+ LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/catalyst.go b/vendor/github.com/softlayer/softlayer-go/datatypes/catalyst.go
new file mode 100644
index 0000000000..3f8824abe4
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/catalyst.go
@@ -0,0 +1,211 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Catalyst_Affiliate struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + SkipCreditCardVerificationFlag *bool `json:"skipCreditCardVerificationFlag,omitempty" xmlrpc:"skipCreditCardVerificationFlag,omitempty"` +} + +// no documentation yet +type Catalyst_Company_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Catalyst_Enrollment struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Affiliate *Catalyst_Affiliate `json:"affiliate,omitempty" xmlrpc:"affiliate,omitempty"` + + // no documentation yet + AffiliateId *int `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // no documentation yet + AgreementCompleteFlag *int `json:"agreementCompleteFlag,omitempty" xmlrpc:"agreementCompleteFlag,omitempty"` + + // no documentation yet + CompanyDescription *string `json:"companyDescription,omitempty" xmlrpc:"companyDescription,omitempty"` + + // no documentation yet + CompanyType *Catalyst_Company_Type `json:"companyType,omitempty" xmlrpc:"companyType,omitempty"` + + // no documentation yet + CompanyTypeId *int `json:"companyTypeId,omitempty" xmlrpc:"companyTypeId,omitempty"` + + // no documentation yet + EnrollmentDate *Time `json:"enrollmentDate,omitempty" xmlrpc:"enrollmentDate,omitempty"` + + // no documentation yet + GraduationDate *Time `json:"graduationDate,omitempty" xmlrpc:"graduationDate,omitempty"` + + // no documentation yet + IsActiveFlag *bool `json:"isActiveFlag,omitempty" xmlrpc:"isActiveFlag,omitempty"` + + // no documentation yet + MonthlyCreditAmount *Float64 `json:"monthlyCreditAmount,omitempty" xmlrpc:"monthlyCreditAmount,omitempty"` + + // no documentation yet + Representative *User_Employee `json:"representative,omitempty" xmlrpc:"representative,omitempty"` + + // no documentation yet + RepresentativeEmployeeId *int `json:"representativeEmployeeId,omitempty" xmlrpc:"representativeEmployeeId,omitempty"` +} + +// Contains user information for Catalyst self-enrollment. 
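The enrollment-request struct that follows is used like every other generated datatype in this package: all optional fields are pointers, so only explicitly set fields survive the `omitempty` JSON/XML-RPC tags. Below is a minimal usage sketch, not part of the vendored file; the `strPtr` helper and all field values are hypothetical placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

// strPtr is a local convenience helper; the generated structs use
// pointer fields so that unset values are omitted entirely.
func strPtr(s string) *string { return &s }

func main() {
	// Only the populated fields appear in the serialized request;
	// every nil pointer is dropped by the ",omitempty" struct tags.
	req := datatypes.Catalyst_Enrollment_Request{
		FirstName:   strPtr("Ada"),            // placeholder applicant data
		LastName:    strPtr("Lovelace"),
		Email:       strPtr("ada@example.com"),
		CompanyName: strPtr("Example Startup"),
	}

	payload, _ := json.Marshal(req)
	fmt.Println(string(payload))
}
```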
+type Catalyst_Enrollment_Request struct { + Entity + + // Applicant's address + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // Additional field for extended address + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + Affiliate *Catalyst_Affiliate `json:"affiliate,omitempty" xmlrpc:"affiliate,omitempty"` + + // Id of the affiliate who referred the applicant + AffiliateId *int `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // no documentation yet + AgreementCompleteFlag *bool `json:"agreementCompleteFlag,omitempty" xmlrpc:"agreementCompleteFlag,omitempty"` + + // Determines whether or not to also apply to the GEP program + ApplyToGepFlag *bool `json:"applyToGepFlag,omitempty" xmlrpc:"applyToGepFlag,omitempty"` + + // no documentation yet + CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"` + + // no documentation yet + CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"` + + // no documentation yet + CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"` + + // no documentation yet + CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"` + + // no documentation yet + CardVerificationNumber *string `json:"cardVerificationNumber,omitempty" xmlrpc:"cardVerificationNumber,omitempty"` + + // Applicant's city + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // Brief description of Startup's product and key differentiators + CompanyDescription *string `json:"companyDescription,omitempty" xmlrpc:"companyDescription,omitempty"` + + // Name of the applicant's company + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + CompanyType *Catalyst_Company_Type `json:"companyType,omitempty" xmlrpc:"companyType,omitempty"` + + // Id of the company type which best describes applicant's company + CompanyTypeId *int `json:"companyTypeId,omitempty" xmlrpc:"companyTypeId,omitempty"` + + // URL to the Startup's site + CompanyUrl *string `json:"companyUrl,omitempty" xmlrpc:"companyUrl,omitempty"` + + // Applicant's country code + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Index of answer chosen for how many current users question + CurrentUserChoice *int `json:"currentUserChoice,omitempty" xmlrpc:"currentUserChoice,omitempty"` + + // Id of the fingerprint + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // Applicant's email address + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Applicant's first name + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // Index of answer chosen for how many future users question + FutureUserChoice *int `json:"futureUserChoice,omitempty" xmlrpc:"futureUserChoice,omitempty"` + + // Name of accelerator or incubator startup belongs to, if any + IncubatorName *string `json:"incubatorName,omitempty" xmlrpc:"incubatorName,omitempty"` + + // Name of the investor, if any + InvestorName *string `json:"investorName,omitempty" xmlrpc:"investorName,omitempty"` + + // Applicant's last name + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // Applicant's primary phone number + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // Whether or
not the startup has been operating for more than five years + OverFiveYearsOldFlag *bool `json:"overFiveYearsOldFlag,omitempty" xmlrpc:"overFiveYearsOldFlag,omitempty"` + + // Applicant's postal code + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // IBM referral code, if any + ReferralCode *string `json:"referralCode,omitempty" xmlrpc:"referralCode,omitempty"` + + // Whether or not the startup has over one million in annual revenue + RevenueOverOneMillionFlag *bool `json:"revenueOverOneMillionFlag,omitempty" xmlrpc:"revenueOverOneMillionFlag,omitempty"` + + // Determines whether or not to apply to the Catalyst program + SkipCatalystApplicationFlag *bool `json:"skipCatalystApplicationFlag,omitempty" xmlrpc:"skipCatalystApplicationFlag,omitempty"` + + // Applicant's state/region code + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // Applicant's vatId, if one exists + VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"` +} + +// no documentation yet +type Catalyst_Enrollment_Request_Container_AnswerOption struct { + Entity + + // no documentation yet + Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // no documentation yet + Index *int `json:"index,omitempty" xmlrpc:"index,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/compliance.go b/vendor/github.com/softlayer/softlayer-go/datatypes/compliance.go new file mode 100644 index 0000000000..0efbbf9292 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/compliance.go @@ -0,0 +1,35 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Compliance_Report_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/configuration.go b/vendor/github.com/softlayer/softlayer-go/datatypes/configuration.go new file mode 100644 index 0000000000..c39b50e341 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/configuration.go @@ -0,0 +1,547 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Configuration_Storage_Filesystem_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Supported hardware raid modes +type Configuration_Storage_Group_Array_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + DriveMultiplier *int `json:"driveMultiplier,omitempty" xmlrpc:"driveMultiplier,omitempty"` + + // A count of + HardwareComponentModelCount *uint `json:"hardwareComponentModelCount,omitempty" xmlrpc:"hardwareComponentModelCount,omitempty"` + + // no documentation yet + HardwareComponentModels []Hardware_Component_Model `json:"hardwareComponentModels,omitempty" xmlrpc:"hardwareComponentModels,omitempty"` + + // no documentation yet + HotspareAllow *bool `json:"hotspareAllow,omitempty" xmlrpc:"hotspareAllow,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + MaximumDrives *int `json:"maximumDrives,omitempty" xmlrpc:"maximumDrives,omitempty"` + + // no documentation yet + MinimumDrives *int `json:"minimumDrives,omitempty" xmlrpc:"minimumDrives,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Single storage group(array) used for a hardware server order. +// +// If a raid configuration is required this object will describe a single array that will be configured on the server. If the server requires more than one array, a storage group will need to be created for each array. +type Configuration_Storage_Group_Order struct { + Entity + + // no documentation yet + ArrayNumber *int `json:"arrayNumber,omitempty" xmlrpc:"arrayNumber,omitempty"` + + // no documentation yet + ArraySize *Float64 `json:"arraySize,omitempty" xmlrpc:"arraySize,omitempty"` + + // Raid mode for the storage group. + ArrayType *Configuration_Storage_Group_Array_Type `json:"arrayType,omitempty" xmlrpc:"arrayType,omitempty"` + + // no documentation yet + ArrayTypeId *int `json:"arrayTypeId,omitempty" xmlrpc:"arrayTypeId,omitempty"` + + // The order item that relates to this storage group. + BillingOrderItem *Billing_Order_Item `json:"billingOrderItem,omitempty" xmlrpc:"billingOrderItem,omitempty"` + + // no documentation yet + BillingOrderItemId *int `json:"billingOrderItemId,omitempty" xmlrpc:"billingOrderItemId,omitempty"` + + // no documentation yet + Controller *int `json:"controller,omitempty" xmlrpc:"controller,omitempty"` + + // no documentation yet + HardDrives []int `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"` + + // no documentation yet + HotSpareDrives []int `json:"hotSpareDrives,omitempty" xmlrpc:"hotSpareDrives,omitempty"` + + // no documentation yet + LvmFlag *bool `json:"lvmFlag,omitempty" xmlrpc:"lvmFlag,omitempty"` + + // no documentation yet + PartitionData *string `json:"partitionData,omitempty" xmlrpc:"partitionData,omitempty"` +} + +// Single storage group(array) used in a storage group template. +// +// If a server configuration requires a raid configuration this object will describe a single array to be configured. 
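To make the one-storage-group-per-array rule documented above concrete: a server with a mirrored boot pair and a parity data array needs two Configuration_Storage_Group_Order values. The sketch below is illustrative only (not part of the vendored file); the RAID array type ids are hypothetical placeholders that would really be looked up from the Configuration_Storage_Group_Array_Type records.

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

func intPtr(i int) *int { return &i }

func main() {
	// Hypothetical array type ids; real values come from the
	// Configuration_Storage_Group_Array_Type catalog.
	const raid1TypeID, raid5TypeID = 1, 2

	// One Configuration_Storage_Group_Order per array to be configured.
	groups := []datatypes.Configuration_Storage_Group_Order{
		{
			ArrayNumber: intPtr(0),
			ArrayTypeId: intPtr(raid1TypeID),
			HardDrives:  []int{0, 1}, // mirrored boot pair
		},
		{
			ArrayNumber:    intPtr(1),
			ArrayTypeId:    intPtr(raid5TypeID),
			HardDrives:     []int{2, 3, 4}, // data array
			HotSpareDrives: []int{5},       // dedicated hot spare
		},
	}
	fmt.Printf("ordering %d storage groups\n", len(groups))
}
```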
+type Configuration_Storage_Group_Template_Group struct { + Entity + + // Flag to use all available space. + Grow *bool `json:"grow,omitempty" xmlrpc:"grow,omitempty"` + + // Comma delimited integers of drive indexes for the array. This can also be the string 'all' to specify all drives in the server + HardDrivesString *string `json:"hardDrivesString,omitempty" xmlrpc:"hardDrivesString,omitempty"` + + // The order of the arrays in the template. + OrderIndex *int `json:"orderIndex,omitempty" xmlrpc:"orderIndex,omitempty"` + + // Size of array. Must be within limitations of the smallest drive and raid mode + Size *Float64 `json:"size,omitempty" xmlrpc:"size,omitempty"` + + // no documentation yet + Type *Configuration_Storage_Group_Array_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Configuration_Template data type contains general information of an arbitrary resource. +type Configuration_Template struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Internal identifier of a SoftLayer account that this configuration template belongs to + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of + ConfigurationSectionCount *uint `json:"configurationSectionCount,omitempty" xmlrpc:"configurationSectionCount,omitempty"` + + // no documentation yet + ConfigurationSections []Configuration_Template_Section `json:"configurationSections,omitempty" xmlrpc:"configurationSections,omitempty"` + + // no documentation yet + ConfigurationTemplateReference []Monitoring_Agent_Configuration_Template_Group_Reference `json:"configurationTemplateReference,omitempty" xmlrpc:"configurationTemplateReference,omitempty"` + + // A count of + ConfigurationTemplateReferenceCount *uint `json:"configurationTemplateReferenceCount,omitempty" xmlrpc:"configurationTemplateReferenceCount,omitempty"` + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of + DefaultValueCount *uint `json:"defaultValueCount,omitempty" xmlrpc:"defaultValueCount,omitempty"` + + // no documentation yet + DefaultValues []Configuration_Template_Section_Definition_Value `json:"defaultValues,omitempty" xmlrpc:"defaultValues,omitempty"` + + // A count of + DefinitionCount *uint `json:"definitionCount,omitempty" xmlrpc:"definitionCount,omitempty"` + + // no documentation yet + Definitions []Configuration_Template_Section_Definition `json:"definitions,omitempty" xmlrpc:"definitions,omitempty"` + + // Configuration template description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration template. 
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // Internal identifier of a product item that this configuration template is associated with + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // no documentation yet + LinkedSectionReferences *Configuration_Template_Section_Reference `json:"linkedSectionReferences,omitempty" xmlrpc:"linkedSectionReferences,omitempty"` + + // Last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Configuration template name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Parent *Configuration_Template `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // Internal identifier of the parent configuration template + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // Internal identifier of a user that last modified this configuration template + UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"` +} + +// Configuration template attribute class contains supplementary information for a configuration template. +type Configuration_Template_Attribute struct { + Entity + + // no documentation yet + ConfigurationTemplate *Configuration_Template `json:"configurationTemplate,omitempty" xmlrpc:"configurationTemplate,omitempty"` + + // Value of a configuration template attribute + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Configuration_Template_Section data type contains information of a configuration section. +// +// Configuration can contain sub-sections. +type Configuration_Template_Section struct { + Entity + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of + DefinitionCount *uint `json:"definitionCount,omitempty" xmlrpc:"definitionCount,omitempty"` + + // no documentation yet + Definitions []Configuration_Template_Section_Definition `json:"definitions,omitempty" xmlrpc:"definitions,omitempty"` + + // Configuration section description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + DisallowedDeletionFlag *bool `json:"disallowedDeletionFlag,omitempty" xmlrpc:"disallowedDeletionFlag,omitempty"` + + // Internal identifier of a configuration section. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LinkedTemplate *Configuration_Template `json:"linkedTemplate,omitempty" xmlrpc:"linkedTemplate,omitempty"` + + // Internal identifier of a sub configuration template that this section points to. Use this property if you wish to create a reference to a sub configuration template when creating a linked section. 
+ LinkedTemplateId *string `json:"linkedTemplateId,omitempty" xmlrpc:"linkedTemplateId,omitempty"` + + // no documentation yet + LinkedTemplateReference *Configuration_Template_Section_Reference `json:"linkedTemplateReference,omitempty" xmlrpc:"linkedTemplateReference,omitempty"` + + // Last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Configuration section name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Internal identifier of the parent configuration section + ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"` + + // A count of + ProfileCount *uint `json:"profileCount,omitempty" xmlrpc:"profileCount,omitempty"` + + // no documentation yet + Profiles []Configuration_Template_Section_Profile `json:"profiles,omitempty" xmlrpc:"profiles,omitempty"` + + // no documentation yet + SectionType *Configuration_Template_Section_Type `json:"sectionType,omitempty" xmlrpc:"sectionType,omitempty"` + + // no documentation yet + SectionTypeName *string `json:"sectionTypeName,omitempty" xmlrpc:"sectionTypeName,omitempty"` + + // Sort order + Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"` + + // A count of + SubSectionCount *uint `json:"subSectionCount,omitempty" xmlrpc:"subSectionCount,omitempty"` + + // no documentation yet + SubSections []Configuration_Template_Section `json:"subSections,omitempty" xmlrpc:"subSections,omitempty"` + + // no documentation yet + Template *Configuration_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // Internal identifier of a configuration template that this section belongs to + TemplateId *string `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` + + // Internal identifier of the configuration section type + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// Configuration section attribute class contains supplementary information for a configuration section. +type Configuration_Template_Section_Attribute struct { + Entity + + // no documentation yet + ConfigurationSection *Configuration_Template_Section `json:"configurationSection,omitempty" xmlrpc:"configurationSection,omitempty"` + + // Value of a configuration section attribute + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Configuration definition gives you details of the value that you're setting. +// +// Some monitoring agents require values unique to your system. If value type is defined as "Resource Specific Values", you will have to make an additional API call to retrieve your system specific values. +// +// See [[SoftLayer_Monitoring_Agent::getAvailableConfigurationValues|Monitoring Agent]] service to retrieve your system specific values. +type Configuration_Template_Section_Definition struct { + Entity + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Configuration_Template_Section_Definition_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DefaultValue *Configuration_Template_Section_Definition_Value `json:"defaultValue,omitempty" xmlrpc:"defaultValue,omitempty"` + + // Description of a configuration definition. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Enumeration values separated by comma.
+ EnumerationValues *string `json:"enumerationValues,omitempty" xmlrpc:"enumerationValues,omitempty"` + + // no documentation yet + Group *Configuration_Template_Section_Definition_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // Definition group id. + GroupId *string `json:"groupId,omitempty" xmlrpc:"groupId,omitempty"` + + // Internal identifier of a configuration definition. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Maximum value of a configuration definition. + MaximumValue *string `json:"maximumValue,omitempty" xmlrpc:"maximumValue,omitempty"` + + // Minimum value of a configuration definition. + MinimumValue *string `json:"minimumValue,omitempty" xmlrpc:"minimumValue,omitempty"` + + // Last modify date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + MonitoringDataFlag *bool `json:"monitoringDataFlag,omitempty" xmlrpc:"monitoringDataFlag,omitempty"` + + // Configuration definition name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Definition path. + Path *string `json:"path,omitempty" xmlrpc:"path,omitempty"` + + // Indicates if a configuration value is required for this definition. + RequireValueFlag *int `json:"requireValueFlag,omitempty" xmlrpc:"requireValueFlag,omitempty"` + + // no documentation yet + Section *Configuration_Template_Section `json:"section,omitempty" xmlrpc:"section,omitempty"` + + // Internal identifier of a configuration section. + SectionId *int `json:"sectionId,omitempty" xmlrpc:"sectionId,omitempty"` + + // Shortened configuration definition name. + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` + + // Sort order + Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"` + + // Internal identifier of a configuration definition type. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // no documentation yet + ValueType *Configuration_Template_Section_Definition_Type `json:"valueType,omitempty" xmlrpc:"valueType,omitempty"` +} + +// Configuration definition attribute class contains supplementary information for a configuration definition. +type Configuration_Template_Section_Definition_Attribute struct { + Entity + + // no documentation yet + AttributeType *Configuration_Template_Section_Definition_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"` + + // no documentation yet + ConfigurationDefinition *Configuration_Template_Section_Definition `json:"configurationDefinition,omitempty" xmlrpc:"configurationDefinition,omitempty"` + + // Value of a configuration definition attribute + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// SoftLayer_Configuration_Template_Attribute_Type models the type of attribute that can be assigned to a configuration definition. +type Configuration_Template_Section_Definition_Attribute_Type struct { + Entity + + // Description of a definition attribute type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Name of a definition attribute type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Configuration definition group gives you details of the definition and allows extra functionality. +// +// +type Configuration_Template_Section_Definition_Group struct { + Entity + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Internal Description of a definition group. 
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a definition group. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Internal Definition group name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Parent *Configuration_Template_Section_Definition_Group `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // Sort order + SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` +} + +// SoftLayer_Configuration_Template_Section_Definition_Type further defines the value of a configuration definition. +type Configuration_Template_Section_Definition_Type struct { + Entity + + // Description of a configuration value type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration value type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of a configuration value type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Configuration_Section_Value is used to set the value for a configuration definition +type Configuration_Template_Section_Definition_Value struct { + Entity + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Definition *Configuration_Template_Section_Definition `json:"definition,omitempty" xmlrpc:"definition,omitempty"` + + // Internal identifier of a configuration definition that this configuration value is defined by + DefinitionId *int `json:"definitionId,omitempty" xmlrpc:"definitionId,omitempty"` + + // Internal Last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Template *Configuration_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // Internal identifier of a configuration template that this configuration value belongs to + TemplateId *int `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` + + // Internal Configuration value + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Some configuration templates let you create unique configuration profiles. +// +// For example, you can create multiple configuration profiles to monitor multiple hard drives with "CPU/Memory/Disk Monitoring Agent". SoftLayer_Configuration_Template_Section_Profile helps you keep track of custom configuration profiles. +type Configuration_Template_Section_Profile struct { + Entity + + // Internal identifier of a monitoring agent this profile belongs to. + AgentId *int `json:"agentId,omitempty" xmlrpc:"agentId,omitempty"` + + // no documentation yet + ConfigurationSection *Configuration_Template_Section `json:"configurationSection,omitempty" xmlrpc:"configurationSection,omitempty"` + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Internal identifier of a configuration profile. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + MonitoringAgent *Monitoring_Agent `json:"monitoringAgent,omitempty" xmlrpc:"monitoringAgent,omitempty"` + + // Name of a configuration profile + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Internal identifier of a configuration section that this profile belongs to.
+ SectionId *int `json:"sectionId,omitempty" xmlrpc:"sectionId,omitempty"` +} + +// The SoftLayer_Configuration_Template_Section_Reference data type contains information of a configuration section and its associated configuration template. +type Configuration_Template_Section_Reference struct { + Entity + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Internal identifier of a configuration section reference. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Section *Configuration_Template_Section `json:"section,omitempty" xmlrpc:"section,omitempty"` + + // Internal identifier of a configuration section. + SectionId *int `json:"sectionId,omitempty" xmlrpc:"sectionId,omitempty"` + + // no documentation yet + Template *Configuration_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // Internal identifier of a configuration template. + TemplateId *int `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` +} + +// The SoftLayer_Configuration_Template_Section_Type data type contains information of a configuration section type. +// +// Configuration can contain sub-sections. +type Configuration_Template_Section_Type struct { + Entity + + // Configuration section type description + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration section type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Configuration section type name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Configuration_Template_Type data type contains configuration template type information. +type Configuration_Template_Type struct { + Entity + + // Created date. This is deprecated now. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Description of a configuration template + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a configuration template type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of a configuration template type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/container.go b/vendor/github.com/softlayer/softlayer-go/datatypes/container.go new file mode 100644 index 0000000000..d96feed1a8 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/container.go @@ -0,0 +1,4728 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// SoftLayer_Container_Account_Discount_Program models a single outbound object for a graph of given data sets. 
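The field documentation on the container declared below describes a simple rule: with no lifetime cap, the remaining monthly credit applies as-is; otherwise the account receives the lesser of the remaining monthly and remaining lifetime credit. A hedged sketch of that rule follows, not part of the vendored file; it assumes, as the name suggests, that the library's Float64 type is float64-based and converts directly.

```go
package main

import (
	"fmt"
	"math"

	"github.com/softlayer/softlayer-go/datatypes"
)

// availableCredit mirrors the rule documented on the struct's fields:
// a nil LifetimeRemainingCredit means a purely monthly enrollment;
// otherwise the lesser of the two remainders applies.
func availableCredit(p datatypes.Container_Account_Discount_Program) float64 {
	if p.RemainingCredit == nil {
		return 0
	}
	monthly := float64(*p.RemainingCredit)
	if p.LifetimeRemainingCredit == nil {
		return monthly // no lifetime maximum
	}
	return math.Min(monthly, float64(*p.LifetimeRemainingCredit))
}

func main() {
	// Placeholder amounts: the lifetime remainder is the binding limit here.
	monthly := datatypes.Float64(500)
	lifetime := datatypes.Float64(120.50)
	p := datatypes.Container_Account_Discount_Program{
		RemainingCredit:         &monthly,
		LifetimeRemainingCredit: &lifetime,
	}
	fmt.Printf("usable credit this cycle: $%.2f\n", availableCredit(p)) // 120.50
}
```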
+type Container_Account_Discount_Program struct { + Entity + + // The credit allowance that has already been applied during the current billing cycle. If the lifetime limit has been or soon will be reached, this amount may include credit applied in previous billing cycles. + AppliedCredit *Float64 `json:"appliedCredit,omitempty" xmlrpc:"appliedCredit,omitempty"` + + // Flag to signify whether the account is a participant in the discount program. + IsParticipant *bool `json:"isParticipant,omitempty" xmlrpc:"isParticipant,omitempty"` + + // Credit allowance applied over the course of the entire program enrollment. For enrollments without a lifetime restriction, this property will not be populated as credit will be tracked on a purely monthly basis. + LifetimeAppliedCredit *Float64 `json:"lifetimeAppliedCredit,omitempty" xmlrpc:"lifetimeAppliedCredit,omitempty"` + + // Credit allowance available over the course of the entire program enrollment. If null, enrollment credit is applied on a strictly monthly basis and there is no lifetime maximum. Enrollments with non-null lifetime credit will receive the lesser of the remaining monthly credit or the remaining lifetime credit. + LifetimeCredit *Float64 `json:"lifetimeCredit,omitempty" xmlrpc:"lifetimeCredit,omitempty"` + + // Remaining credit allowance available over the remaining duration of the program enrollment. If null, enrollment credit is applied on a strictly monthly basis and there is no lifetime maximum. Enrollments with non-null remaining lifetime credit will receive the lesser of the remaining monthly credit or the remaining lifetime credit. + LifetimeRemainingCredit *Float64 `json:"lifetimeRemainingCredit,omitempty" xmlrpc:"lifetimeRemainingCredit,omitempty"` + + // Maximum number of orders the enrolled account is allowed to have open at one time. If null, then the Flexible Credit Program does not impose an order limit. + MaximumActiveOrders *Float64 `json:"maximumActiveOrders,omitempty" xmlrpc:"maximumActiveOrders,omitempty"` + + // The monthly credit allowance that is available at the beginning of the billing cycle. + MonthlyCredit *Float64 `json:"monthlyCredit,omitempty" xmlrpc:"monthlyCredit,omitempty"` + + // DEPRECATED: Taxes are calculated in real time and discount amounts are shown pre-tax in all cases. Tax values in the SoftLayer_Container_Account_Discount_Program container are now populated with the related pre-tax values. + PostTaxRemainingCredit *Float64 `json:"postTaxRemainingCredit,omitempty" xmlrpc:"postTaxRemainingCredit,omitempty"` + + // The date at which the program expires in MM/DD/YYYY format. + ProgramEndDate *Time `json:"programEndDate,omitempty" xmlrpc:"programEndDate,omitempty"` + + // Name of the Flexible Credit Program the account is enrolled in. + ProgramName *string `json:"programName,omitempty" xmlrpc:"programName,omitempty"` + + // The credit allowance that is available during the current billing cycle. If the lifetime limit has been or soon will be reached, this amount may be reduced by credit applied in previous billing cycles. + RemainingCredit *Float64 `json:"remainingCredit,omitempty" xmlrpc:"remainingCredit,omitempty"` + + // DEPRECATED: Taxes are calculated in real time and discount amounts are shown pre-tax in all cases. Tax values in the SoftLayer_Container_Account_Discount_Program container are now populated with the related pre-tax values.
+ RemainingCreditTax *Float64 `json:"remainingCreditTax,omitempty" xmlrpc:"remainingCreditTax,omitempty"` +} + +// SoftLayer_Container_Account_Graph_Outputs models a single outbound object for a given account graph. +type Container_Account_Graph_Outputs struct { + Entity + + // The count of closed tickets included in this graph. + ClosedTickets *string `json:"closedTickets,omitempty" xmlrpc:"closedTickets,omitempty"` + + // The count of completed backups included in this graph. + CompletedBackupCount *string `json:"completedBackupCount,omitempty" xmlrpc:"completedBackupCount,omitempty"` + + // The count of conflicted backups included in this graph. + ConflictBackupCount *string `json:"conflictBackupCount,omitempty" xmlrpc:"conflictBackupCount,omitempty"` + + // The maximum date included in this graph. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The count of failed backups included in this graph. + FailedBackupCount *string `json:"failedBackupCount,omitempty" xmlrpc:"failedBackupCount,omitempty"` + + // Error message encountered during graphing + GraphError *string `json:"graphError,omitempty" xmlrpc:"graphError,omitempty"` + + // The raw PNG binary data to be displayed once the graph is drawn. + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // The average of hardware uptime included in this graph. + HardwareUptime *string `json:"hardwareUptime,omitempty" xmlrpc:"hardwareUptime,omitempty"` + + // The inbound bandwidth usage shown in this graph. + InboundUsage *string `json:"inboundUsage,omitempty" xmlrpc:"inboundUsage,omitempty"` + + // The count of open tickets included in this graph. + OpenTickets *string `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"` + + // The outbound bandwidth usage shown in this graph. + OutboundUsage *string `json:"outboundUsage,omitempty" xmlrpc:"outboundUsage,omitempty"` + + // The count of tickets included in this graph. + PendingCustomerResponseTicketCount *string `json:"pendingCustomerResponseTicketCount,omitempty" xmlrpc:"pendingCustomerResponseTicketCount,omitempty"` + + // The minimum date included in this graph. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // The average of url uptime included in this graph. + UrlUptime *string `json:"urlUptime,omitempty" xmlrpc:"urlUptime,omitempty"` + + // The count of tickets included in this graph. + WaitingEmployeeResponseTicketCount *string `json:"waitingEmployeeResponseTicketCount,omitempty" xmlrpc:"waitingEmployeeResponseTicketCount,omitempty"` +} + +// Historical Summary Container for account resource details +type Container_Account_Historical_Summary struct { + Entity + + // Array of server uptime detail containers + Details []Container_Account_Historical_Summary_Detail `json:"details,omitempty" xmlrpc:"details,omitempty"` + + // The maximum date included in the summary. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The minimum date included in the summary. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// Historical Summary Details Container for a resource's data +type Container_Account_Historical_Summary_Detail struct { + Entity + + // The maximum date included in the detail. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The minimum date included in the detail.
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// Historical Summary Details Container for a host resource uptime +type Container_Account_Historical_Summary_Detail_Uptime struct { + Container_Account_Historical_Summary_Detail + + // The hardware for uptime details. + CloudComputingInstance *Virtual_Guest `json:"cloudComputingInstance,omitempty" xmlrpc:"cloudComputingInstance,omitempty"` + + // The configuration value for the detail's resource. + ConfigurationValue *Monitoring_Agent_Configuration_Value `json:"configurationValue,omitempty" xmlrpc:"configurationValue,omitempty"` + + // The data associated with a host uptime details. + Data []Metric_Tracking_Object_Data `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // The hardware for uptime details. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` +} + +// Historical Summary Container for account host's resource uptime details +type Container_Account_Historical_Summary_Uptime struct { + Container_Account_Historical_Summary +} + +// no documentation yet +type Container_Account_Payment_Method_CreditCard struct { + Entity + + // no documentation yet + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // no documentation yet + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"` + + // no documentation yet + CybersourceAssignedCardType *string `json:"cybersourceAssignedCardType,omitempty" xmlrpc:"cybersourceAssignedCardType,omitempty"` + + // no documentation yet + ExpireMonth *string `json:"expireMonth,omitempty" xmlrpc:"expireMonth,omitempty"` + + // no documentation yet + ExpireYear *string `json:"expireYear,omitempty" xmlrpc:"expireYear,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastFourDigits *string `json:"lastFourDigits,omitempty" xmlrpc:"lastFourDigits,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + Nickname *string `json:"nickname,omitempty" xmlrpc:"nickname,omitempty"` + + // no documentation yet + PaymentMethodRoleName *string `json:"paymentMethodRoleName,omitempty" xmlrpc:"paymentMethodRoleName,omitempty"` + + // no documentation yet + PaymentTypeId *string `json:"paymentTypeId,omitempty" xmlrpc:"paymentTypeId,omitempty"` + + // no documentation yet + PaymentTypeName *string `json:"paymentTypeName,omitempty" xmlrpc:"paymentTypeName,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_Common data type contains common information for requests to the getPortalLogin API. This is an abstract class that serves as a base that more specialized classes will derive from. For example, a request class specific to SoftLayer Native IMS Login (username and password). +type Container_Authentication_Request_Common struct { + Container_Authentication_Request_Contract + + // The answer to your security question. 
+ SecurityQuestionAnswer *string `json:"securityQuestionAnswer,omitempty" xmlrpc:"securityQuestionAnswer,omitempty"` + + // A security question you wish to answer when authenticating to the SoftLayer customer portal. This parameter isn't required if no security questions are set on your portal account or if your account is configured to not require answering a security question upon login. + SecurityQuestionId *int `json:"securityQuestionId,omitempty" xmlrpc:"securityQuestionId,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_Contract provides a common set of operations for implementing classes. +type Container_Authentication_Request_Contract struct { + Entity +} + +// The SoftLayer_Container_Authentication_Request_Native data type contains information for requests to the getPortalLogin API. This class is specific to the SoftLayer Native login (username/password). The request information will be verified to ensure it is valid, and then there will be an attempt to obtain a portal login token in authenticating the user with the provided information. +type Container_Authentication_Request_Native struct { + Container_Authentication_Request_Common + + // Your SoftLayer customer portal user's portal password. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // The username you wish to authenticate to the SoftLayer customer portal with. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_Native_External data type contains information for requests to the getPortalLogin API. This class serves as a base class for more specialized external authentication classes to the SoftLayer Native login (username/password). +type Container_Authentication_Request_Native_External struct { + Container_Authentication_Request_Native +} + +// The SoftLayer_Container_Authentication_Request_Native_External_Totp data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the native SoftLayer (username/password) login service for a portal login token, as well as submitting a request to the TOTP 2 factor authentication service. +type Container_Authentication_Request_Native_External_Totp struct { + Container_Authentication_Request_Native_External + + // no documentation yet + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // no documentation yet + SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // no documentation yet + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_Native_External_Verisign data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the native SoftLayer (username/password) login service for a portal login token, as well as submitting a request to the Verisign 2 factor authentication service.
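The TOTP request defined above composes the plain username/password request through Go struct embedding, so the inherited fields can be set directly via field promotion. A minimal sketch, not part of the vendored file; all credential values are placeholders.

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

func strPtr(s string) *string { return &s }

func main() {
	// Username and Password live on the embedded Native request and are
	// promoted, so they can be assigned on the outer 2FA request directly.
	var req datatypes.Container_Authentication_Request_Native_External_Totp
	req.Username = strPtr("portal-user")        // placeholder credentials
	req.Password = strPtr("not-a-real-password")
	req.SecurityCode = strPtr("123456") // one-time TOTP code

	fmt.Println("requesting portal token for", *req.Username)
}
```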
+type Container_Authentication_Request_Native_External_Verisign struct { + Container_Authentication_Request_Native_External + + // no documentation yet + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // no documentation yet + SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // no documentation yet + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_OpenIdConnect data type contains information for requests to the getPortalLogin API. This class is specific to the SoftLayer Cloud Token login. The request information will be verified to ensure it is valid, and then there will be an attempt to obtain a portal login token in authenticating the user with the provided information. +type Container_Authentication_Request_OpenIdConnect struct { + Container_Authentication_Request_Common + + // no documentation yet + OpenIdConnectAccessToken *string `json:"openIdConnectAccessToken,omitempty" xmlrpc:"openIdConnectAccessToken,omitempty"` + + // no documentation yet + OpenIdConnectAccountId *int `json:"openIdConnectAccountId,omitempty" xmlrpc:"openIdConnectAccountId,omitempty"` + + // no documentation yet + OpenIdConnectProvider *string `json:"openIdConnectProvider,omitempty" xmlrpc:"openIdConnectProvider,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_OpenIdConnect_External data type contains information for requests to the getPortalLogin API. This class serves as a base class for more specialized external authentication classes to the SoftLayer OpenIdConnect login service. +type Container_Authentication_Request_OpenIdConnect_External struct { + Container_Authentication_Request_OpenIdConnect +} + +// The SoftLayer_Container_Authentication_Request_OpenIdConnect_External_Totp data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the SoftLayer OpenIdConnect (token) login service for a portal login token, as well as submitting a request to the TOTP 2 factor authentication service. +type Container_Authentication_Request_OpenIdConnect_External_Totp struct { + Container_Authentication_Request_OpenIdConnect_External + + // no documentation yet + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // no documentation yet + SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // no documentation yet + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// The SoftLayer_Container_Authentication_Request_OpenIdConnect_External_Verisign data type contains information for requests to the getPortalLogin API. This class provides information to allow the user to submit a request to the SoftLayer OpenIdConnect (token) login service for a portal login token, as well as submitting a request to the Verisign 2 factor authentication service. 
+type Container_Authentication_Request_OpenIdConnect_External_Verisign struct { + Container_Authentication_Request_OpenIdConnect_External + + // no documentation yet + SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"` + + // no documentation yet + SecurityCode *int `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"` + + // no documentation yet + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_2FactorAuthenticationNeeded data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request that is missing the appropriate 2FA information. +type Container_Authentication_Response_2FactorAuthenticationNeeded struct { + Container_Authentication_Response_Common + + // no documentation yet + AdditionalData *Container_Authentication_Response_Common `json:"additionalData,omitempty" xmlrpc:"additionalData,omitempty"` + + // no documentation yet + StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_Account data type contains account information for responses from the getPortalLogin API. +type Container_Authentication_Response_Account struct { + Entity + + // no documentation yet + AccountCompanyName *string `json:"accountCompanyName,omitempty" xmlrpc:"accountCompanyName,omitempty"` + + // no documentation yet + AccountCountry *string `json:"accountCountry,omitempty" xmlrpc:"accountCountry,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AccountStatusName *string `json:"accountStatusName,omitempty" xmlrpc:"accountStatusName,omitempty"` + + // no documentation yet + BluemixAccountId *string `json:"bluemixAccountId,omitempty" xmlrpc:"bluemixAccountId,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DefaultAccount *bool `json:"defaultAccount,omitempty" xmlrpc:"defaultAccount,omitempty"` + + // no documentation yet + IsMasterUserFlag *bool `json:"isMasterUserFlag,omitempty" xmlrpc:"isMasterUserFlag,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + PhoneFactorExternalAuthenticationRequired *bool `json:"phoneFactorExternalAuthenticationRequired,omitempty" xmlrpc:"phoneFactorExternalAuthenticationRequired,omitempty"` + + // no documentation yet + SecurityQuestionRequired *bool `json:"securityQuestionRequired,omitempty" xmlrpc:"securityQuestionRequired,omitempty"` + + // no documentation yet + TotpExternalAuthenticationRequired *bool `json:"totpExternalAuthenticationRequired,omitempty" xmlrpc:"totpExternalAuthenticationRequired,omitempty"` + + // no documentation yet + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // no documentation yet + VerisignExternalAuthenticationRequired *bool `json:"verisignExternalAuthenticationRequired,omitempty" xmlrpc:"verisignExternalAuthenticationRequired,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_AccountIdMissing data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request that is missing the account id. 
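The response containers declared in this file are distinct Go types, so the concrete type itself, not just a status field, tells the caller whether a login succeeded, failed, or still needs a second factor. One plausible way to branch, assuming the client surfaces these containers as Go values (an illustrative sketch, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

// outcome branches on which response container an authentication
// attempt produced.
func outcome(resp interface{}) string {
	switch r := resp.(type) {
	case datatypes.Container_Authentication_Response_Success:
		return "authenticated; portal token issued"
	case datatypes.Container_Authentication_Response_2FactorAuthenticationNeeded:
		return "second security code required"
	case datatypes.Container_Authentication_Response_LoginFailed:
		if r.ErrorMessage != nil {
			return "login failed: " + *r.ErrorMessage
		}
		return "login failed"
	default:
		return "unrecognized response container"
	}
}

func main() {
	msg := "bad credentials" // placeholder error text
	failed := datatypes.Container_Authentication_Response_LoginFailed{
		ErrorMessage: &msg,
	}
	fmt.Println(outcome(failed)) // login failed: bad credentials
}
```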
+type Container_Authentication_Response_AccountIdMissing struct { + Container_Authentication_Response_Common + + // no documentation yet + StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_Common data type contains common information for responses from the getPortalLogin API. This is an abstract class that serves as a base that more specialized classes will derive from. For example, a response class that is specific to a successful response from the getPortalLogin API. +type Container_Authentication_Response_Common struct { + Entity + + // The list of linked accounts for the authenticated SoftLayer customer portal user. + Accounts []Container_Authentication_Response_Account `json:"accounts,omitempty" xmlrpc:"accounts,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_LOGIN_FAILED data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request where there was an inability to login based on the information that was provided. +type Container_Authentication_Response_LoginFailed struct { + Container_Authentication_Response_Common + + // no documentation yet + ErrorMessage *string `json:"errorMessage,omitempty" xmlrpc:"errorMessage,omitempty"` + + // no documentation yet + StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"` +} + +// The SoftLayer_Container_Authentication_Response_SUCCESS data type contains information for specific responses from the getPortalLogin API. This class is indicative of a request that was successful in obtaining a portal login token from the getPortalLogin API. +type Container_Authentication_Response_Success struct { + Container_Authentication_Response_Common + + // no documentation yet + StatusKeyName *string `json:"statusKeyName,omitempty" xmlrpc:"statusKeyName,omitempty"` + + // The token for interacting with the SoftLayer customer portal. + Token *Container_User_Authentication_Token `json:"token,omitempty" xmlrpc:"token,omitempty"` +} + +// The SoftLayer_Container_Auxiliary_Network_Status_Reading data type contains information relating to an object being monitored from outside the SoftLayer network. It is primarily used to check the status of our edge routers from multiple locations around the world. +type Container_Auxiliary_Network_Status_Reading struct { + Entity + + // Average packet round-trip time. + AveragePing *Float64 `json:"averagePing,omitempty" xmlrpc:"averagePing,omitempty"` + + // Number of failures since the target was last detected to be working properly. + Fails *int `json:"fails,omitempty" xmlrpc:"fails,omitempty"` + + // Monitoring frequency in minutes. + Frequency *int `json:"frequency,omitempty" xmlrpc:"frequency,omitempty"` + + // The target label. + Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"` + + // Last check date and time. + LastCheckDate *Time `json:"lastCheckDate,omitempty" xmlrpc:"lastCheckDate,omitempty"` + + // Date and time of the last problem detected. + LastDownDate *Time `json:"lastDownDate,omitempty" xmlrpc:"lastDownDate,omitempty"` + + // The total response time in seconds calculated during the last check. + Latency *Float64 `json:"latency,omitempty" xmlrpc:"latency,omitempty"` + + // The monitoring location name. + Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // Maximum packet round-trip time.
+ MaximumPing *Float64 `json:"maximumPing,omitempty" xmlrpc:"maximumPing,omitempty"` + + // Minimum packet round-trip time. + MinimumPing *Float64 `json:"minimumPing,omitempty" xmlrpc:"minimumPing,omitempty"` + + // Packet loss percentage. + PingLoss *Float64 `json:"pingLoss,omitempty" xmlrpc:"pingLoss,omitempty"` + + // The date monitoring first began + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // Status Code - one of UP, Down, Test pending. + StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"` + + // The status message from the last effective check. + StatusMessage *string `json:"statusMessage,omitempty" xmlrpc:"statusMessage,omitempty"` + + // The target object. + Target *string `json:"target,omitempty" xmlrpc:"target,omitempty"` + + // A letter indicating the target type. + TargetType *string `json:"targetType,omitempty" xmlrpc:"targetType,omitempty"` +} + +// SoftLayer_Container_Bandwidth_GraphInputs models a single inbound object for a given bandwidth graph. +type Container_Bandwidth_GraphInputs struct { + Entity + + // This is a unix timestamp that represents the stop date/time for a graph. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The front-end or back-end network uplink interface associated with this server. + NetworkInterfaceId *int `json:"networkInterfaceId,omitempty" xmlrpc:"networkInterfaceId,omitempty"` + + // * + Pod *int `json:"pod,omitempty" xmlrpc:"pod,omitempty"` + + // This is a human readable name for the server or rack being graphed. + ServerName *string `json:"serverName,omitempty" xmlrpc:"serverName,omitempty"` + + // This is a unix timestamp that represents the begin date/time for a graph. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// SoftLayer_Container_Bandwidth_GraphOutputs models a single outbound object for a given bandwidth graph. +type Container_Bandwidth_GraphOutputs struct { + Entity + + // The raw PNG binary data to be displayed once the graph is drawn. + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // The title that ended up being displayed as part of the graph image. + GraphTitle *string `json:"graphTitle,omitempty" xmlrpc:"graphTitle,omitempty"` + + // The maximum date included in this graph. + MaxEndDate *Time `json:"maxEndDate,omitempty" xmlrpc:"maxEndDate,omitempty"` + + // The minimum date included in this graph. + MinStartDate *Time `json:"minStartDate,omitempty" xmlrpc:"minStartDate,omitempty"` +} + +// SoftLayer_Container_Bandwidth_GraphOutputs models an individual bandwidth graph image and certain details about that graph image. +type Container_Bandwidth_GraphOutputsExtended struct { + Entity + + // The raw PNG binary data of a bandwidth graph image. + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // A bandwidth graph's title. + GraphTitle *string `json:"graphTitle,omitempty" xmlrpc:"graphTitle,omitempty"` + + // The amount of inbound traffic reported on a bandwidth graph image. + InBoundTotalBytes *uint `json:"inBoundTotalBytes,omitempty" xmlrpc:"inBoundTotalBytes,omitempty"` + + // The ending date of the data represented in a bandwidth graph. + MaxEndDate *Time `json:"maxEndDate,omitempty" xmlrpc:"maxEndDate,omitempty"` + + // The beginning date of the data represented in a bandwidth graph. 
+ MinStartDate *Time `json:"minStartDate,omitempty" xmlrpc:"minStartDate,omitempty"` + + // The amount of outbound traffic reported on a bandwidth graph image. + OutBoundTotalBytes *uint `json:"outBoundTotalBytes,omitempty" xmlrpc:"outBoundTotalBytes,omitempty"` +} + +// SoftLayer_Container_Bandwidth_Projection models projected bandwidth use over a time range. +type Container_Bandwidth_Projection struct { + Entity + + // Bandwidth limit for this hardware. + AllowedUsage *string `json:"allowedUsage,omitempty" xmlrpc:"allowedUsage,omitempty"` + + // Estimated bandwidth usage so far this billing cycle. + EstimatedUsage *string `json:"estimatedUsage,omitempty" xmlrpc:"estimatedUsage,omitempty"` + + // Hardware ID of server to monitor. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // Projected usage for this hardware based on previous usage this billing cycle. + ProjectedUsage *string `json:"projectedUsage,omitempty" xmlrpc:"projectedUsage,omitempty"` + + // the text name of the server being monitored. + ServerName *string `json:"serverName,omitempty" xmlrpc:"serverName,omitempty"` + + // The minimum date included in this list. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// no documentation yet +type Container_Billing_Currency_Country struct { + Entity + + // no documentation yet + AvailableCurrencies []Billing_Currency `json:"availableCurrencies,omitempty" xmlrpc:"availableCurrencies,omitempty"` + + // no documentation yet + Country *Locale_Country `json:"country,omitempty" xmlrpc:"country,omitempty"` +} + +// no documentation yet +type Container_Billing_Currency_Format struct { + Entity + + // no documentation yet + Currency *string `json:"currency,omitempty" xmlrpc:"currency,omitempty"` + + // no documentation yet + Display *int `json:"display,omitempty" xmlrpc:"display,omitempty"` + + // no documentation yet + Format *string `json:"format,omitempty" xmlrpc:"format,omitempty"` + + // no documentation yet + Locale *string `json:"locale,omitempty" xmlrpc:"locale,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Position *int `json:"position,omitempty" xmlrpc:"position,omitempty"` + + // no documentation yet + Precision *int `json:"precision,omitempty" xmlrpc:"precision,omitempty"` + + // no documentation yet + Script *string `json:"script,omitempty" xmlrpc:"script,omitempty"` + + // no documentation yet + Service *string `json:"service,omitempty" xmlrpc:"service,omitempty"` + + // no documentation yet + Symbol *string `json:"symbol,omitempty" xmlrpc:"symbol,omitempty"` + + // no documentation yet + Tag *string `json:"tag,omitempty" xmlrpc:"tag,omitempty"` + + // no documentation yet + Value *Float64 `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Container_Billing_Info_Ach struct { + Entity + + // no documentation yet + AccountNumber *string `json:"accountNumber,omitempty" xmlrpc:"accountNumber,omitempty"` + + // no documentation yet + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // no documentation yet + BankTransitNumber *string `json:"bankTransitNumber,omitempty" xmlrpc:"bankTransitNumber,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + FederalTaxId *string 
`json:"federalTaxId,omitempty" xmlrpc:"federalTaxId,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + Street1 *string `json:"street1,omitempty" xmlrpc:"street1,omitempty"` + + // no documentation yet + Street2 *string `json:"street2,omitempty" xmlrpc:"street2,omitempty"` +} + +// This container is used to provide all the options for [[SoftLayer_Billing_Invoice/emailInvoices|emailInvoices]] in order to have the necessary invoices generated and links sent to the user's email. +type Container_Billing_Invoice_Email struct { + Entity + + // Excel Invoices to email + ExcelInvoiceIds []int `json:"excelInvoiceIds,omitempty" xmlrpc:"excelInvoiceIds,omitempty"` + + // PDF Invoice Details to email + PdfDetailedInvoiceIds []int `json:"pdfDetailedInvoiceIds,omitempty" xmlrpc:"pdfDetailedInvoiceIds,omitempty"` + + // PDF Invoices to email + PdfInvoiceIds []int `json:"pdfInvoiceIds,omitempty" xmlrpc:"pdfInvoiceIds,omitempty"` + + // The type of Invoices to be emailed [current|next]. If next is selected, the account id will be used. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// SoftLayer_Container_Billing_Order_Status models an order status. +type Container_Billing_Order_Status struct { + Entity + + // The description of the status. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The keyname of the status. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// Contains user information used to request a manual Catalyst enrollment. +type Container_Catalyst_ManualEnrollmentRequest struct { + Entity + + // Applicant's email address + CustomerEmail *string `json:"customerEmail,omitempty" xmlrpc:"customerEmail,omitempty"` + + // Applicant's first and last name + CustomerName *string `json:"customerName,omitempty" xmlrpc:"customerName,omitempty"` + + // Name of applicant's startup company + StartupName *string `json:"startupName,omitempty" xmlrpc:"startupName,omitempty"` + + // Flag indicating whether (true) or not (false) and applicant is + VentureAffiliationFlag *bool `json:"ventureAffiliationFlag,omitempty" xmlrpc:"ventureAffiliationFlag,omitempty"` + + // Name of the venture capital fund, if any, applicant is affiliated with + VentureFundName *string `json:"ventureFundName,omitempty" xmlrpc:"ventureFundName,omitempty"` +} + +// This container is used to hold country locale information. +type Container_Collection_Locale_CountryCode struct { + Entity + + // no documentation yet + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // no documentation yet + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` + + // no documentation yet + StateCodes []Container_Collection_Locale_StateCode `json:"stateCodes,omitempty" xmlrpc:"stateCodes,omitempty"` +} + +// This container is used to hold information regarding a state or province. 
+type Container_Collection_Locale_StateCode struct { + Entity + + // no documentation yet + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // no documentation yet + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` +} + +// no documentation yet +type Container_Disk_Image_Capture_Template struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Summary *string `json:"summary,omitempty" xmlrpc:"summary,omitempty"` + + // no documentation yet + Volumes []Container_Disk_Image_Capture_Template_Volume `json:"volumes,omitempty" xmlrpc:"volumes,omitempty"` +} + +// no documentation yet +type Container_Disk_Image_Capture_Template_Volume struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Partitions []Container_Disk_Image_Capture_Template_Volume_Partition `json:"partitions,omitempty" xmlrpc:"partitions,omitempty"` +} + +// no documentation yet +type Container_Disk_Image_Capture_Template_Volume_Partition struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Contact information container for domain registration +type Container_Dns_Domain_Registration_Contact struct { + Entity + + // The street address of the contact. + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // The second line in the address of the contact. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // The third line in the address of the contact. + Address3 *string `json:"address3,omitempty" xmlrpc:"address3,omitempty"` + + // The city of the contact. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // The 2-character Country code. (i.e. US) + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The email address of the contact. + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // The fax number of the contact. + Fax *string `json:"fax,omitempty" xmlrpc:"fax,omitempty"` + + // The first name of the contact. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // The last name of the contact. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // The organization name of the contact. + OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"` + + // The phone number of the contact. + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // The postal code of the contact. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // The state of the contact. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // The type of contact. The following are the valid types of contacts: + // * admin + // * owner + // * billing + // * tech + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// This container data type contains extended attributes information for a domain of country code TLD. +type Container_Dns_Domain_Registration_ExtendedAttribute struct { + Entity + + // Indicates if this is a child of another extended attribute. + ChildFlag *bool `json:"childFlag,omitempty" xmlrpc:"childFlag,omitempty"` + + // The description of an extended attribute. 
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The name of an extended attribute.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The collection of options for an extended attribute.
+	Options []Container_Dns_Domain_Registration_ExtendedAttribute_Option `json:"options,omitempty" xmlrpc:"options,omitempty"`
+
+	// Indicates if an extended attribute is required.
+	RequiredFlag *int `json:"requiredFlag,omitempty" xmlrpc:"requiredFlag,omitempty"`
+
+	// User defined indicates that the value is required from outside sources.
+	UserDefinedFlag *bool `json:"userDefinedFlag,omitempty" xmlrpc:"userDefinedFlag,omitempty"`
+}
+
+// This is the data type that may need to be populated to complete registration for domains that are country code TLDs.
+type Container_Dns_Domain_Registration_ExtendedAttribute_Configuration struct {
+	Entity
+
+	// The extended attribute name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The extended attribute option value.
+	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// This container data type contains extended attribute options information for a domain of country code TLD.
+type Container_Dns_Domain_Registration_ExtendedAttribute_Option struct {
+	Entity
+
+	// The description of an option.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// Extended Attribute that is required for an option.
+	RequireExtendedAttributes []Container_Dns_Domain_Registration_ExtendedAttribute_Option_Require `json:"requireExtendedAttributes,omitempty" xmlrpc:"requireExtendedAttributes,omitempty"`
+
+	// The title of an option.
+	Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+
+	// The value of an option.
+	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// This container data type contains the extended attribute name that is required by an extended attribute option.
+type Container_Dns_Domain_Registration_ExtendedAttribute_Option_Require struct {
+	Entity
+
+	// The name of an extended attribute that is required by an extended attribute option.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Information container for domain registration
+type Container_Dns_Domain_Registration_Information struct {
+	Entity
+
+	// The information of the registered domain.
+	Contacts []Container_Dns_Domain_Registration_Contact `json:"contacts,omitempty" xmlrpc:"contacts,omitempty"`
+
+	// The date that a domain is set to expire.
+	ExpireDate *Time `json:"expireDate,omitempty" xmlrpc:"expireDate,omitempty"`
+
+	// The list of nameservers for the domain.
+	Nameservers []Container_Dns_Domain_Registration_Nameserver `json:"nameservers,omitempty" xmlrpc:"nameservers,omitempty"`
+
+	// no documentation yet
+	RegistryCreateDate *Time `json:"registryCreateDate,omitempty" xmlrpc:"registryCreateDate,omitempty"`
+
+	// no documentation yet
+	RegistryExpireDate *Time `json:"registryExpireDate,omitempty" xmlrpc:"registryExpireDate,omitempty"`
+
+	// no documentation yet
+	RegistryUpdateDate *Time `json:"registryUpdateDate,omitempty" xmlrpc:"registryUpdateDate,omitempty"`
+}
+
+// no documentation yet
+type Container_Dns_Domain_Registration_List struct {
+	Entity
+
+	// The domain name.
+	DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"`
+
+	// Three-character language tag for the IDN domain that you're trying to register. This is only required for IDN domains.
+	EncodingType *string `json:"encodingType,omitempty" xmlrpc:"encodingType,omitempty"`
+
+	// Data required by the Registry for some country code top level domains (e.g. example.us).
+	//
+	// In order to determine if a domain requires extended attributes, use the [[SoftLayer_Dns_Domain_Registration::getExtendedAttributes|domain registration]] service.
+	ExtendedAttributeConfiguration []Container_Dns_Domain_Registration_ExtendedAttribute_Configuration `json:"extendedAttributeConfiguration,omitempty" xmlrpc:"extendedAttributeConfiguration,omitempty"`
+
+	// The length of the registration period in years. Valid values are 1 – 10.
+	RegistrationPeriod *int `json:"registrationPeriod,omitempty" xmlrpc:"registrationPeriod,omitempty"`
+}
+
+// Lookup domain container for domain registration
+type Container_Dns_Domain_Registration_Lookup struct {
+	Entity
+
+	// The list of available and taken domain names.
+	Items []Container_Dns_Domain_Registration_Lookup_Items `json:"items,omitempty" xmlrpc:"items,omitempty"`
+}
+
+// Lookup items container for domain registration
+type Container_Dns_Domain_Registration_Lookup_Items struct {
+	Entity
+
+	// The domain name.
+	DomainName *string `json:"domainName,omitempty" xmlrpc:"domainName,omitempty"`
+
+	// The status of the domain name, indicating whether it is available and can be registered.
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+}
+
+// Nameserver container for domain registration
+type Container_Dns_Domain_Registration_Nameserver struct {
+	Entity
+
+	// The list of fully qualified names of the nameserver.
+	Nameservers []Container_Dns_Domain_Registration_Nameserver_List `json:"nameservers,omitempty" xmlrpc:"nameservers,omitempty"`
+}
+
+// Nameservers list container for domain registration
+type Container_Dns_Domain_Registration_Nameserver_List struct {
+	Entity
+
+	// The IPv4 address of the nameserver.
+	Ipv4Address *string `json:"ipv4Address,omitempty" xmlrpc:"ipv4Address,omitempty"`
+
+	// The IPv6 address of the nameserver.
+	Ipv6Address *string `json:"ipv6Address,omitempty" xmlrpc:"ipv6Address,omitempty"`
+
+	// The fully qualified name of the nameserver
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The sort order of the nameserver
+	SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"`
+}
+
+// no documentation yet
+type Container_Dns_Domain_Registration_Registrant_Verification_StatusDetail struct {
+	Entity
+
+	// The current status of the verification.
+	Status *Dns_Domain_Registration_Registrant_Verification_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// The date when the domain will be suspended.
+	VerificationDeadlineDate *Time `json:"verificationDeadlineDate,omitempty" xmlrpc:"verificationDeadlineDate,omitempty"`
+}
+
+// Transfer Information container for domain registration
+type Container_Dns_Domain_Registration_Transfer_Information struct {
+	Entity
+
+	// The reason why a domain is not transferable.
+	Reason *string `json:"reason,omitempty" xmlrpc:"reason,omitempty"`
+
+	// The registrant email.
+	RegistrantEmail *string `json:"registrantEmail,omitempty" xmlrpc:"registrantEmail,omitempty"`
+
+	// The status of the latest transfer on the domain.
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// The date and time of the most recent update to the state of the transfer.
+	TimeStamp *Time `json:"timeStamp,omitempty" xmlrpc:"timeStamp,omitempty"`
+
+	// Indicates if the domain can be transferred.
+ Transferrable *int `json:"transferrable,omitempty" xmlrpc:"transferrable,omitempty"` +} + +// The SoftLayer_Container_Exception data type represents a SoftLayer_Exception. +type Container_Exception struct { + Entity + + // The SoftLayer_Exception class that the error is. + ExceptionClass *string `json:"exceptionClass,omitempty" xmlrpc:"exceptionClass,omitempty"` + + // The exception message. + ExceptionMessage *string `json:"exceptionMessage,omitempty" xmlrpc:"exceptionMessage,omitempty"` +} + +// no documentation yet +type Container_Graph struct { + Entity + + // base units associated with the graph. + BaseUnit *string `json:"baseUnit,omitempty" xmlrpc:"baseUnit,omitempty"` + + // Graph range end datetime. + EndDatetime *string `json:"endDatetime,omitempty" xmlrpc:"endDatetime,omitempty"` + + // The height of the graph image. + Height *int `json:"height,omitempty" xmlrpc:"height,omitempty"` + + // The graph image. + Image *[]byte `json:"image,omitempty" xmlrpc:"image,omitempty"` + + // The graph interval in seconds. + Interval *int `json:"interval,omitempty" xmlrpc:"interval,omitempty"` + + // Metric types associated with the graph. + Metrics []Container_Metric_Data_Type `json:"metrics,omitempty" xmlrpc:"metrics,omitempty"` + + // Indicator to control whether the graph data is normalized. + NormalizeFlag *[]byte `json:"normalizeFlag,omitempty" xmlrpc:"normalizeFlag,omitempty"` + + // The options used to control the graph appearance. + Options []Container_Graph_Option `json:"options,omitempty" xmlrpc:"options,omitempty"` + + // A collection of graph plots. + Plots []Container_Graph_Plot `json:"plots,omitempty" xmlrpc:"plots,omitempty"` + + // option to not return the image. + ReturnImage *bool `json:"returnImage,omitempty" xmlrpc:"returnImage,omitempty"` + + // Graph range start datetime. + StartDatetime *string `json:"startDatetime,omitempty" xmlrpc:"startDatetime,omitempty"` + + // The name of the template to use; may be null. + Template *string `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // The title of the graph image. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` + + // The width of the graph image. + Width *int `json:"width,omitempty" xmlrpc:"width,omitempty"` +} + +// no documentation yet +type Container_Graph_Option struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Container_Graph_Plot struct { + Entity + + // no documentation yet + Data []Container_Graph_Plot_Coordinate `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // no documentation yet + Metric *Container_Metric_Data_Type `json:"metric,omitempty" xmlrpc:"metric,omitempty"` + + // no documentation yet + Unit *string `json:"unit,omitempty" xmlrpc:"unit,omitempty"` +} + +// no documentation yet +type Container_Graph_Plot_Coordinate struct { + Entity + + // no documentation yet + XValue *Float64 `json:"xValue,omitempty" xmlrpc:"xValue,omitempty"` + + // no documentation yet + YValue *Float64 `json:"yValue,omitempty" xmlrpc:"yValue,omitempty"` + + // no documentation yet + ZValue *Float64 `json:"zValue,omitempty" xmlrpc:"zValue,omitempty"` +} + +// The hardware configuration container is used to provide configuration options for servers. +// +// Each configuration option will include both an itemPrice and a template. 
+// +// The itemPrice value will provide hourly and monthly costs (if either are applicable), and a description of the option. +// +// The template will provide a fragment of the request with the properties and values that must be sent when creating a server with the option. +// +// The [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] method returns this data structure. +// +// +type Container_Hardware_Configuration struct { + Entity + + // + //
+ // Available datacenter options. + // + // + // The datacenter.name value in the template represents which datacenter the server will be provisioned in. + //
+ Datacenters []Container_Hardware_Configuration_Option `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"` + + // + //
+ // Available fixed configuration preset options. + // + // + // The fixedConfigurationPreset.keyName value in the template is an identifier for a particular fixed configuration. When provided exactly as shown in the template, that fixed configuration will be used. + // + // + // When providing a fixedConfigurationPreset.keyName while ordering a server the processors and hardDrives configuration options cannot be used. + //
+ FixedConfigurationPresets []Container_Hardware_Configuration_Option `json:"fixedConfigurationPresets,omitempty" xmlrpc:"fixedConfigurationPresets,omitempty"` + + // + //
+ // Available hard drive options. + // + // + // A server will have at least one hard drive. + // + // + // The hardDrives.capacity value in the template represents the size, in gigabytes, of the disk. + //
+ HardDrives []Container_Hardware_Configuration_Option `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"` + + // + //
+ // Available network component options. + // + // + // The networkComponent.maxSpeed value in the template represents the link speed, in megabits per second, of the network connections for a server. + //
+ NetworkComponents []Container_Hardware_Configuration_Option `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // + //
+	// Available operating system options.
+	//
+	//
+	// The operatingSystemReferenceCode value in the template is an identifier for a particular operating system. When provided exactly as shown in the template, that operating system will be used.
+	//
+	//
+	// A reference code is structured as three tokens separated by underscores. The first token represents the product, the second is the version of the product, and the third is whether the OS is 32- or 64-bit. For example, UBUNTU_14_64 identifies 64-bit Ubuntu 14.
+	//
+	//
+	// When providing an operatingSystemReferenceCode while ordering a server, the only token required to match exactly is the product. The version token may be given as 'LATEST', else it will require an exact match as well. When the bits token is not provided, 64 bits will be assumed.
+	//
+	//
+	// Providing the value of 'LATEST' for a version will select the latest release of that product for the operating system. As this may change over time, you should be sure that the release version is irrelevant for your applications.
+	//
+	//
+	// For Windows based operating systems the version will represent both the release version (2008, 2012, etc.) and the edition (Standard, Enterprise, etc.). For all other operating systems the version will represent the major version (CentOS 6, Ubuntu 12, etc.) of that operating system; minor versions are represented in a few reference codes where they are significant.
+	//
+ OperatingSystems []Container_Hardware_Configuration_Option `json:"operatingSystems,omitempty" xmlrpc:"operatingSystems,omitempty"` + + // + //
+ // Available processor options. + // + // + // The processorCoreAmount value in the template represents the number of cores allocated to the server. + // The memoryCapacity value in the template represents the amount of memory, in gigabytes, allocated to the server. + //
+	Processors []Container_Hardware_Configuration_Option `json:"processors,omitempty" xmlrpc:"processors,omitempty"`
+}
+
+// An option found within a [[SoftLayer_Container_Hardware_Configuration (type)]] structure.
+type Container_Hardware_Configuration_Option struct {
+	Entity
+
+	//
+	// Provides hourly and monthly costs (if either are applicable), and a description of the option.
+	ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"`
+
+	//
+	// Provides a description of a fixed configuration preset with monthly and hourly costs.
+	Preset *Product_Package_Preset `json:"preset,omitempty" xmlrpc:"preset,omitempty"`
+
+	//
+	// Provides a fragment of the request with the properties and values that must be sent when creating a server with the option.
+	Template *Hardware `json:"template,omitempty" xmlrpc:"template,omitempty"`
+}
+
+// no documentation yet
+type Container_Hardware_MassUpdate struct {
+	Entity
+
+	// The hardware updated by the mass update tool
+	HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+	// Errors encountered while mass updating hardware
+	Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"`
+
+	// The hardware that failed to update
+	SuccessFlag *string `json:"successFlag,omitempty" xmlrpc:"successFlag,omitempty"`
+}
+
+// no documentation yet
+type Container_Hardware_Pool_Details struct {
+	Entity
+
+	// no documentation yet
+	PoolDescription *string `json:"poolDescription,omitempty" xmlrpc:"poolDescription,omitempty"`
+
+	// no documentation yet
+	PoolKeyName *string `json:"poolKeyName,omitempty" xmlrpc:"poolKeyName,omitempty"`
+
+	// no documentation yet
+	PoolName *string `json:"poolName,omitempty" xmlrpc:"poolName,omitempty"`
+
+	// no documentation yet
+	Routers []Container_Hardware_Pool_Details_Router `json:"routers,omitempty" xmlrpc:"routers,omitempty"`
+
+	// no documentation yet
+	TotalHardware *int `json:"totalHardware,omitempty" xmlrpc:"totalHardware,omitempty"`
+
+	// no documentation yet
+	TotalInventoryHardware *int `json:"totalInventoryHardware,omitempty" xmlrpc:"totalInventoryHardware,omitempty"`
+
+	// no documentation yet
+	TotalProvisionedHardware *int `json:"totalProvisionedHardware,omitempty" xmlrpc:"totalProvisionedHardware,omitempty"`
+
+	// no documentation yet
+	TotalTestedHardware *int `json:"totalTestedHardware,omitempty" xmlrpc:"totalTestedHardware,omitempty"`
+
+	// no documentation yet
+	TotalTestingHardware *int `json:"totalTestingHardware,omitempty" xmlrpc:"totalTestingHardware,omitempty"`
+}
+
+// no documentation yet
+type Container_Hardware_Pool_Details_Router struct {
+	Entity
+
+	// no documentation yet
+	RouterId *int `json:"routerId,omitempty" xmlrpc:"routerId,omitempty"`
+
+	// no documentation yet
+	RouterName *string `json:"routerName,omitempty" xmlrpc:"routerName,omitempty"`
+
+	// no documentation yet
+	TotalHardware *int `json:"totalHardware,omitempty" xmlrpc:"totalHardware,omitempty"`
+
+	// no documentation yet
+	TotalInventoryHardware *int `json:"totalInventoryHardware,omitempty" xmlrpc:"totalInventoryHardware,omitempty"`
+
+	// no documentation yet
+	TotalProvisionedHardware *int `json:"totalProvisionedHardware,omitempty" xmlrpc:"totalProvisionedHardware,omitempty"`
+
+	// no documentation yet
+	TotalTestedHardware *int `json:"totalTestedHardware,omitempty" xmlrpc:"totalTestedHardware,omitempty"`
+
+	// no documentation yet
+	TotalTestingHardware *int `json:"totalTestingHardware,omitempty" xmlrpc:"totalTestingHardware,omitempty"`
+}
+
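+// Illustrative sketch, not part of the generated SoftLayer bindings: the
+// configuration containers above are returned by getCreateObjectOptions, and
+// each option pairs an itemPrice with a template fragment. This hypothetical
+// helper collects the datacenter.name values offered by the datacenter
+// options, assuming the Hardware template carries a Datacenter with a Name
+// pointer, as elsewhere in these bindings.
+func exampleDatacenterNames(cfg *Container_Hardware_Configuration) []string {
+	var names []string
+	for _, opt := range cfg.Datacenters {
+		// Guard each pointer in the template fragment before dereferencing.
+		if opt.Template == nil || opt.Template.Datacenter == nil || opt.Template.Datacenter.Name == nil {
+			continue
+		}
+		names = append(names, *opt.Template.Datacenter.Name)
+	}
+	return names
+}
+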
+// The SoftLayer_Container_Hardware_Server_Configuration data type contains information relating to a server's item price information and hard drive partition information.
+type Container_Hardware_Server_Configuration struct {
+	Entity
+
+	// A flag indicating that the server will be moved into the spare pool after an Operating System reload.
+	AddToSparePoolAfterOsReload *int `json:"addToSparePoolAfterOsReload,omitempty" xmlrpc:"addToSparePoolAfterOsReload,omitempty"`
+
+	// The custom provision script URI will be used to download and execute a customer-defined script on the host at the end of provisioning.
+	CustomProvisionScriptUri *string `json:"customProvisionScriptUri,omitempty" xmlrpc:"customProvisionScriptUri,omitempty"`
+
+	// A flag indicating that the primary drive will be converted to a portable storage volume during an Operating System reload.
+	DriveRetentionFlag *bool `json:"driveRetentionFlag,omitempty" xmlrpc:"driveRetentionFlag,omitempty"`
+
+	// A flag indicating that all data will be erased from drives during an Operating System reload.
+	EraseHardDrives *int `json:"eraseHardDrives,omitempty" xmlrpc:"eraseHardDrives,omitempty"`
+
+	// The hard drive partitions that a server can be partitioned with.
+	HardDrives []Hardware_Component `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"`
+
+	// An Image Template ID [[SoftLayer_Virtual_Guest_Block_Device_Template_Group]] that will be deployed to the host. If provided, no item prices are required.
+	ImageTemplateId *int `json:"imageTemplateId,omitempty" xmlrpc:"imageTemplateId,omitempty"`
+
+	// The item prices that a server can be configured with.
+	ItemPrices []Product_Item_Price `json:"itemPrices,omitempty" xmlrpc:"itemPrices,omitempty"`
+
+	// A flag indicating that the provision should use LVM for all logical drives.
+	LvmFlag *bool `json:"lvmFlag,omitempty" xmlrpc:"lvmFlag,omitempty"`
+
+	// A flag indicating that the remote management card's password will be reset.
+	ResetIpmiPassword *int `json:"resetIpmiPassword,omitempty" xmlrpc:"resetIpmiPassword,omitempty"`
+
+	// IDs of SoftLayer_Security_Ssh_Key objects on the current account which will be added to the server for authentication. SSH Keys will not be added to servers with Microsoft Windows.
+	SshKeyIds []int `json:"sshKeyIds,omitempty" xmlrpc:"sshKeyIds,omitempty"`
+
+	// A flag indicating that the BIOS will be updated when installing the operating system.
+	UpgradeBios *int `json:"upgradeBios,omitempty" xmlrpc:"upgradeBios,omitempty"`
+
+	// A flag indicating that the firmware on all hard drives will be updated when installing the operating system.
+	UpgradeHardDriveFirmware *int `json:"upgradeHardDriveFirmware,omitempty" xmlrpc:"upgradeHardDriveFirmware,omitempty"`
+}
+
+// The SoftLayer_Container_Hardware_Server_Details data type contains information relating to a server's component information, network information, and software information.
+type Container_Hardware_Server_Details struct {
+	Entity
+
+	// The components that belong to a piece of hardware.
+	Components []Hardware_Component `json:"components,omitempty" xmlrpc:"components,omitempty"`
+
+	// The network components that belong to a piece of hardware.
+	NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"`
+
+	// The software that belongs to a piece of hardware.
+ Software []Software_Component `json:"software,omitempty" xmlrpc:"software,omitempty"` +} + +// no documentation yet +type Container_Hardware_Server_Request struct { + Entity + + // no documentation yet + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + SuccessFlag *bool `json:"successFlag,omitempty" xmlrpc:"successFlag,omitempty"` +} + +// SoftLayer_Container_KnowledgeLayer_QuestionAnswer models a single question and answer pair from SoftLayer's KnowledgeLayer knowledge base. SoftLayer's backend network interfaces with the KnowledgeLayer to recommend helpful articles when support tickets are created. +type Container_KnowledgeLayer_QuestionAnswer struct { + Entity + + // The answer to a question asked on the SoftLayer KnowledgeLayer. + Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // The link to a question asked on the SoftLayer KnowledgeLayer. + Link *string `json:"link,omitempty" xmlrpc:"link,omitempty"` + + // A question asked on the SoftLayer KnowledgeLayer. + Question *string `json:"question,omitempty" xmlrpc:"question,omitempty"` +} + +// no documentation yet +type Container_Message struct { + Entity + + // no documentation yet + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Container_Metric_Data_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + SummaryType *string `json:"summaryType,omitempty" xmlrpc:"summaryType,omitempty"` + + // no documentation yet + Unit *string `json:"unit,omitempty" xmlrpc:"unit,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Details This container is a parent class for detailing diverse metrics. +type Container_Metric_Tracking_Object_Details struct { + Entity + + // The name that best describes the metric being collected. + MetricName *string `json:"metricName,omitempty" xmlrpc:"metricName,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Summary This container is a parent class for summarizing diverse metrics. +type Container_Metric_Tracking_Object_Summary struct { + Entity + + // The name that best describes the metric being collected. + MetricName *string `json:"metricName,omitempty" xmlrpc:"metricName,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Virtual_Host_Details This container details a virtual host's metric data. +type Container_Metric_Tracking_Object_Virtual_Host_Details struct { + Container_Metric_Tracking_Object_Details + + // The day this metric was collected. + Day *Time `json:"day,omitempty" xmlrpc:"day,omitempty"` + + // The maximum number of guests hosted by this platform for the given day. + MaxInstances *int `json:"maxInstances,omitempty" xmlrpc:"maxInstances,omitempty"` + + // The maximum amount of memory utilized by this platform for the given day. + MaxMemoryUsage *int `json:"maxMemoryUsage,omitempty" xmlrpc:"maxMemoryUsage,omitempty"` + + // The mean number of guests hosted by this platform for the given day. 
+ MeanInstances *Float64 `json:"meanInstances,omitempty" xmlrpc:"meanInstances,omitempty"` + + // The mean amount of memory utilized by this platform for the given day. + MeanMemoryUsage *Float64 `json:"meanMemoryUsage,omitempty" xmlrpc:"meanMemoryUsage,omitempty"` + + // The minimum number of guests hosted by this platform for the given day. + MinInstances *int `json:"minInstances,omitempty" xmlrpc:"minInstances,omitempty"` + + // The minimum amount of memory utilized by this platform for the given day. + MinMemoryUsage *int `json:"minMemoryUsage,omitempty" xmlrpc:"minMemoryUsage,omitempty"` +} + +// SoftLayer_Container_Metric_Tracking_Object_Virtual_Host_Summary This container summarizes a virtual host's metric data. +type Container_Metric_Tracking_Object_Virtual_Host_Summary struct { + Container_Metric_Tracking_Object_Summary + + // The average amount of memory usage thus far in this billing cycle. + AvgMemoryUsageInBillingCycle *int `json:"avgMemoryUsageInBillingCycle,omitempty" xmlrpc:"avgMemoryUsageInBillingCycle,omitempty"` + + // Current bill cycle end date. + CurrentBillCycleEnd *Time `json:"currentBillCycleEnd,omitempty" xmlrpc:"currentBillCycleEnd,omitempty"` + + // Current bill cycle start date. + CurrentBillCycleStart *Time `json:"currentBillCycleStart,omitempty" xmlrpc:"currentBillCycleStart,omitempty"` + + // The last count of instances this platform was hosting. + LastInstanceCount *int `json:"lastInstanceCount,omitempty" xmlrpc:"lastInstanceCount,omitempty"` + + // The last amount of memory this platform was using. + LastMemoryUsageAmount *int `json:"lastMemoryUsageAmount,omitempty" xmlrpc:"lastMemoryUsageAmount,omitempty"` + + // The last time this virtual host was polled for metrics. + LastPollTime *Time `json:"lastPollTime,omitempty" xmlrpc:"lastPollTime,omitempty"` + + // The max number of instances hosted thus far in this billing cycle. + MaxInstanceInBillingCycle *int `json:"maxInstanceInBillingCycle,omitempty" xmlrpc:"maxInstanceInBillingCycle,omitempty"` + + // Previous bill cycle end date. + PreviousBillCycleEnd *Time `json:"previousBillCycleEnd,omitempty" xmlrpc:"previousBillCycleEnd,omitempty"` + + // Previous bill cycle start date. + PreviousBillCycleStart *Time `json:"previousBillCycleStart,omitempty" xmlrpc:"previousBillCycleStart,omitempty"` + + // This virtual hosting platform name. + VirtualPlatformName *string `json:"virtualPlatformName,omitempty" xmlrpc:"virtualPlatformName,omitempty"` +} + +// The SoftLayer_Container_Monitoring_Alarm_History data type contains information relating to SoftLayer monitoring alarm history. +type Container_Monitoring_Alarm_History struct { + Entity + + // Account ID that this alarm belongs to + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // ID of the monitoring agent that triggered this alarm + AgentId *int `json:"agentId,omitempty" xmlrpc:"agentId,omitempty"` + + // Alarm ID + AlarmId *string `json:"alarmId,omitempty" xmlrpc:"alarmId,omitempty"` + + // Time that an alarm was closed. 
+	ClosedDate *Time `json:"closedDate,omitempty" xmlrpc:"closedDate,omitempty"`
+
+	// Time that an alarm was triggered
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// Alarm message
+	Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"`
+
+	// Robot ID
+	RobotId *int `json:"robotId,omitempty" xmlrpc:"robotId,omitempty"`
+
+	// Severity of an alarm
+	Severity *string `json:"severity,omitempty" xmlrpc:"severity,omitempty"`
+}
+
+// SoftLayer_Container_Monitoring_Graph_Outputs models a single outbound object for a graph of given data sets.
+type Container_Monitoring_Graph_Outputs struct {
+	Entity
+
+	// The maximum date included in this graph.
+	EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+	// Error message encountered during graphing
+	GraphError *string `json:"graphError,omitempty" xmlrpc:"graphError,omitempty"`
+
+	// The raw PNG binary data to be displayed once the graph is drawn.
+	GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"`
+
+	// The minimum date included in this graph.
+	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+}
+
+// This object holds authentication data to a server.
+type Container_Network_Authentication_Data struct {
+	Entity
+
+	// The name of a host
+	Host *string `json:"host,omitempty" xmlrpc:"host,omitempty"`
+
+	// The authentication password
+	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+	// The port number
+	Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"`
+
+	// The type of network protocol. This can be ftp, ssh and so on.
+	Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// The authentication username
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// SoftLayer_Container_Network_Bandwidth_Data_Summary models an interface's overall bandwidth usage during its current billing cycle.
+type Container_Network_Bandwidth_Data_Summary struct {
+	Entity
+
+	// The amount of bandwidth a server has allocated to it in its current billing period.
+	AllowedUsage *Float64 `json:"allowedUsage,omitempty" xmlrpc:"allowedUsage,omitempty"`
+
+	// The amount of bandwidth that a server has used within its current billing period.
+	EstimatedUsage *Float64 `json:"estimatedUsage,omitempty" xmlrpc:"estimatedUsage,omitempty"`
+
+	// The amount of bandwidth a server is projected to use within its billing period, based on its current usage.
+	ProjectedUsage *Float64 `json:"projectedUsage,omitempty" xmlrpc:"projectedUsage,omitempty"`
+
+	// The unit of measurement used in a bandwidth data summary.
+	UsageUnits *string `json:"usageUnits,omitempty" xmlrpc:"usageUnits,omitempty"`
+}
+
+// SoftLayer_Container_Network_Bandwidth_Version1_Usage models an hourly bandwidth record.
+type Container_Network_Bandwidth_Version1_Usage struct {
+	Entity
+
+	// The amount of incoming bandwidth that a server has used within the hour of the recordedDate.
+	IncomingAmount *Float64 `json:"incomingAmount,omitempty" xmlrpc:"incomingAmount,omitempty"`
+
+	// The amount of outgoing bandwidth that a server has used within the hour of the recordedDate.
+	OutgoingAmount *Float64 `json:"outgoingAmount,omitempty" xmlrpc:"outgoingAmount,omitempty"`
+
+	// The date and time that the bandwidth was used by a piece of hardware
+	RecordedDate *Time `json:"recordedDate,omitempty" xmlrpc:"recordedDate,omitempty"`
+}
+
+// SoftLayer_Container_Network_ContentDelivery_Authentication_Directory represents a token authentication directory on your CDN FTP or on your origin server.
+type Container_Network_ContentDelivery_Authentication_Directory struct {
+	Entity
+
+	// The date that a token authentication directory was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The name of a directory or a file within a directory listing.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The type of platform that a token authentication directory is defined for. Possible types are HTTP Large, HTTP Small, Flash and Windows Media
+	Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// This container is used for the CDN content authentication service.
+type Container_Network_ContentDelivery_Authentication_Parameter struct {
+	Entity
+
+	// A CDN account name
+	CdnAccountName *string `json:"cdnAccountName,omitempty" xmlrpc:"cdnAccountName,omitempty"`
+
+	// A client IP address
+	ClientIp *string `json:"clientIp,omitempty" xmlrpc:"clientIp,omitempty"`
+
+	// A client's referrer information
+	Referrer *string `json:"referrer,omitempty" xmlrpc:"referrer,omitempty"`
+
+	// A source URL
+	SourceUrl *string `json:"sourceUrl,omitempty" xmlrpc:"sourceUrl,omitempty"`
+
+	// An authentication token string
+	Token *string `json:"token,omitempty" xmlrpc:"token,omitempty"`
+}
+
+// CDN supports the content authentication service. With the content authentication service, customers can control access to their content. There are several scenarios where this authentication capability could be useful. Websites can prevent other rogue websites from linking to their videos. Content owners can prevent users from passing around http links, thus forcing them to log in to view content. It is also possible to authenticate via the client IP address. Referrer information is also checked if provided by a client's browser. CDN servers will invoke a web service method to validate a content authentication token.
+//
+// CDN uses the default authentication web service provided by SoftLayer to validate a token. A customer can use their own implementation of the token authentication web service by using the [[SoftLayer_Network_ContentDelivery_Account::setAuthenticationServiceEndpoint|setAuthenticationServiceEndpoint]] method.
+//
+// This container class holds the token validation web service endpoint information. CDN supports 3 different protocols: HTTP, RTMP (streaming Flash), and MMS (streaming Windows Media)
+type Container_Network_ContentDelivery_Authentication_ServiceEndpoint struct {
+	Entity
+
+	// The authentication web service endpoint that CDN servers will use to validate a token
+	Endpoint *string `json:"endpoint,omitempty" xmlrpc:"endpoint,omitempty"`
+
+	// The protocol that the WSDL will be used for. This can be HTTP, WINDOWSMEDIA, or FLASH
+	Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+}
+
+// SoftLayer_Container_Network_ContentDelivery_Bandwidth_PointsOfPresence_Summary models an individual CDN point of presence's bandwidth usage for a CDN account within a given date range.
CDN POPs are located throughout the world, so individual POP usage may be beneficial in determining who is downloading your CDN hosted content.
+type Container_Network_ContentDelivery_Bandwidth_PointsOfPresence_Summary struct {
+	Entity
+
+	// The amount of bandwidth used by a CDN POP.
+	Bandwidth *uint `json:"bandwidth,omitempty" xmlrpc:"bandwidth,omitempty"`
+
+	// The ending date of a CDN POP bandwidth summary.
+	EndDateTime *Time `json:"endDateTime,omitempty" xmlrpc:"endDateTime,omitempty"`
+
+	// A CDN POP's name. This is typically the city the POP resides in.
+	PopName *string `json:"popName,omitempty" xmlrpc:"popName,omitempty"`
+
+	// The starting date of a CDN POP bandwidth summary.
+	StartDateTime *Time `json:"startDateTime,omitempty" xmlrpc:"startDateTime,omitempty"`
+
+	// The unit of measurement used in a CDN POP bandwidth summary.
+	UsageUnits *string `json:"usageUnits,omitempty" xmlrpc:"usageUnits,omitempty"`
+
+	// The view count
+	ViewCount *uint `json:"viewCount,omitempty" xmlrpc:"viewCount,omitempty"`
+}
+
+// SoftLayer_Container_Network_ContentDelivery_Bandwidth_Summary models a CDN account's overall bandwidth usage and overages within a given date range.
+type Container_Network_ContentDelivery_Bandwidth_Summary struct {
+	Entity
+
+	// The CDN account id
+	CdnAccountId *int `json:"cdnAccountId,omitempty" xmlrpc:"cdnAccountId,omitempty"`
+
+	// The ending date of a CDN bandwidth summary.
+	EndDateTime *Time `json:"endDateTime,omitempty" xmlrpc:"endDateTime,omitempty"`
+
+	// The name of a file that is requested by visitors
+	FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"`
+
+	// The media type
+	MediaType *string `json:"mediaType,omitempty" xmlrpc:"mediaType,omitempty"`
+
+	// The starting date of a CDN bandwidth summary.
+	StartDateTime *Time `json:"startDateTime,omitempty" xmlrpc:"startDateTime,omitempty"`
+
+	// The amount of bandwidth used by a CDN account in between a given starting and ending date.
+	Usage *Float64 `json:"usage,omitempty" xmlrpc:"usage,omitempty"`
+
+	// The unit of measurement used in a CDN bandwidth summary.
+	UsageUnits *string `json:"usageUnits,omitempty" xmlrpc:"usageUnits,omitempty"`
+}
+
+// SoftLayer_Container_Network_ContentDelivery_Bandwidth_Summary_Detail models a CDN account's overall bandwidth usage and overages within a given date range.
+type Container_Network_ContentDelivery_Bandwidth_Summary_Detail struct {
+	Container_Network_ContentDelivery_Bandwidth_Summary
+
+	// The duration of a file that is viewed.
+	Duration *Float64 `json:"duration,omitempty" xmlrpc:"duration,omitempty"`
+
+	// The number of times that a file is viewed.
+	ViewCount *int `json:"viewCount,omitempty" xmlrpc:"viewCount,omitempty"`
+}
+
+// SoftLayer's CDN allows for multiple origin pull domains and CNAME records. This container holds the origin pull configuration details. CDN currently supports the origin pull method for HTTP content.
+type Container_Network_ContentDelivery_OriginPull_Mapping struct {
+	Entity
+
+	// The CNAME record.
+	Cname *string `json:"cname,omitempty" xmlrpc:"cname,omitempty"`
+
+	// The unique identifier of an origin pull configuration
+	Id *string `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// This indicates if an origin pull mapping is for secure content or not.
+	IsSecureContent *bool `json:"isSecureContent,omitempty" xmlrpc:"isSecureContent,omitempty"`
+
+	// The type of a media supported by CDN.
Supported media types are: "HTTP", "FLASH" and "WM"
+	MediaType *string `json:"mediaType,omitempty" xmlrpc:"mediaType,omitempty"`
+
+	// The URL of an origin server. A URL can contain a directory path.
+	OriginUrl *string `json:"originUrl,omitempty" xmlrpc:"originUrl,omitempty"`
+}
+
+// SoftLayer's CDN content delivery network offering replicates your data to a number of Points of Presence (POPs) around the world. SoftLayer_Container_Network_ContentDelivery_PointsOfPresence models one of these POP locations.
+type Container_Network_ContentDelivery_PointsOfPresence struct {
+	Entity
+
+	// A CDN Point of Presence's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A CDN Point of Presence's name. This is typically the city that the POP is located in.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// This container holds information on a purge request. See the [[SoftLayer_Network_ContentDelivery_Account::purgeCache|Purge method]] for more details.
+//
+// Status code can be "SUCCESS", "FAILED", or "INVALID_URL". The "INVALID_URL" code is returned when a URL is malformed or does not belong to the customer. "FAILED" is returned in case there was an internal error.
+type Container_Network_ContentDelivery_PurgeService_Response struct {
+	Entity
+
+	// A status code indicating whether your request was successful or not
+	StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"`
+
+	// The URL whose cached object you wish to purge
+	Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"`
+}
+
+// no documentation yet
+type Container_Network_ContentDelivery_Report_Usage struct {
+	Entity
+
+	// no documentation yet
+	ApplicationDeliveryNetwork *Float64 `json:"applicationDeliveryNetwork,omitempty" xmlrpc:"applicationDeliveryNetwork,omitempty"`
+
+	// no documentation yet
+	ApplicationDeliveryNetworkSsl *Float64 `json:"applicationDeliveryNetworkSsl,omitempty" xmlrpc:"applicationDeliveryNetworkSsl,omitempty"`
+
+	// no documentation yet
+	DiskSpace *Float64 `json:"diskSpace,omitempty" xmlrpc:"diskSpace,omitempty"`
+
+	// no documentation yet
+	EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+	// no documentation yet
+	Flash *Float64 `json:"flash,omitempty" xmlrpc:"flash,omitempty"`
+
+	// no documentation yet
+	Http *Float64 `json:"http,omitempty" xmlrpc:"http,omitempty"`
+
+	// no documentation yet
+	HttpSmall *Float64 `json:"httpSmall,omitempty" xmlrpc:"httpSmall,omitempty"`
+
+	// no documentation yet
+	Https *Float64 `json:"https,omitempty" xmlrpc:"https,omitempty"`
+
+	// no documentation yet
+	HttpsSmall *Float64 `json:"httpsSmall,omitempty" xmlrpc:"httpsSmall,omitempty"`
+
+	// no documentation yet
+	Region *string `json:"region,omitempty" xmlrpc:"region,omitempty"`
+
+	// no documentation yet
+	SslTotal *Float64 `json:"sslTotal,omitempty" xmlrpc:"sslTotal,omitempty"`
+
+	// no documentation yet
+	StandardTotal *Float64 `json:"standardTotal,omitempty" xmlrpc:"standardTotal,omitempty"`
+
+	// no documentation yet
+	StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+	// no documentation yet
+	WindowsMedia *Float64 `json:"windowsMedia,omitempty" xmlrpc:"windowsMedia,omitempty"`
+}
+
+// SoftLayer's CDN content delivery network allows for multiple types of media hosting in addition to traditional HTTP hosting. Each of these media types is accessible from a different URL.
SoftLayer_Container_Network_ContentDelivery_SupportedProtocol holds details about CDN supported media types and their associated URLs.
+//
+// CDN media URLs follow the standard <protocol>://<CDN account name>.<media type>.cdn.softlayer.net
+//
+// Flash streaming, Windows Media streaming and HTTP protocols are supported: Flash streaming: rtmp://<CDN account name>.flash.cdn.softlayer.net Windows Media streaming: mms://<CDN account name>.wm.cdn.softlayer.net HTTP: http://<CDN account name>.http.cdn.softlayer.net
+type Container_Network_ContentDelivery_SupportedProtocol struct {
+	Entity
+
+	// The host name related to CDN supported media, represented in the hostname portion of a CDN URL.
+	Host *string `json:"host,omitempty" xmlrpc:"host,omitempty"`
+
+	// The type of a media supported by CDN such as "FLASH", "WINDOWSMEDIA" or "HTTP"
+	MediaType *string `json:"mediaType,omitempty" xmlrpc:"mediaType,omitempty"`
+
+	// The platform name. It is a friendly name of the media type.
+	Platform *string `json:"platform,omitempty" xmlrpc:"platform,omitempty"`
+
+	// The media protocol supported by CDN. This represents the media portion of a CDN URL. Currently supported protocols are: rtmp, mms and http
+	Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+}
+
+// SoftLayer_Container_Network_Directory_Listing represents a single entry in a listing of files within a remote directory. API methods that return remote directory listings typically return arrays of SoftLayer_Container_Network_Directory_Listing objects.
+type Container_Network_Directory_Listing struct {
+	Entity
+
+	// If the file in a directory listing is a directory itself then fileCount is the number of files within the directory.
+	FileCount *int `json:"fileCount,omitempty" xmlrpc:"fileCount,omitempty"`
+
+	// The name of a directory or a file within a directory listing.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The type of file in a directory listing. If a directory listing entry is a directory itself then type is set to "directory". Otherwise, type is a blank string.
+	Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// The IntrusionProtection_Event object stores information about individual intrusion protection events.
+//
+// It is a data container that cannot be edited, deleted, or saved.
+//
+// It is returned by many methods in the TippingPointReporting object, but never directly, always as a child of another container object.
+type Container_Network_IntrusionProtection_Event struct {
+	Entity
+
+	// The CVE ID(s), if any, associated with this attack signature.
+	CVEId *string `json:"CVEId,omitempty" xmlrpc:"CVEId,omitempty"`
+
+	// The action that was taken when this attack was discovered. Can be either "Block" or "Permit"
+	ActionTaken *string `json:"actionTaken,omitempty" xmlrpc:"actionTaken,omitempty"`
+
+	// The number of attacks in this block. Attacks are grouped differently based on the query performed on the tippingPointReporting object.
+	AttackCount *int `json:"attackCount,omitempty" xmlrpc:"attackCount,omitempty"`
+
+	// Long description of the attack. May contain links to more information
+	AttackLongDescription *string `json:"attackLongDescription,omitempty" xmlrpc:"attackLongDescription,omitempty"`
+
+	// Name of the attack
+	AttackName *string `json:"attackName,omitempty" xmlrpc:"attackName,omitempty"`
+
+	// The starting timestamp of the attack recorded, in Y-m-d H:i:s format. May not be set, depending on the type of query performed.
+	BeginTime *string `json:"beginTime,omitempty" xmlrpc:"beginTime,omitempty"`
+
+	// The BugTraq ID(s), if any, associated with this attack signature.
+	BugtraqId *string `json:"bugtraqId,omitempty" xmlrpc:"bugtraqId,omitempty"`
+
+	// The human-readable classification of the attack
+	Classification *string `json:"classification,omitempty" xmlrpc:"classification,omitempty"`
+
+	// The IP Address (as a dotted decimal string) of the machine that was the target of the attack
+	DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"`
+
+	// The port the attack was directed at
+	DestinationPort *int `json:"destinationPort,omitempty" xmlrpc:"destinationPort,omitempty"`
+
+	// The ending timestamp of the attack recorded, in Y-m-d H:i:s format. May not be set, depending on the type of query performed.
+	EndTime *string `json:"endTime,omitempty" xmlrpc:"endTime,omitempty"`
+
+	// The platform affected by the attack
+	Platform *string `json:"platform,omitempty" xmlrpc:"platform,omitempty"`
+
+	// The protocol used in the attack
+	Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+
+	// The human-readable severity of this attack, from "Low" to "Critical"
+	Severity *string `json:"severity,omitempty" xmlrpc:"severity,omitempty"`
+
+	// Unique ID of the "Signature" in question. The signature determines the type of attack recorded. SignatureId is used in the drillDown() function on the TippingPointReporting service
+	SignatureId *string `json:"signatureId,omitempty" xmlrpc:"signatureId,omitempty"`
+
+	// The IP Address (as a dotted decimal string) of the machine originating the attack
+	SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"`
+
+	// The port the attack originated from
+	SourcePort *int `json:"sourcePort,omitempty" xmlrpc:"sourcePort,omitempty"`
+}
+
+// The IntrusionProtection_Statistic is used exclusively by the getMainStatistics method on the TippingPointReporting service, and serves mainly as a pair object, storing a name and an attack count. Name is usually the name of an attack, but it can also be an attacking IP Address
+type Container_Network_IntrusionProtection_Statistic struct {
+	Entity
+
+	// The number of attacks affecting this name over the time period
+	AttackCount *int `json:"attackCount,omitempty" xmlrpc:"attackCount,omitempty"`
+
+	// Either the name of the attack in question, or the attacking IP Address
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The IntrusionProtection_Statistics Type is used as a container for SoftLayer_Container_Network_IntrusionProtection_Statistic objects. The SoftLayer_Container_Network_IntrusionProtection_Statistics class holds the "header" information, like the item being queried (either account or data center), the time frame, and the grand total of the attacks.
+type Container_Network_IntrusionProtection_Statistics struct { + Entity + + // The actual target, either a datacenter name, an account ID, or a subnet IP + Target *string `json:"target,omitempty" xmlrpc:"target,omitempty"` + + // The type of the target; currently either "datacenter", "account", or "subnet" + TargetType *string `json:"targetType,omitempty" xmlrpc:"targetType,omitempty"` + + // The time frame of the attack, in string form, like "Last 24 hours" + TimeFrame *string `json:"timeFrame,omitempty" xmlrpc:"timeFrame,omitempty"` + + // The top attacks for this target over this time frame + TopAttacks []Container_Network_IntrusionProtection_Statistic `json:"topAttacks,omitempty" xmlrpc:"topAttacks,omitempty"` + + // Total attacks for this target over this time frame + TotalAttacks *int `json:"totalAttacks,omitempty" xmlrpc:"totalAttacks,omitempty"` +} + +// The IntrusionProtection_SubnetReport object is the container that holds the SoftLayer_Container_Network_IntrusionProtection_Event objects for a particular subnet, or "All Subnets", whatever the case may be. Subnet, subnet mask, direction, and the individual events are returned by this object. +type Container_Network_IntrusionProtection_SubnetReport struct { + Entity + + // The CIDR for this report. If the subnetIpAddress is "All Subnets", this is set to 32 and should be ignored. + Cidr *int `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // Direction of the attack, either 'Inbound' or 'Outbound' + Direction *string `json:"direction,omitempty" xmlrpc:"direction,omitempty"` + + // The SoftLayer_Container_Network_IntrusionProtection_Event objects on this report. + Events []Container_Network_IntrusionProtection_Event `json:"events,omitempty" xmlrpc:"events,omitempty"` + + // The "target" of this report; it could be an IP address, a subnet's network identifier, or "All Subnets" + SubnetIpAddress *string `json:"subnetIpAddress,omitempty" xmlrpc:"subnetIpAddress,omitempty"` +} + +// The LoadBalancer_StatusEntry object stores information about the current status of a particular load balancer service. +// +// It is a data container that cannot be edited, deleted, or saved. +// +// It is returned exclusively by the getStatus method on the [[SoftLayer_Network_LoadBalancer_Service]] service +type Container_Network_LoadBalancer_StatusEntry struct { + Entity + + // The value of the entry. + Content *string `json:"content,omitempty" xmlrpc:"content,omitempty"` + + // Text description of the status entry + Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"` +} + +// This container class holds information on a media file, such as file name, codec, frame rate, and so on +type Container_Network_Media_Information struct { + Entity + + // The audio bit rate + AudioBitRate *int `json:"audioBitRate,omitempty" xmlrpc:"audioBitRate,omitempty"` + + // The audio channel mode + AudioChannelMode *string `json:"audioChannelMode,omitempty" xmlrpc:"audioChannelMode,omitempty"` + + // The number of audio channels + AudioChannels *int `json:"audioChannels,omitempty" xmlrpc:"audioChannels,omitempty"` + + // The audio codec name + AudioCodec *string `json:"audioCodec,omitempty" xmlrpc:"audioCodec,omitempty"` + + // The audio sample rate + AudioSampleRate *int `json:"audioSampleRate,omitempty" xmlrpc:"audioSampleRate,omitempty"` + + // The duration of the media + Duration *Float64 `json:"duration,omitempty" xmlrpc:"duration,omitempty"` + + // The error message, if any.
+ ErrorMessage *string `json:"errorMessage,omitempty" xmlrpc:"errorMessage,omitempty"` + + // The name of a media file + File *string `json:"file,omitempty" xmlrpc:"file,omitempty"` + + // The file format + FileFormat *string `json:"fileFormat,omitempty" xmlrpc:"fileFormat,omitempty"` + + // The size of a media file in bytes + FileSize *uint `json:"fileSize,omitempty" xmlrpc:"fileSize,omitempty"` + + // The frame rate + FrameRate *Float64 `json:"frameRate,omitempty" xmlrpc:"frameRate,omitempty"` + + // The width of the media in pixels + SizeX *int `json:"sizeX,omitempty" xmlrpc:"sizeX,omitempty"` + + // The height of the media in pixels + SizeY *int `json:"sizeY,omitempty" xmlrpc:"sizeY,omitempty"` + + // The total number of frames + TotalFrames *uint `json:"totalFrames,omitempty" xmlrpc:"totalFrames,omitempty"` + + // The width in a video's width-to-height aspect ratio + VideoAspectX *Float64 `json:"videoAspectX,omitempty" xmlrpc:"videoAspectX,omitempty"` + + // The height in a video's width-to-height aspect ratio + VideoAspectY *int `json:"videoAspectY,omitempty" xmlrpc:"videoAspectY,omitempty"` + + // The video codec name + VideoCodec *string `json:"videoCodec,omitempty" xmlrpc:"videoCodec,omitempty"` +} + +// no documentation yet +type Container_Network_Media_Transcode_Job_Watermark struct { + Entity + + // Time to stop showing the watermark, in milliseconds + EndTime *int `json:"endTime,omitempty" xmlrpc:"endTime,omitempty"` + + // Filename of the image to use as a watermark in the transcoding job + FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"` + + // Position to place the watermark at + Position *Container_Network_Media_Transcode_Job_Watermark_Position `json:"position,omitempty" xmlrpc:"position,omitempty"` + + // Time to start showing the watermark, in milliseconds + StartTime *int `json:"startTime,omitempty" xmlrpc:"startTime,omitempty"` + + // Text to place in the watermark + Text *string `json:"text,omitempty" xmlrpc:"text,omitempty"` + + // Percentage of transparency the watermark should have + TransparencyPercentage *int `json:"transparencyPercentage,omitempty" xmlrpc:"transparencyPercentage,omitempty"` +} + +// no documentation yet +type Container_Network_Media_Transcode_Job_Watermark_Position struct { + Entity + + // X Coordinate of Watermark + X *int `json:"x,omitempty" xmlrpc:"x,omitempty"` + + // Y Coordinate of Watermark + Y *int `json:"y,omitempty" xmlrpc:"y,omitempty"` +} + +// A Transcode preset is a set of configuration parameters that defines a Transcode output format. SoftLayer_Container_Network_Media_Transcode_Preset contains preset information defined on a Transcode server +type Container_Network_Media_Transcode_Preset struct { + Entity + + // The unique id that is used by a Transcode server + GUID *string `json:"GUID,omitempty" xmlrpc:"GUID,omitempty"` + + // The category that a preset belongs to + Category *string `json:"category,omitempty" xmlrpc:"category,omitempty"` + + // The description of the preset + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The friendly name of a preset + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Transcode preset element +type Container_Network_Media_Transcode_Preset_Element struct { + Entity + + // The additional elements for a DROPDOWNLIST element + AdditionalElements []Container_Network_Media_Transcode_Preset_Element_Option `json:"additionalElements,omitempty" xmlrpc:"additionalElements,omitempty"` + + // The default value of an element.
+ DefaultValue *string `json:"defaultValue,omitempty" xmlrpc:"defaultValue,omitempty"` + + // The description of a preset element + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The flag that indicates whether an element is enabled or not + Enabled *bool `json:"enabled,omitempty" xmlrpc:"enabled,omitempty"` + + // The extended description of a preset element + ExtendedDescription *string `json:"extendedDescription,omitempty" xmlrpc:"extendedDescription,omitempty"` + + // The flag that indicates whether an element is hidden or not + Hidden *bool `json:"hidden,omitempty" xmlrpc:"hidden,omitempty"` + + // The maximum value of an element + MaximumValue *int `json:"maximumValue,omitempty" xmlrpc:"maximumValue,omitempty"` + + // The minimum value of an element + MinimumValue *int `json:"minimumValue,omitempty" xmlrpc:"minimumValue,omitempty"` + + // The name of a preset element + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The name of a parent element + ParentName *string `json:"parentName,omitempty" xmlrpc:"parentName,omitempty"` + + // The type of a preset element. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// Transcode preset element option +type Container_Network_Media_Transcode_Preset_Element_Option struct { + Entity + + // The name of an additional preset element + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The value of an additional preset element + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email struct { + Entity + + // no documentation yet + Body *string `json:"body,omitempty" xmlrpc:"body,omitempty"` + + // no documentation yet + ContainsHtml *bool `json:"containsHtml,omitempty" xmlrpc:"containsHtml,omitempty"` + + // no documentation yet + From *string `json:"from,omitempty" xmlrpc:"from,omitempty"` + + // no documentation yet + Subject *string `json:"subject,omitempty" xmlrpc:"subject,omitempty"` + + // no documentation yet + To *string `json:"to,omitempty" xmlrpc:"to,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_Account_Overview struct { + Entity + + // no documentation yet + CreditsAllowed *int `json:"creditsAllowed,omitempty" xmlrpc:"creditsAllowed,omitempty"` + + // no documentation yet + CreditsOverage *int `json:"creditsOverage,omitempty" xmlrpc:"creditsOverage,omitempty"` + + // no documentation yet + CreditsRemain *int `json:"creditsRemain,omitempty" xmlrpc:"creditsRemain,omitempty"` + + // no documentation yet + CreditsUsed *int `json:"creditsUsed,omitempty" xmlrpc:"creditsUsed,omitempty"` + + // no documentation yet + Package *string `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // no documentation yet + Reputation *int `json:"reputation,omitempty" xmlrpc:"reputation,omitempty"` + + // no documentation yet + Requests *int `json:"requests,omitempty" xmlrpc:"requests,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_Customer_Profile struct { + Entity + + // no documentation yet + Address *string `json:"address,omitempty" xmlrpc:"address,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet +
FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + Website *string `json:"website,omitempty" xmlrpc:"website,omitempty"` + + // no documentation yet + Zip *string `json:"zip,omitempty" xmlrpc:"zip,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_List_Entry struct { + Entity + + // no documentation yet + Created *string `json:"created,omitempty" xmlrpc:"created,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + Reason *string `json:"reason,omitempty" xmlrpc:"reason,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_Statistics struct { + Entity + + // no documentation yet + Blocks *int `json:"blocks,omitempty" xmlrpc:"blocks,omitempty"` + + // no documentation yet + Bounces *int `json:"bounces,omitempty" xmlrpc:"bounces,omitempty"` + + // no documentation yet + Clicks *int `json:"clicks,omitempty" xmlrpc:"clicks,omitempty"` + + // no documentation yet + Date *string `json:"date,omitempty" xmlrpc:"date,omitempty"` + + // no documentation yet + Delivered *int `json:"delivered,omitempty" xmlrpc:"delivered,omitempty"` + + // no documentation yet + InvalidEmail *int `json:"invalidEmail,omitempty" xmlrpc:"invalidEmail,omitempty"` + + // no documentation yet + Opens *int `json:"opens,omitempty" xmlrpc:"opens,omitempty"` + + // no documentation yet + RepeatBounces *int `json:"repeatBounces,omitempty" xmlrpc:"repeatBounces,omitempty"` + + // no documentation yet + RepeatSpamReports *int `json:"repeatSpamReports,omitempty" xmlrpc:"repeatSpamReports,omitempty"` + + // no documentation yet + RepeatUnsubscribes *int `json:"repeatUnsubscribes,omitempty" xmlrpc:"repeatUnsubscribes,omitempty"` + + // no documentation yet + Requests *int `json:"requests,omitempty" xmlrpc:"requests,omitempty"` + + // no documentation yet + SpamReports *int `json:"spamReports,omitempty" xmlrpc:"spamReports,omitempty"` + + // no documentation yet + UniqueClicks *int `json:"uniqueClicks,omitempty" xmlrpc:"uniqueClicks,omitempty"` + + // no documentation yet + UniqueOpens *int `json:"uniqueOpens,omitempty" xmlrpc:"uniqueOpens,omitempty"` + + // no documentation yet + Unsubscribes *int `json:"unsubscribes,omitempty" xmlrpc:"unsubscribes,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_Statistics_Graph struct { + Entity + + // no documentation yet + GraphError *string `json:"graphError,omitempty" xmlrpc:"graphError,omitempty"` + + // no documentation yet + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // no documentation yet + GraphTitle *string `json:"graphTitle,omitempty" xmlrpc:"graphTitle,omitempty"` +} + +// no documentation yet +type Container_Network_Message_Delivery_Email_Sendgrid_Statistics_Options struct { + Entity + + // no documentation yet + AggregatesOnly *bool `json:"aggregatesOnly,omitempty" xmlrpc:"aggregatesOnly,omitempty"` + + // no documentation yet + Category *string `json:"category,omitempty" 
xmlrpc:"category,omitempty"` + + // no documentation yet + Days *int `json:"days,omitempty" xmlrpc:"days,omitempty"` + + // no documentation yet + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // no documentation yet + SelectedStatistics []string `json:"selectedStatistics,omitempty" xmlrpc:"selectedStatistics,omitempty"` + + // no documentation yet + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// no documentation yet +type Container_Network_Port_Statistic struct { + Entity + + // no documentation yet + AdministrativeStatus *int `json:"administrativeStatus,omitempty" xmlrpc:"administrativeStatus,omitempty"` + + // no documentation yet + InDiscardPackets *uint `json:"inDiscardPackets,omitempty" xmlrpc:"inDiscardPackets,omitempty"` + + // no documentation yet + InErrorPackets *uint `json:"inErrorPackets,omitempty" xmlrpc:"inErrorPackets,omitempty"` + + // no documentation yet + InOctets *uint `json:"inOctets,omitempty" xmlrpc:"inOctets,omitempty"` + + // no documentation yet + InUnicastPackets *uint `json:"inUnicastPackets,omitempty" xmlrpc:"inUnicastPackets,omitempty"` + + // no documentation yet + MaximumTransmissionUnit *uint `json:"maximumTransmissionUnit,omitempty" xmlrpc:"maximumTransmissionUnit,omitempty"` + + // no documentation yet + OperationalStatus *int `json:"operationalStatus,omitempty" xmlrpc:"operationalStatus,omitempty"` + + // no documentation yet + OutDiscardPackets *uint `json:"outDiscardPackets,omitempty" xmlrpc:"outDiscardPackets,omitempty"` + + // no documentation yet + OutErrorPackets *uint `json:"outErrorPackets,omitempty" xmlrpc:"outErrorPackets,omitempty"` + + // no documentation yet + OutOctets *uint `json:"outOctets,omitempty" xmlrpc:"outOctets,omitempty"` + + // no documentation yet + OutUnicastPackets *uint `json:"outUnicastPackets,omitempty" xmlrpc:"outUnicastPackets,omitempty"` + + // no documentation yet + PortDuplex *uint `json:"portDuplex,omitempty" xmlrpc:"portDuplex,omitempty"` + + // no documentation yet + Speed *uint `json:"speed,omitempty" xmlrpc:"speed,omitempty"` +} + +// no documentation yet +type Container_Network_Service_Resource_ObjectStorage_ConnectionInformation struct { + Entity + + // no documentation yet + Datacenter *string `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // no documentation yet + DatacenterShortName *string `json:"datacenterShortName,omitempty" xmlrpc:"datacenterShortName,omitempty"` + + // no documentation yet + PrivateEndpoint *string `json:"privateEndpoint,omitempty" xmlrpc:"privateEndpoint,omitempty"` + + // no documentation yet + PublicEndpoint *string `json:"publicEndpoint,omitempty" xmlrpc:"publicEndpoint,omitempty"` +} + +// no documentation yet +type Container_Network_Storage_Backup_Evault_WebCc_Authentication_Details struct { + Entity + + // no documentation yet + EventValidation *string `json:"eventValidation,omitempty" xmlrpc:"eventValidation,omitempty"` + + // no documentation yet + ViewState *string `json:"viewState,omitempty" xmlrpc:"viewState,omitempty"` + + // no documentation yet + WebCcUrl *string `json:"webCcUrl,omitempty" xmlrpc:"webCcUrl,omitempty"` +} + +// SoftLayer's StorageLayer Evault services provides details regarding the the purchased vault. +// +// When a job is created using the Webcc Console, the job created is identified as a task on the vault. Using this service, information regarding the task can be retrieved. 
+// +// +type Container_Network_Storage_Evault_Vault_Task struct { + Entity + + // Unique identifier for the task. + Id *uint `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The hostname provided at time of agent registration. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Total compressed bytes used for the task. + UsedPoolsize *uint `json:"usedPoolsize,omitempty" xmlrpc:"usedPoolsize,omitempty"` +} + +// The SoftLayer_Container_Network_Storage_Evault_WebCc_AgentStatus will contain the timestamp of the last backup performed by the EVault agent. The agent status will also be returned. +type Container_Network_Storage_Evault_WebCc_AgentStatus struct { + Entity + + // Timestamp of last backup performed by the EVault backup agent + LastBackup *Time `json:"lastBackup,omitempty" xmlrpc:"lastBackup,omitempty"` + + // Status indicating the cumulative status of all jobs performed by the EVault agent. For example, if one job out of three jobs failed, the agent status will be "Failed Backup(s)". + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// The SoftLayer_Container_Network_Storage_Evault_WebCc_BackupResults will contain the timeframe of backups, and the results will also be returned. +type Container_Network_Storage_Evault_WebCc_BackupResults struct { + Entity + + // Timestamp of begin time + BeginTime *Time `json:"beginTime,omitempty" xmlrpc:"beginTime,omitempty"` + + // Count of backups with conflicts. + Conflict *string `json:"conflict,omitempty" xmlrpc:"conflict,omitempty"` + + // Timestamp of end time + EndTime *Time `json:"endTime,omitempty" xmlrpc:"endTime,omitempty"` + + // Count of failed backups. + Failed *string `json:"failed,omitempty" xmlrpc:"failed,omitempty"` + + // Count of successful backups. + Success *string `json:"success,omitempty" xmlrpc:"success,omitempty"` +} + +// The SoftLayer_Container_Network_Storage_Evault_WebCc_JobDetails will contain basic details for all backup and restore jobs performed by the StorageLayer EVault service offering. +type Container_Network_Storage_Evault_WebCc_JobDetails struct { + Entity + + // The number of bytes currently used by the backup job. (provided only for backup jobs) + BytesUsed *uint `json:"bytesUsed,omitempty" xmlrpc:"bytesUsed,omitempty"` + + // Description of the backup/restore job + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // hardware id + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // Date of the last job run. + LastRunDate *Time `json:"lastRunDate,omitempty" xmlrpc:"lastRunDate,omitempty"` + + // Name of the backup/restore job + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // Size of backup job when it was first run. (provided only for backup jobs) + OriginalSize *uint `json:"originalSize,omitempty" xmlrpc:"originalSize,omitempty"` + + // Percentage of overall used space allocated by the job. (provided only for backup jobs) + PercentageOfTotalUsage *int `json:"percentageOfTotalUsage,omitempty" xmlrpc:"percentageOfTotalUsage,omitempty"` + + // Result of the latest job run. + Result *string `json:"result,omitempty" xmlrpc:"result,omitempty"` + + // virtual guest id + VirtualGuestId *int `json:"virtualGuestId,omitempty" xmlrpc:"virtualGuestId,omitempty"` +} + +// The SoftLayer_Container_Network_Storage_Host will contain the reference id field for the object associated with the host. The host object type will also be returned.
+type Container_Network_Storage_Host struct { + Entity + + // Reference id field for object associated with host. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Type for the object associated with host + ObjectType *string `json:"objectType,omitempty" xmlrpc:"objectType,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Bucket provides a description of a bucket +type Container_Network_Storage_Hub_ObjectStorage_Bucket struct { + Entity + + // no documentation yet + BytesUsed *int `json:"bytesUsed,omitempty" xmlrpc:"bytesUsed,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + ObjectCount *int `json:"objectCount,omitempty" xmlrpc:"objectCount,omitempty"` + + // no documentation yet + StorageLocation *string `json:"storageLocation,omitempty" xmlrpc:"storageLocation,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_ContentDeliveryUrl is a container which holds the CDN URLs associated with an object storage account +type Container_Network_Storage_Hub_ObjectStorage_ContentDeliveryUrl struct { + Entity + + // no documentation yet + Datacenter *string `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // no documentation yet + FlashUrl *string `json:"flashUrl,omitempty" xmlrpc:"flashUrl,omitempty"` + + // no documentation yet + HttpUrl *string `json:"httpUrl,omitempty" xmlrpc:"httpUrl,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Endpoint provides specific details on available endpoint URLs and locations. +type Container_Network_Storage_Hub_ObjectStorage_Endpoint struct { + Entity + + // no documentation yet + Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + Region *string `json:"region,omitempty" xmlrpc:"region,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + Url *string `json:"url,omitempty" xmlrpc:"url,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_File provides specific details that only apply to files that are sent or received from CloudLayer storage resources. +type Container_Network_Storage_Hub_ObjectStorage_File struct { + Container_Utility_File_Entity + + // no documentation yet + Folder *string `json:"folder,omitempty" xmlrpc:"folder,omitempty"` + + // no documentation yet + Hash *string `json:"hash,omitempty" xmlrpc:"hash,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Folder provides details about folders (containers) which store collections of files.
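+//
+// A minimal, illustrative sketch (not part of this package): totaling the
+// bytes reported across a fetched slice of folders, with nil-pointer guards.
+//
+//	var totalBytes uint
+//	for _, folder := range folders { // folders is an assumed, already-fetched []Container_Network_Storage_Hub_ObjectStorage_Folder
+//		if folder.Bytes != nil {
+//			totalBytes += *folder.Bytes
+//		}
+//	}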
+type Container_Network_Storage_Hub_ObjectStorage_Folder struct { + Entity + + // no documentation yet + Bytes *uint `json:"bytes,omitempty" xmlrpc:"bytes,omitempty"` + + // no documentation yet + Count *uint `json:"count,omitempty" xmlrpc:"count,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Node provides detailed information for a particular object storage node +type Container_Network_Storage_Hub_ObjectStorage_Node struct { + Entity + + // no documentation yet + DeviceName *string `json:"deviceName,omitempty" xmlrpc:"deviceName,omitempty"` + + // no documentation yet + ResourceName *string `json:"resourceName,omitempty" xmlrpc:"resourceName,omitempty"` + + // no documentation yet + UserAuthUrl *string `json:"userAuthUrl,omitempty" xmlrpc:"userAuthUrl,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Policy provides specific details on available storage policies. +type Container_Network_Storage_Hub_ObjectStorage_Policy struct { + Entity + + // no documentation yet + PolicyCode *string `json:"policyCode,omitempty" xmlrpc:"policyCode,omitempty"` +} + +// SoftLayer_Container_Network_Storage_Hub_ObjectStorage_Provision provides a description of a provision +type Container_Network_Storage_Hub_ObjectStorage_Provision struct { + Entity + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + Provision *string `json:"provision,omitempty" xmlrpc:"provision,omitempty"` + + // no documentation yet + ProvisionCreateDate *Time `json:"provisionCreateDate,omitempty" xmlrpc:"provisionCreateDate,omitempty"` + + // no documentation yet + ProvisionModifyDate *Time `json:"provisionModifyDate,omitempty" xmlrpc:"provisionModifyDate,omitempty"` + + // no documentation yet + ProvisionTime *int `json:"provisionTime,omitempty" xmlrpc:"provisionTime,omitempty"` +} + +// no documentation yet +type Container_Network_Storage_NetworkConnectionInformation struct { + Entity + + // no documentation yet + Id *string `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // no documentation yet + StorageType *string `json:"storageType,omitempty" xmlrpc:"storageType,omitempty"` +} + +// Container for Volume Duplicate Information +type Container_Network_Storage_VolumeDuplicateParameters struct { + Entity + + // The number of ongoing concurrentDuplicateOperations.
+ ConcurrentDuplicateOperations *int `json:"concurrentDuplicateOperations,omitempty" xmlrpc:"concurrentDuplicateOperations,omitempty"` + + // The iopsPerGB of the volume + IopsPerGb *Float64 `json:"iopsPerGb,omitempty" xmlrpc:"iopsPerGb,omitempty"` + + // Returns true if volume can be duplicated; false otherwise + IsDuplicatable *bool `json:"isDuplicatable,omitempty" xmlrpc:"isDuplicatable,omitempty"` + + // This represents the location id + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // This represents the location name + LocationName *string `json:"locationName,omitempty" xmlrpc:"locationName,omitempty"` + + // The maximumIopsPerGb allowed for a duplicated volume + MaximumIopsPerGb *Float64 `json:"maximumIopsPerGb,omitempty" xmlrpc:"maximumIopsPerGb,omitempty"` + + // The maximumIopsTier allowed for a duplicated volume + MaximumIopsTier *Float64 `json:"maximumIopsTier,omitempty" xmlrpc:"maximumIopsTier,omitempty"` + + // The maximumVolumeSize allowed for a duplicated volume + MaximumVolumeSize *int `json:"maximumVolumeSize,omitempty" xmlrpc:"maximumVolumeSize,omitempty"` + + // The minimumIopsPerGb allowed for a duplicated volume + MinimumIopsPerGb *Float64 `json:"minimumIopsPerGb,omitempty" xmlrpc:"minimumIopsPerGb,omitempty"` + + // The minimumIopsTier allowed for a duplicated volume + MinimumIopsTier *Float64 `json:"minimumIopsTier,omitempty" xmlrpc:"minimumIopsTier,omitempty"` + + // The minimumVolumeSize allowed for a duplicated volume + MinimumVolumeSize *int `json:"minimumVolumeSize,omitempty" xmlrpc:"minimumVolumeSize,omitempty"` + + // The snapshotSpaceSize allowed for a cloned volume + SnapshotSpaceSize *int `json:"snapshotSpaceSize,omitempty" xmlrpc:"snapshotSpaceSize,omitempty"` + + // The volume duplicate status description + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // This represents the volume username + VolumeUsername *string `json:"volumeUsername,omitempty" xmlrpc:"volumeUsername,omitempty"` +} + +// SoftLayer_Container_Network_Subnet_IpAddress models an IPv4 address as it exists as a member of its subnet, letting the user know if it is a network identifier, gateway, broadcast, or usable address. Addresses that are neither the network identifier nor the gateway nor the broadcast addresses are usable by SoftLayer servers. +type Container_Network_Subnet_IpAddress struct { + Entity + + // The hardware that an IP address is associated with. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // An IP address expressed in dotted-quad notation. + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // Whether an IP address is its subnet's broadcast address. + IsBroadcastAddress *bool `json:"isBroadcastAddress,omitempty" xmlrpc:"isBroadcastAddress,omitempty"` + + // Whether an IP address is its subnet's gateway address. Gateway addresses exist on SoftLayer's routers and may not be assigned to servers. + IsGatewayAddress *bool `json:"isGatewayAddress,omitempty" xmlrpc:"isGatewayAddress,omitempty"` + + // Whether an IP address is its subnet's network identifier address. + IsNetworkAddress *bool `json:"isNetworkAddress,omitempty" xmlrpc:"isNetworkAddress,omitempty"` +} + +// SoftLayer_Container_Network_Subnet_Registration_SubnetReference is provided to reference a [[SoftLayer_Network_Subnet_Registration]] object and the [[SoftLayer_Network_Subnet]] it references, in CIDR form.
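+//
+// A minimal, illustrative sketch (not part of this package): splitting the
+// CIDR string with the standard library's net package.
+//
+//	if ref.SubnetCidr != nil { // ref is an assumed, already-fetched SubnetReference
+//		if _, ipNet, err := net.ParseCIDR(*ref.SubnetCidr); err == nil {
+//			fmt.Println("registered network:", ipNet.String())
+//		}
+//	}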
+type Container_Network_Subnet_Registration_SubnetReference struct { + Entity + + // The ID of the [[SoftLayer_Network_Subnet_Registration]] object. + RegistrationId *int `json:"registrationId,omitempty" xmlrpc:"registrationId,omitempty"` + + // The subnet address in CIDR form. + SubnetCidr *string `json:"subnetCidr,omitempty" xmlrpc:"subnetCidr,omitempty"` +} + +// SoftLayer_Container_Network_Subnet_Registration_TransactionDetails is provided to return details of a newly created Subnet Registration Transaction. +type Container_Network_Subnet_Registration_TransactionDetails struct { + Entity + + // The IDs and subnets of the [[SoftLayer_Network_Subnet_Registration]] objects. + SubnetReferences []Container_Network_Subnet_Registration_SubnetReference `json:"subnetReferences,omitempty" xmlrpc:"subnetReferences,omitempty"` + + // The ID of the Transaction object. + TransactionId *int `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"` +} + +// no documentation yet +type Container_Notification_Mass_Filter_TemplateKey struct { + Entity +} + +// no documentation yet +type Container_Notification_Mass_Filter_TemplateValue struct { + Entity +} + +// Represents the acceptance status of a Policy. +type Container_Policy_Acceptance struct { + Entity + + // Flag to indicate if a policy has been previously accepted. + AcceptedFlag *bool `json:"acceptedFlag,omitempty" xmlrpc:"acceptedFlag,omitempty"` + + // Name of the policy for which we are representing its acceptance status. + PolicyName *string `json:"policyName,omitempty" xmlrpc:"policyName,omitempty"` + + // ID of the [[SoftLayer_Product_Item_Policy_Assignment]]. + ProductPolicyAssignmentId *int `json:"productPolicyAssignmentId,omitempty" xmlrpc:"productPolicyAssignmentId,omitempty"` +} + +// The SoftLayer_Container_Product_Item_Category data type represents a single product item category. +type Container_Product_Item_Category struct { + Entity + + // Identifier for the category. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// The SoftLayer_Container_Product_Item_Category_Question_Answer data type represents an answer to an item category question. It contains the category, the question being answered, and the answer. +type Container_Product_Item_Category_Question_Answer struct { + Entity + + // The answer to the question. + Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // The product item category code. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // The product item category id. + CategoryId *int `json:"categoryId,omitempty" xmlrpc:"categoryId,omitempty"` + + // The product item category question id. + QuestionId *int `json:"questionId,omitempty" xmlrpc:"questionId,omitempty"` +} + +// The SoftLayer_Container_Product_Item_Category_ZeroFee_Count data type represents a count of zero fee billing/invoice items. +type Container_Product_Item_Category_ZeroFee_Count struct { + Entity + + // The product item category code. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // The product item category id. + CategoryId *int `json:"categoryId,omitempty" xmlrpc:"categoryId,omitempty"` + + // The product item category name. + CategoryName *string `json:"categoryName,omitempty" xmlrpc:"categoryName,omitempty"` + + // The count of zero fee items for this category.
+ Count *int `json:"count,omitempty" xmlrpc:"count,omitempty"` +} + +// The SoftLayer_Container_Product_Item_Discount_Program data type represents the information about a discount that is related to a specific product item. +type Container_Product_Item_Discount_Program struct { + Entity + + // The number of times the item discount(s) may be applied for that order container. At a minimum, the number will be 1; at most, it will match the quantity of the order container. + ApplicableQuantity *int `json:"applicableQuantity,omitempty" xmlrpc:"applicableQuantity,omitempty"` + + // The product item that the discount applies to. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // The sum of the one time fees (one time, setup and labor) of the prices of this container multiplied by the applicable quantity of this container. + OneTimeAmount *Float64 `json:"oneTimeAmount,omitempty" xmlrpc:"oneTimeAmount,omitempty"` + + // The tax amount on the one time fees (one time, setup and labor) of the prices of this container multiplied by the applicable quantity of this container. + OneTimeTax *Float64 `json:"oneTimeTax,omitempty" xmlrpc:"oneTimeTax,omitempty"` + + // The item prices that contain the amount of the discount in the recurringFee field. There may be one or more prices. + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` + + // The sum of the one time fees (one time, setup and labor) of the prices of this container multiplied by the applicable quantity of this container with the proration factor applied. + ProratedOneTimeAmount *Float64 `json:"proratedOneTimeAmount,omitempty" xmlrpc:"proratedOneTimeAmount,omitempty"` + + // The tax amount on the one time fees (one time, setup and labor) of the prices of this container multiplied by the applicable quantity of this container with the proration factor applied. + ProratedOneTimeTax *Float64 `json:"proratedOneTimeTax,omitempty" xmlrpc:"proratedOneTimeTax,omitempty"` + + // The sum of the recurring fees of the prices of this container multiplied by the applicable quantity of this container with the proration factor applied. + ProratedRecurringAmount *Float64 `json:"proratedRecurringAmount,omitempty" xmlrpc:"proratedRecurringAmount,omitempty"` + + // The tax amount on the recurring fees of the prices of this container multiplied by the applicable quantity of this container with the proration factor applied. + ProratedRecurringTax *Float64 `json:"proratedRecurringTax,omitempty" xmlrpc:"proratedRecurringTax,omitempty"` + + // The sum of the recurring fees of the prices of this container multiplied by the applicable quantity of this container. + RecurringAmount *Float64 `json:"recurringAmount,omitempty" xmlrpc:"recurringAmount,omitempty"` + + // The tax amount on the recurring fees of the prices of this container multiplied by the applicable quantity of this container. + RecurringTax *Float64 `json:"recurringTax,omitempty" xmlrpc:"recurringTax,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. +type Container_Product_Order struct { + Entity + + // Flag for identifying an order for Big Data Deployment. + BigDataOrderFlag *bool `json:"bigDataOrderFlag,omitempty" xmlrpc:"bigDataOrderFlag,omitempty"` + + // Billing Information associated with an order. For existing customers, this information is completely ignored. Do not send this information for existing customers.
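+//
+// For a new account, a minimal, illustrative sketch (values are hypothetical;
+// the sl helper package from softlayer-go is assumed for pointer literals):
+//
+//	order.BillingInformation = &datatypes.Container_Product_Order_Billing_Information{
+//		BillingNameFirst:   sl.String("Jane"),
+//		BillingNameLast:    sl.String("Doe"),
+//		BillingEmail:       sl.String("jane@example.com"),
+//		BillingCountryCode: sl.String("US"),
+//	}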
+ BillingInformation *Container_Product_Order_Billing_Information `json:"billingInformation,omitempty" xmlrpc:"billingInformation,omitempty"` + + // This is the ID of the [[SoftLayer_Billing_Order_Item]] of this configuration/container. It is used for rebuilding an order container from a quote and is set automatically. + BillingOrderItemId *int `json:"billingOrderItemId,omitempty" xmlrpc:"billingOrderItemId,omitempty"` + + // The URL to which PayPal redirects the browser after checkout has been canceled before completion of a payment. + CancelUrl *string `json:"cancelUrl,omitempty" xmlrpc:"cancelUrl,omitempty"` + + // Added by Gopherlayer. This hints to the API what kind of product order this is. + ComplexType *string `json:"complexType,omitempty" xmlrpc:"complexType,omitempty"` + + // User-specified description to identify a particular order container. This is useful if you have a multi-configuration order (multiple orderContainers) and you want to be able to easily determine one from another. Populating this value may be helpful if an exception is thrown when placing an order and it's tied to a specific order container. + ContainerIdentifier *string `json:"containerIdentifier,omitempty" xmlrpc:"containerIdentifier,omitempty"` + + // This hash is internally generated and is used for tracking order containers. + ContainerSplHash *string `json:"containerSplHash,omitempty" xmlrpc:"containerSplHash,omitempty"` + + // The currency type chosen at checkout. + CurrencyShortName *string `json:"currencyShortName,omitempty" xmlrpc:"currencyShortName,omitempty"` + + // Device Fingerprint Identifier - Optional. + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // This is the configuration identifier for tracking orders on the HTML order forms. + DisplayLayerSessionId *string `json:"displayLayerSessionId,omitempty" xmlrpc:"displayLayerSessionId,omitempty"` + + // no documentation yet + ExtendedHardwareTesting *bool `json:"extendedHardwareTesting,omitempty" xmlrpc:"extendedHardwareTesting,omitempty"` + + // The [[SoftLayer_Product_Item_Price]] for the Flexible Credit Program discount. The oneTimeFee field contains the calculated discount being applied to the order. + FlexibleCreditProgramPrice *Product_Item_Price `json:"flexibleCreditProgramPrice,omitempty" xmlrpc:"flexibleCreditProgramPrice,omitempty"` + + // For orders that contain servers (bare metal, virtual server, big data, etc.), the hardware property is required. This property is an array of [[SoftLayer_Hardware]] objects. The hostname and domain properties are required for each hardware object. Note that virtual server ([[SoftLayer_Container_Product_Order_Virtual_Guest]]) orders may populate this field instead of the virtualGuests property. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // An optional virtual disk image template identifier to be used as an installation base for a computing instance order + ImageTemplateGlobalIdentifier *string `json:"imageTemplateGlobalIdentifier,omitempty" xmlrpc:"imageTemplateGlobalIdentifier,omitempty"` + + // An optional virtual disk image template identifier to be used as an installation base for a computing instance order + ImageTemplateId *int `json:"imageTemplateId,omitempty" xmlrpc:"imageTemplateId,omitempty"` + + // Flag to identify a "managed" order. This value is set internally.
+ IsManagedOrder *int `json:"isManagedOrder,omitempty" xmlrpc:"isManagedOrder,omitempty"` + + // The collection of [[SoftLayer_Container_Product_Item_Category_Question_Answer]] for any product category that has additional questions requiring user input. + ItemCategoryQuestionAnswers []Container_Product_Item_Category_Question_Answer `json:"itemCategoryQuestionAnswers,omitempty" xmlrpc:"itemCategoryQuestionAnswers,omitempty"` + + // The [[SoftLayer_Location_Region]] keyname or specific [[SoftLayer_Location_Datacenter]] id where the order should be provisioned. If this value is provided and the regionalGroup property is also specified, an exception will be thrown indicating that only 1 is allowed. + Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // This [[SoftLayer_Location]] object will be determined from the location property and will be returned in the order verification or placement response. Any value specified here will get overwritten by the verification process. + LocationObject *Location `json:"locationObject,omitempty" xmlrpc:"locationObject,omitempty"` + + // A generic message about the order. Does not need to be sent in with any orders. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // Orders may contain an array of configurations. Populating this property allows you to purchase multiple configurations within a single order. Each order container will have its own individual settings independent of the other order containers. For example, it is possible to order a bare metal server in one configuration and a virtual server in another. + // + // If orderContainers is populated on the base order container, most of the configuration-specific properties are ignored on the base container. For example, prices, location and packageId will be ignored on the base container, but since the billingInformation is a property that's not specific to a single order container (but the order as a whole), it must be populated on the base container. + OrderContainers []Container_Product_Order `json:"orderContainers,omitempty" xmlrpc:"orderContainers,omitempty"` + + // This is deprecated and does not do anything. + OrderHostnames []string `json:"orderHostnames,omitempty" xmlrpc:"orderHostnames,omitempty"` + + // Collection of exceptions resulting from the verification of the order. This value is set internally and is not required for end users when placing an order. When placing API orders, users can use this value to determine the container-specific exception that was thrown. + OrderVerificationExceptions []Container_Exception `json:"orderVerificationExceptions,omitempty" xmlrpc:"orderVerificationExceptions,omitempty"` + + // The [[SoftLayer_Product_Package]] id for an order container. This is required to place an order. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` + + // The payment type is optional. If nothing is sent in, then the normal method of payment will be used. For PayPal customers, this means a paypalToken will be returned in the receipt. This token is to be used on the PayPal website to complete the order. For credit card customers, the card on file in our system will be used to make an initial authorization. To force the order to use a payment type, use one of the following: CARD_ON_FILE or PAYPAL + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // The post-tax recurring charge for the order. This is the sum of preTaxRecurring + totalRecurringTax.
+ PostTaxRecurring *Float64 `json:"postTaxRecurring,omitempty" xmlrpc:"postTaxRecurring,omitempty"` + + // The post-tax recurring hourly charge for the order. Since taxes are not calculated for hourly orders, this value will be the same as preTaxRecurringHourly. + PostTaxRecurringHourly *Float64 `json:"postTaxRecurringHourly,omitempty" xmlrpc:"postTaxRecurringHourly,omitempty"` + + // The post-tax recurring monthly charge for the order. This is the sum of preTaxRecurringMonthly + totalRecurringTax. + PostTaxRecurringMonthly *Float64 `json:"postTaxRecurringMonthly,omitempty" xmlrpc:"postTaxRecurringMonthly,omitempty"` + + // The post-tax setup fees of the order. This is the sum of preTaxSetup + totalSetupTax. + PostTaxSetup *Float64 `json:"postTaxSetup,omitempty" xmlrpc:"postTaxSetup,omitempty"` + + // The pre-tax recurring total of the order. If there are mixed monthly and hourly prices on the order, this will be the sum of preTaxRecurringHourly and preTaxRecurringMonthly. + PreTaxRecurring *Float64 `json:"preTaxRecurring,omitempty" xmlrpc:"preTaxRecurring,omitempty"` + + // The pre-tax hourly recurring total of the order. If there are only monthly prices on the order, this value will be 0. + PreTaxRecurringHourly *Float64 `json:"preTaxRecurringHourly,omitempty" xmlrpc:"preTaxRecurringHourly,omitempty"` + + // The pre-tax monthly recurring total of the order. If there are only hourly prices on the order, this value will be 0. + PreTaxRecurringMonthly *Float64 `json:"preTaxRecurringMonthly,omitempty" xmlrpc:"preTaxRecurringMonthly,omitempty"` + + // The pre-tax setup fee total of the order. + PreTaxSetup *Float64 `json:"preTaxSetup,omitempty" xmlrpc:"preTaxSetup,omitempty"` + + // If there are any presale events available for an order, this value will be populated. It is set internally and is not required for end users when placing an order. See [[SoftLayer_Sales_Presale_Event]] for more info. + PresaleEvent *Sales_Presale_Event `json:"presaleEvent,omitempty" xmlrpc:"presaleEvent,omitempty"` + + // A preset configuration id for the package. It is required if no prices are submitted. + PresetId *int `json:"presetId,omitempty" xmlrpc:"presetId,omitempty"` + + // This is a collection of [[SoftLayer_Product_Item_Price]] objects. The only required property to populate for an item price object when ordering is its id - all other supplied information about the price (e.g., recurringFee, setupFee, etc.) will be ignored. Unless the [[SoftLayer_Product_Package]] associated with the order allows for preset prices, this property is required to place an order. + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` + + // The id of a [[SoftLayer_Hardware_Component_Partition_Template]]. This property is optional. If no partition template is provided, a default will be used according to the operating system chosen with the order. Using the [[SoftLayer_Hardware_Component_Partition_OperatingSystem]] service, getPartitionTemplates will return those available for the particular operating system. + PrimaryDiskPartitionId *int `json:"primaryDiskPartitionId,omitempty" xmlrpc:"primaryDiskPartitionId,omitempty"` + + // Priorities to set on replication set servers. + Priorities []string `json:"priorities,omitempty" xmlrpc:"priorities,omitempty"` + + // Flag for identifying a container as Virtual Server (Private Node). + PrivateCloudOrderFlag *bool `json:"privateCloudOrderFlag,omitempty" xmlrpc:"privateCloudOrderFlag,omitempty"` + + // Type of Virtual Server (Private Node) order.
Potential values: INITIAL, ADDHOST, ADDIPS, ADDZONE + PrivateCloudOrderType *string `json:"privateCloudOrderType,omitempty" xmlrpc:"privateCloudOrderType,omitempty"` + + // Optional promotion code for an order. + PromotionCode *string `json:"promotionCode,omitempty" xmlrpc:"promotionCode,omitempty"` + + // Generic properties. + Properties []Container_Product_Order_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // The Prorated Initial Charge plus the balance on the account. Only the recurring fees are prorated. Here's how the calculation works: we take the postTaxRecurring value and we prorate it based on the time between now and the next bill date for this account. After this, we add in the setup fee since this is not prorated. Then, if there is a balance on the account, we add that to the total. In the event that there is a credit balance on the account, we will subtract this amount from the order total. If the credit balance on the account is greater than the prorated initial charge, the order will go through without a charge to the credit card on the account or requiring a PayPal payment. The credit on the account will be reduced by the order total, and the order will await approval from sales, as normal. If there is a pending order already in the system, we will ignore the balance on the account completely in the calculation of the initial charge. This is to protect against two orders coming into the system and getting the benefit of a credit balance, or worse, both orders being charged the order amount + the balance on the account. + ProratedInitialCharge *Float64 `json:"proratedInitialCharge,omitempty" xmlrpc:"proratedInitialCharge,omitempty"` + + // This is the same as the proratedInitialCharge, except the balance on the account is ignored. This is the prorated total amount of the order. + ProratedOrderTotal *Float64 `json:"proratedOrderTotal,omitempty" xmlrpc:"proratedOrderTotal,omitempty"` + + // The URLs for scripts to execute on their respective servers after they have been provisioned. Provision scripts are not available for Microsoft Windows servers. + ProvisionScripts []string `json:"provisionScripts,omitempty" xmlrpc:"provisionScripts,omitempty"` + + // The quantity of the item being ordered + Quantity *int `json:"quantity,omitempty" xmlrpc:"quantity,omitempty"` + + // A custom name to be assigned to the quote. + QuoteName *string `json:"quoteName,omitempty" xmlrpc:"quoteName,omitempty"` + + // Specifying a regional group name allows you to place your server or service in any datacenter within that regional group rather than at a specific datacenter. See [[SoftLayer_Location_Group_Regional]] to get a list of available regional group names. + // + // location and regionalGroup are mutually exclusive on an order container. If both location and regionalGroup are provided, an exception will be thrown indicating that only 1 is allowed. + // + // If a regional group is provided and VLANs are specified (within the hardware or virtualGuests properties), we will use the datacenter where the VLANs are located. If no VLANs are specified, we will use the preferred datacenter on the regional group object.
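+//
+// An illustrative sketch (not part of this package): set exactly one of
+// Location or RegionalGroup on a container. Both names below are
+// hypothetical; the sl helper package is assumed for pointer literals.
+//
+//	order.RegionalGroup = sl.String("us-east-region") // assumed regional group name
+//	// or, mutually exclusively:
+//	// order.Location = sl.String("DALLAS09") // assumed region keyname or datacenter id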
+ RegionalGroup *string `json:"regionalGroup,omitempty" xmlrpc:"regionalGroup,omitempty"` + + // An optional resource group identifier specifying the resource group to attach the order to + ResourceGroupId *int `json:"resourceGroupId,omitempty" xmlrpc:"resourceGroupId,omitempty"` + + // This variable specifies the name of the resource group the server configuration belongs to. For MongoDB Replica sets, it would be the replica set name. + ResourceGroupName *string `json:"resourceGroupName,omitempty" xmlrpc:"resourceGroupName,omitempty"` + + // An optional resource group template identifier to be used as a deployment base for a Virtual Server (Private Node) order. + ResourceGroupTemplateId *int `json:"resourceGroupTemplateId,omitempty" xmlrpc:"resourceGroupTemplateId,omitempty"` + + // The URL to which PayPal redirects the browser after a payment is completed. + ReturnUrl *string `json:"returnUrl,omitempty" xmlrpc:"returnUrl,omitempty"` + + // This flag indicates that the quote should be sent to the email address associated with the account or order. + SendQuoteEmailFlag *bool `json:"sendQuoteEmailFlag,omitempty" xmlrpc:"sendQuoteEmailFlag,omitempty"` + + // The number of cores for the server being ordered. This value is set internally. + ServerCoreCount *int `json:"serverCoreCount,omitempty" xmlrpc:"serverCoreCount,omitempty"` + + // The token of a requesting service. Do not set. + ServiceToken *string `json:"serviceToken,omitempty" xmlrpc:"serviceToken,omitempty"` + + // An optional computing instance identifier to be used as an installation base for a computing instance order + SourceVirtualGuestId *int `json:"sourceVirtualGuestId,omitempty" xmlrpc:"sourceVirtualGuestId,omitempty"` + + // The containers which hold SoftLayer_Security_Ssh_Key IDs to add to their respective servers. The order of containers passed in needs to match the order in which they are assigned to either hardware or virtualGuests. SSH Keys will not be assigned for servers with Microsoft Windows. + SshKeys []Container_Product_Order_SshKeys `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"` + + // An optional parameter for step-based order processing. + StepId *int `json:"stepId,omitempty" xmlrpc:"stepId,omitempty"` + + // + // + // For orders that want to add storage groups such as RAID across multiple disks, simply add [[SoftLayer_Container_Product_Order_Storage_Group]] objects to this array. Storage groups will only be used if the 'RAID' disk controller price is selected. Any other disk controller types will ignore the storage groups set here. + // + // The first storage group in this array will be considered the primary storage group, which is used for the OS. Any other storage groups will act as data storage. + // + // + StorageGroups []Container_Product_Order_Storage_Group `json:"storageGroups,omitempty" xmlrpc:"storageGroups,omitempty"` + + // The order container may not contain the final tax rates when it is returned from [[SoftLayer_Product_Order/verifyOrder|verifyOrder]]. This hash will facilitate checking if the tax rates have finished being calculated and retrieving the accurate tax rate values. + TaxCacheHash *string `json:"taxCacheHash,omitempty" xmlrpc:"taxCacheHash,omitempty"` + + // Flag to indicate if the order container has the final tax rates for the order. Some tax rates are calculated in the background because they take longer, and they might not be finished when the container is returned from [[SoftLayer_Product_Order/verifyOrder|verifyOrder]].
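+//
+// A hedged, illustrative sketch (not part of this package; the services and
+// session wiring from softlayer-go is assumed): one possible approach is to
+// re-run verifyOrder until this flag reports that taxes are complete.
+//
+//	svc := services.GetProductOrderService(sess)
+//	verified, err := svc.VerifyOrder(&order)
+//	for err == nil && verified.TaxCompletedFlag != nil && !*verified.TaxCompletedFlag {
+//		time.Sleep(5 * time.Second) // arbitrary backoff chosen for this sketch
+//		verified, err = svc.VerifyOrder(&order)
+//	}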
+ TaxCompletedFlag *bool `json:"taxCompletedFlag,omitempty" xmlrpc:"taxCompletedFlag,omitempty"` + + // The SoftLayer_Product_Item_Price for the Tech Incubator discount. The oneTimeFee field contains the calculated discount being applied to the order. + TechIncubatorItemPrice *Product_Item_Price `json:"techIncubatorItemPrice,omitempty" xmlrpc:"techIncubatorItemPrice,omitempty"` + + // The total tax portion of the recurring fees. + TotalRecurringTax *Float64 `json:"totalRecurringTax,omitempty" xmlrpc:"totalRecurringTax,omitempty"` + + // The tax amount of the setup fees. + TotalSetupTax *Float64 `json:"totalSetupTax,omitempty" xmlrpc:"totalSetupTax,omitempty"` + + // An optional flag to use hourly pricing instead of standard monthly pricing. + UseHourlyPricing *bool `json:"useHourlyPricing,omitempty" xmlrpc:"useHourlyPricing,omitempty"` + + // For virtual guest (virtual server) orders, this property is required if you did not specify data in the hardware property. This is an array of [[SoftLayer_Virtual_Guest]] objects. The hostname and domain properties are required for each virtual guest object. There is no need to specify data in this property and the hardware property - only one is required for virtual server orders. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` +} + +// This datatype is to be used for data transfer requests. +type Container_Product_Order_Account_Media_Data_Transfer_Request struct { + Container_Product_Order + + // An instance of [[SoftLayer_Account_Media_Data_Transfer_Request]] + Request *Account_Media_Data_Transfer_Request `json:"request,omitempty" xmlrpc:"request,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. The SoftLayer_Container_Product_Order_Attribute_Address datatype contains the address information. +type Container_Product_Order_Attribute_Address struct { + Entity + + // The physical street address. + AddressLine1 *string `json:"addressLine1,omitempty" xmlrpc:"addressLine1,omitempty"` + + // The second line in the address. Information such as suite number goes here. + AddressLine2 *string `json:"addressLine2,omitempty" xmlrpc:"addressLine2,omitempty"` + + // The city name. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // The 2-character country code (e.g., US). + CountryCode *string `json:"countryCode,omitempty" xmlrpc:"countryCode,omitempty"` + + // The non-US/Canadian state or region. + NonUsState *string `json:"nonUsState,omitempty" xmlrpc:"nonUsState,omitempty"` + + // The Zip or Postal Code. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // The state or region. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. The SoftLayer_Container_Product_Order_Attribute_Contact datatype contains the contact information. +type Container_Product_Order_Attribute_Contact struct { + Entity + + // The address information of the contact. + Address *Container_Product_Order_Attribute_Address `json:"address,omitempty" xmlrpc:"address,omitempty"` + + // The email address of the contact. + EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"` + + // The fax number associated with a contact. This is an optional value. + FaxNumber *string `json:"faxNumber,omitempty" xmlrpc:"faxNumber,omitempty"` + + // The first name of the contact.
+ FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // The last name of the contact. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // The organization name of the contact. + OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"` + + // The phone number associated with a contact. + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // The title of the contact. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. The SoftLayer_Container_Product_Order_Attribute_Organization datatype contains the organization information. +type Container_Product_Order_Attribute_Organization struct { + Entity + + // The address information of the contact. + Address *Container_Product_Order_Attribute_Address `json:"address,omitempty" xmlrpc:"address,omitempty"` + + // The fax number associated with an organization. This is an optional value. + FaxNumber *string `json:"faxNumber,omitempty" xmlrpc:"faxNumber,omitempty"` + + // The name of an organization. + OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"` + + // The phone number associated with an organization. + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. +type Container_Product_Order_Billing_Information struct { + Entity + + // The physical street address. Reserve information such as "apartment #123" or "Suite 2" for line 1. + BillingAddressLine1 *string `json:"billingAddressLine1,omitempty" xmlrpc:"billingAddressLine1,omitempty"` + + // The second line in the address. Information such as suite number goes here. + BillingAddressLine2 *string `json:"billingAddressLine2,omitempty" xmlrpc:"billingAddressLine2,omitempty"` + + // The city in which a customer's account resides. + BillingCity *string `json:"billingCity,omitempty" xmlrpc:"billingCity,omitempty"` + + // The 2-character Country code for an account's address. (i.e. US) + BillingCountryCode *string `json:"billingCountryCode,omitempty" xmlrpc:"billingCountryCode,omitempty"` + + // The email address associated with a customer account. + BillingEmail *string `json:"billingEmail,omitempty" xmlrpc:"billingEmail,omitempty"` + + // the company name for an account. + BillingNameCompany *string `json:"billingNameCompany,omitempty" xmlrpc:"billingNameCompany,omitempty"` + + // The first name of the customer account owner. + BillingNameFirst *string `json:"billingNameFirst,omitempty" xmlrpc:"billingNameFirst,omitempty"` + + // The last name of the customer account owner + BillingNameLast *string `json:"billingNameLast,omitempty" xmlrpc:"billingNameLast,omitempty"` + + // The fax number associated with a customer account. + BillingPhoneFax *string `json:"billingPhoneFax,omitempty" xmlrpc:"billingPhoneFax,omitempty"` + + // The phone number associated with a customer account. + BillingPhoneVoice *string `json:"billingPhoneVoice,omitempty" xmlrpc:"billingPhoneVoice,omitempty"` + + // The Zip or Postal Code for the billing address on an account. + BillingPostalCode *string `json:"billingPostalCode,omitempty" xmlrpc:"billingPostalCode,omitempty"` + + // The State for the account. 
+ BillingState *string `json:"billingState,omitempty" xmlrpc:"billingState,omitempty"`
+
+ // The credit card number to use.
+ CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"`
+
+ // The payment card expiration month
+ CardExpirationMonth *int `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"`
+
+ // The payment card expiration year
+ CardExpirationYear *int `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"`
+
+ // The Card Verification Value Code (CVV) number
+ CreditCardVerificationNumber *string `json:"creditCardVerificationNumber,omitempty" xmlrpc:"creditCardVerificationNumber,omitempty"`
+
+ // Tax exempt status. 1 = exempt (not taxable), 0 = not exempt (taxable)
+ TaxExempt *int `json:"taxExempt,omitempty" xmlrpc:"taxExempt,omitempty"`
+
+ // The VAT ID entered at checkout
+ VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. The SoftLayer_Container_Product_Order_Dns_Domain_Registration datatype contains everything required to place a domain registration order with SoftLayer.
+type Container_Product_Order_Dns_Domain_Registration struct {
+ Container_Product_Order
+
+ // Administrative contact information associated with a registration or transfer. This is required if the registration type is 'new' or 'transfer'.
+ AdministrativeContact *Container_Dns_Domain_Registration_Contact `json:"administrativeContact,omitempty" xmlrpc:"administrativeContact,omitempty"`
+
+ // Billing contact information associated with a registration or transfer. This is required if the registration type is 'new' or 'transfer'.
+ BillingContact *Container_Dns_Domain_Registration_Contact `json:"billingContact,omitempty" xmlrpc:"billingContact,omitempty"`
+
+ // The list of domains to be registered. This is required if the registration type is 'new', 'renew', or 'transfer'.
+ DomainRegistrationList []Container_Dns_Domain_Registration_List `json:"domainRegistrationList,omitempty" xmlrpc:"domainRegistrationList,omitempty"`
+
+ // Owner contact information associated with a registration or transfer. This is required if the registration type is 'new' or 'transfer'.
+ OwnerContact *Container_Dns_Domain_Registration_Contact `json:"ownerContact,omitempty" xmlrpc:"ownerContact,omitempty"`
+
+ // The type of a domain registration order. The registration type is required. Allowed values are new, transfer, and renew.
+ RegistrationType *string `json:"registrationType,omitempty" xmlrpc:"registrationType,omitempty"`
+
+ // Technical contact information associated with a registration or transfer. This is required if the registration type is 'new' or 'transfer'.
+ TechnicalContact *Container_Dns_Domain_Registration_Contact `json:"technicalContact,omitempty" xmlrpc:"technicalContact,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. The SoftLayer_Container_Product_Order_Dns_Domain_Reseller datatype contains everything required to place a domain reseller credit order with SoftLayer.
+type Container_Product_Order_Dns_Domain_Reseller struct {
+ Container_Product_Order
+
+ // Amount to be credited to the domain reseller account.
+ CreditAmount *Float64 `json:"creditAmount,omitempty" xmlrpc:"creditAmount,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder.
This datatype has everything required to place a Gateway Appliance Cluster order with SoftLayer. +type Container_Product_Order_Gateway_Appliance_Cluster struct { + Container_Product_Order + + // Used to identify which items on an order belong in the same cluster. + ClusterIdentifier *string `json:"clusterIdentifier,omitempty" xmlrpc:"clusterIdentifier,omitempty"` + + // Indicates what type of cluster order is being placed (HA, Provision). + ClusterOrderType *string `json:"clusterOrderType,omitempty" xmlrpc:"clusterOrderType,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a hardware security module order with SoftLayer. +type Container_Product_Order_Hardware_Security_Module struct { + Container_Product_Order_Hardware_Server +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. +type Container_Product_Order_Hardware_Server struct { + Container_Product_Order + + // Used to identify which items on an order belong in the same cluster. + ClusterIdentifier *string `json:"clusterIdentifier,omitempty" xmlrpc:"clusterIdentifier,omitempty"` + + // Indicates what type of cluster order is being placed (HA, Provision). + ClusterOrderType *string `json:"clusterOrderType,omitempty" xmlrpc:"clusterOrderType,omitempty"` + + // Used to identify which gateway is being upgraded to HA. + ClusterResourceId *int `json:"clusterResourceId,omitempty" xmlrpc:"clusterResourceId,omitempty"` + + // Id of the [[SoftLayer_Monitoring_Agent_Configuration_Template_Group]] to be used with the monitoring package + MonitoringAgentConfigurationTemplateGroupId *int `json:"monitoringAgentConfigurationTemplateGroupId,omitempty" xmlrpc:"monitoringAgentConfigurationTemplateGroupId,omitempty"` + + // When ordering Virtual Server (Private Node), this variable specifies the role of the server configuration. (Deprecated) + PrivateCloudServerRole *string `json:"privateCloudServerRole,omitempty" xmlrpc:"privateCloudServerRole,omitempty"` + + // Used to identify which device the new server should be attached to. + RequiredUpstreamDeviceId *int `json:"requiredUpstreamDeviceId,omitempty" xmlrpc:"requiredUpstreamDeviceId,omitempty"` + + // tags (used in MongoDB deployments). (Deprecated) + Tags []Container_Product_Order_Property `json:"tags,omitempty" xmlrpc:"tags,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. +type Container_Product_Order_Hardware_Server_Colocation struct { + Container_Product_Order_Hardware_Server +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a Gateway Appliance order. +type Container_Product_Order_Hardware_Server_Gateway_Appliance struct { + Container_Product_Order_Hardware_Server +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. +type Container_Product_Order_Hardware_Server_Upgrade struct { + Container_Product_Order_Hardware_Server +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. 
This datatype has everything required to place a Monitoring Package order with SoftLayer.
+type Container_Product_Order_Monitoring_Package struct {
+ Container_Product_Order
+
+ // no documentation yet
+ ConfigurationTemplateGroups []Monitoring_Agent_Configuration_Template_Group `json:"configurationTemplateGroups,omitempty" xmlrpc:"configurationTemplateGroups,omitempty"`
+
+ // no documentation yet
+ ServerType *string `json:"serverType,omitempty" xmlrpc:"serverType,omitempty"`
+}
+
+// This is a datatype used with multi-configuration deployments. Multi-configuration deployments also have a deployment-specific datatype that should be used in lieu of this one.
+type Container_Product_Order_MultiConfiguration struct {
+ Container_Product_Order
+}
+
+// no documentation yet
+type Container_Product_Order_MultiConfiguration_Tornado struct {
+ Container_Product_Order_MultiConfiguration
+}
+
+// This type contains the structure of network-related objects that may be specified when ordering services.
+type Container_Product_Order_Network struct {
+ Entity
+
+ // The [[SoftLayer_Network]] object.
+ Network *Network `json:"network,omitempty" xmlrpc:"network,omitempty"`
+
+ // The list of public [[SoftLayer_Container_Product_Order_Network_Vlan|vlans]] available for ordering. Each VLAN will have a list of public subnets that are accessible to the VLAN.
+ PublicVlans []Container_Product_Order `json:"publicVlans,omitempty" xmlrpc:"publicVlans,omitempty"`
+
+ // The list of private [[SoftLayer_Container_Product_Order_Network_Subnet|subnets]] available for ordering with a description of their available IP space.
+ Subnets []Container_Product_Order `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an application delivery controller order with SoftLayer.
+type Container_Product_Order_Network_Application_Delivery_Controller struct {
+ Container_Product_Order
+
+ // An optional [[SoftLayer_Network_Application_Delivery_Controller]] identifier that is used for upgrading an existing application delivery controller.
+ ApplicationDeliveryControllerId *int `json:"applicationDeliveryControllerId,omitempty" xmlrpc:"applicationDeliveryControllerId,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a CDN order with SoftLayer.
+type Container_Product_Order_Network_ContentDelivery_Account struct {
+ Container_Product_Order
+
+ // The CDN account name
+ CdnAccountName *string `json:"cdnAccountName,omitempty" xmlrpc:"cdnAccountName,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a CDN order with SoftLayer.
+type Container_Product_Order_Network_ContentDelivery_Account_Upgrade struct {
+ Container_Product_Order
+
+ // ID of an existing CDN account. You can use this to upgrade an existing CDN account.
+ CdnAccountId *string `json:"cdnAccountId,omitempty" xmlrpc:"cdnAccountId,omitempty"`
+}
+
+// This is the default container type for network load balancer orders.
+type Container_Product_Order_Network_LoadBalancer struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for a Load Balancer as a Service.
+type Container_Product_Order_Network_LoadBalancer_AsAService struct { + Container_Product_Order + + // A description of this Load Balancer. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A name to identify this Load Balancer. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The [[SoftLayer_Network_LBaaS_LoadBalancerProtocolConfiguration]]s for this Load Balancer. + ProtocolConfigurations []Network_LBaaS_LoadBalancerProtocolConfiguration `json:"protocolConfigurations,omitempty" xmlrpc:"protocolConfigurations,omitempty"` + + // The [[SoftLayer_Network_LBaaS_LoadBalancerServerInstanceInfo]]s for this Load Balancer. + ServerInstancesInformation []Network_LBaaS_LoadBalancerServerInstanceInfo `json:"serverInstancesInformation,omitempty" xmlrpc:"serverInstancesInformation,omitempty"` + + // The [[SoftLayer_Network_Subnet]]s where this Load Balancer will be provisioned. + Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a global load balancer order with SoftLayer. +type Container_Product_Order_Network_LoadBalancer_Global struct { + Container_Product_Order + + // The domain name that will be load balanced. + Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // The hostname that will be load balanced. + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a network message delivery order with SoftLayer. +type Container_Product_Order_Network_Message_Delivery struct { + Container_Product_Order + + // The account password for SendGrid enrollment. + AccountPassword *string `json:"accountPassword,omitempty" xmlrpc:"accountPassword,omitempty"` + + // The username for SendGrid enrollment. + AccountUsername *string `json:"accountUsername,omitempty" xmlrpc:"accountUsername,omitempty"` + + // The email address for SendGrid enrollment. + EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a Message Queue order with SoftLayer. +type Container_Product_Order_Network_Message_Queue struct { + Container_Product_Order +} + +// This is the base data type for Performance storage order containers. If you wish to place an order you must not use this class and instead use the appropriate child container for the type of storage you would like to order: [[SoftLayer_Container_Product_Order_Network_PerformanceStorage_Nfs]] for File and [[SoftLayer_Container_Product_Order_Network_PerformanceStorage_Iscsi]] for Block storage. +type Container_Product_Order_Network_PerformanceStorage struct { + Container_Product_Order +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for iSCSI (Block) Performance Storage +type Container_Product_Order_Network_PerformanceStorage_Iscsi struct { + Container_Product_Order_Network_PerformanceStorage + + // OS Type to be used when formatting the storage space, this should match the OS type that will be connecting to the LUN. 
The only required property is the keyName of the OS type.
+ OsFormatType *Network_Storage_Iscsi_OS_Type `json:"osFormatType,omitempty" xmlrpc:"osFormatType,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for NFS (File) Performance Storage.
+type Container_Product_Order_Network_PerformanceStorage_Nfs struct {
+ Container_Product_Order_Network_PerformanceStorage
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a hardware firewall order with SoftLayer.
+type Container_Product_Order_Network_Protection_Firewall struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a hardware (dedicated) firewall order with SoftLayer.
+type Container_Product_Order_Network_Protection_Firewall_Dedicated struct {
+ Container_Product_Order
+
+ // generic properties.
+ Vlan *Network_Vlan `json:"vlan,omitempty" xmlrpc:"vlan,omitempty"`
+
+ // generic properties.
+ VlanId *int `json:"vlanId,omitempty" xmlrpc:"vlanId,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for Storage as a Service.
+type Container_Product_Order_Network_Storage_AsAService struct {
+ Container_Product_Order
+
+ // This must be populated only when duplicating a volume from a specific snapshot. It represents the identifier of the origin [[SoftLayer_Network_Storage_Snapshot]].
+ DuplicateOriginSnapshotId *int `json:"duplicateOriginSnapshotId,omitempty" xmlrpc:"duplicateOriginSnapshotId,omitempty"`
+
+ // This must be populated only for duplicate volume ordering. It represents the identifier of the origin [[SoftLayer_Network_Storage]].
+ DuplicateOriginVolumeId *int `json:"duplicateOriginVolumeId,omitempty" xmlrpc:"duplicateOriginVolumeId,omitempty"`
+
+ // When ordering performance by IOPS, populate this property with the number of IOPS.
+ Iops *int `json:"iops,omitempty" xmlrpc:"iops,omitempty"`
+
+ // This must be populated only for replicant volume ordering. It represents the identifier of the origin [[SoftLayer_Network_Storage]].
+ OriginVolumeId *int `json:"originVolumeId,omitempty" xmlrpc:"originVolumeId,omitempty"`
+
+ // This must be populated only for replicant volume ordering. It represents the [[SoftLayer_Network_Storage_Schedule]] that will be used to replicate the origin [[SoftLayer_Network_Storage]] volume.
+ OriginVolumeScheduleId *int `json:"originVolumeScheduleId,omitempty" xmlrpc:"originVolumeScheduleId,omitempty"`
+
+ // This must be populated for block storage orders. This should match the OS type of the host(s) that will connect to the volume. The only required property is the keyName of the OS type. This property is ignored for file storage orders.
+ OsFormatType *Network_Storage_Iscsi_OS_Type `json:"osFormatType,omitempty" xmlrpc:"osFormatType,omitempty"`
+
+ // Volume size in GB.
+ VolumeSize *int `json:"volumeSize,omitempty" xmlrpc:"volumeSize,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for additional Evault plugins.
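The SoftLayer_Container_Product_Order_Network_Storage_AsAService fields above interact: iops selects the performance level, volumeSize the capacity, and osFormatType only applies to block volumes. A sketch of a block-volume container, with hypothetical package, location, and OS keyName values (upstream softlayer-go import paths assumed):

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	order := datatypes.Container_Product_Order_Network_Storage_AsAService{
		Container_Product_Order: datatypes.Container_Product_Order{
			PackageId: sl.Int(759),           // hypothetical storage package
			Location:  sl.String("DALLAS09"), // hypothetical datacenter
			Quantity:  sl.Int(1),
		},
		Iops:       sl.Int(4000), // performance ordered by IOPS
		VolumeSize: sl.Int(500),  // size in GB
		// Block storage only; ignored for file storage orders.
		OsFormatType: &datatypes.Network_Storage_Iscsi_OS_Type{
			KeyName: sl.String("LINUX"),
		},
	}
	fmt.Println("requested IOPS:", *order.Iops)
}
```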
+type Container_Product_Order_Network_Storage_Backup_Evault_Plugin struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an Evault order with SoftLayer.
+type Container_Product_Order_Network_Storage_Backup_Evault_Vault struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for Enterprise Storage.
+type Container_Product_Order_Network_Storage_Enterprise struct {
+ Container_Product_Order
+
+ // This must be populated only for replicant volume ordering. It represents the identifier of the origin [[SoftLayer_Network_Storage]].
+ OriginVolumeId *int `json:"originVolumeId,omitempty" xmlrpc:"originVolumeId,omitempty"`
+
+ // This must be populated only for replicant volume ordering. It represents the [[SoftLayer_Network_Storage_Schedule]] that will be used to replicate the origin [[SoftLayer_Network_Storage]] volume.
+ OriginVolumeScheduleId *int `json:"originVolumeScheduleId,omitempty" xmlrpc:"originVolumeScheduleId,omitempty"`
+
+ // This must be populated for block storage orders. This should match the OS type of the host(s) that will connect to the volume. The only required property is the keyName of the OS type. This property is ignored for file storage orders.
+ OsFormatType *Network_Storage_Iscsi_OS_Type `json:"osFormatType,omitempty" xmlrpc:"osFormatType,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for Enterprise Storage Snapshot Space.
+type Container_Product_Order_Network_Storage_Enterprise_SnapshotSpace struct {
+ Container_Product_Order
+
+ // The [[SoftLayer_Network_Storage]] id for which snapshot space is being ordered.
+ VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an upgrade order for Enterprise Storage Snapshot Space.
+type Container_Product_Order_Network_Storage_Enterprise_SnapshotSpace_Upgrade struct {
+ Container_Product_Order_Network_Storage_Enterprise_SnapshotSpace
+}
+
+// This datatype is to be used for object storage orders.
+type Container_Product_Order_Network_Storage_Hub struct {
+ Container_Product_Order
+}
+
+// This class is used to contain a datacenter location and its associated active usage rate prices for object storage ordering.
+type Container_Product_Order_Network_Storage_Hub_Datacenter struct {
+ Entity
+
+ // The datacenter location where object storage is available.
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // The collection of active usage rate item prices.
+ UsageRatePrices []Product_Item_Price `json:"usageRatePrices,omitempty" xmlrpc:"usageRatePrices,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an ISCSI order with SoftLayer.
+type Container_Product_Order_Network_Storage_Iscsi struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an ISCSI Replication order with SoftLayer.
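Snapshot space for Enterprise Storage is ordered against an existing volume; volumeId is the only field that ties the order back to it. A hedged sketch, with hypothetical package, price, and volume IDs (upstream softlayer-go import paths assumed):

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	order := datatypes.Container_Product_Order_Network_Storage_Enterprise_SnapshotSpace{
		Container_Product_Order: datatypes.Container_Product_Order{
			PackageId: sl.Int(240), // hypothetical snapshot-space package
			Quantity:  sl.Int(1),
			Prices:    []datatypes.Product_Item_Price{{Id: sl.Int(46160)}}, // hypothetical price
		},
		VolumeId: sl.Int(17654321), // the existing volume to add snapshot space to
	}
	fmt.Println("snapshot space for volume", *order.VolumeId)
}
```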
+type Container_Product_Order_Network_Storage_Iscsi_Replication struct {
+ Container_Product_Order
+
+ // The [[SoftLayer_Network_Storage_Iscsi_EqualLogic_Version3]] Id.
+ VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an ISCSI Snapshot Space order with SoftLayer.
+type Container_Product_Order_Network_Storage_Iscsi_SnapshotSpace struct {
+ Container_Product_Order
+
+ // The [[SoftLayer_Network_Storage_Iscsi_EqualLogic_Version3]] Id.
+ VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"`
+}
+
+// The SoftLayer_Container_Product_Order_Network_Storage_Modification datatype has everything required to place a modification to an existing StorageLayer account with SoftLayer. Modifications, at the present time, include upgrades and downgrades only. The ''volumeId'' property must be set to the network storage volume id to be upgraded. Once populated, send this container to the [[SoftLayer_Product_Order::placeOrder]] method.
+//
+// The ''packageId'' property passed in for CloudLayer storage accounts must be set to 0 (zero) and the ''quantity'' property must be set to 1. The location does not have to be set. Please use the [[SoftLayer_Product_Package]] service to retrieve a list of CloudLayer items.
+//
+// NOTE: When upgrading CloudLayer storage service from a metered plan (pay as you go) to a non-metered plan, make sure the chosen plan's storage allotment has enough space to cover the current usage. If the chosen plan's usage allotment is less than the CloudLayer storage's usage, the order will be rejected.
+type Container_Product_Order_Network_Storage_Modification struct {
+ Container_Product_Order
+
+ // The id of the StorageLayer account to modify.
+ VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder when placing network attached storage orders.
+type Container_Product_Order_Network_Storage_Nas struct {
+ Container_Product_Order
+}
+
+// This datatype is to be used for ordering object storage products using the object_storage [[SoftLayer_Product_Item_Category|category]]. For object storage products using the hub [[SoftLayer_Product_Item_Category|category]], use the [[SoftLayer_Container_Product_Order_Network_Storage_Hub]] order container.
+type Container_Product_Order_Network_Storage_Object struct {
+ Container_Product_Order
+}
+
+// This class is used to contain a location group and its associated active usage rate prices for object storage ordering.
+type Container_Product_Order_Network_Storage_ObjectStorage_LocationGroup struct {
+ Entity
+
+ // The geolocation type of the object storage cluster.
+ ClusterGeolocationType *string `json:"clusterGeolocationType,omitempty" xmlrpc:"clusterGeolocationType,omitempty"`
+
+ // The location group where object storage is available.
+ LocationGroup *Location_Group `json:"locationGroup,omitempty" xmlrpc:"locationGroup,omitempty"`
+
+ // The collection of active usage rate item prices.
+ UsageRatePrices []Product_Item_Price `json:"usageRatePrices,omitempty" xmlrpc:"usageRatePrices,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a subnet order with SoftLayer.
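The NOTE above amounts to three hard rules for CloudLayer storage modifications: packageId must be 0, quantity must be 1, and volumeId must point at the account being changed. A sketch encoding them (the price id is a hypothetical placeholder; import paths assume upstream softlayer-go):

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	order := datatypes.Container_Product_Order_Network_Storage_Modification{
		Container_Product_Order: datatypes.Container_Product_Order{
			PackageId: sl.Int(0), // must be 0 for CloudLayer storage modifications
			Quantity:  sl.Int(1), // must be 1
			Prices:    []datatypes.Product_Item_Price{{Id: sl.Int(2222)}}, // hypothetical upgrade price
		},
		VolumeId: sl.Int(12345678), // the StorageLayer account to modify
	}
	fmt.Println("modifying volume", *order.VolumeId)
}
```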
+type Container_Product_Order_Network_Subnet struct { + Container_Product_Order + + // The description which includes the network identifier, Classless Inter-Domain Routing prefix and the available slot count. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The [[SoftLayer_Network_Subnet_IpAddress]] id. + EndPointIpAddressId *int `json:"endPointIpAddressId,omitempty" xmlrpc:"endPointIpAddressId,omitempty"` + + // The [[SoftLayer_Network_Vlan]] id. + EndPointVlanId *int `json:"endPointVlanId,omitempty" xmlrpc:"endPointVlanId,omitempty"` + + // The [[SoftLayer_Network_Subnet]] id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // This is the hostname for the router associated with the [[SoftLayer_Network_Subnet|subnet]]. This is a readonly property. + RouterHostname *string `json:"routerHostname,omitempty" xmlrpc:"routerHostname,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a network ipsec vpn order with SoftLayer. +type Container_Product_Order_Network_Tunnel_Ipsec struct { + Container_Product_Order +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a network vlan order with SoftLayer. +type Container_Product_Order_Network_Vlan struct { + Container_Product_Order + + // The description which includes the primary router's hostname plus the vlan number. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The datacenter portion of the hostname. + HostnameDatacenter *string `json:"hostnameDatacenter,omitempty" xmlrpc:"hostnameDatacenter,omitempty"` + + // The router portion of the hostname. + HostnameRouter *string `json:"hostnameRouter,omitempty" xmlrpc:"hostnameRouter,omitempty"` + + // The [[SoftLayer_Network_Vlan]] id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The optional name for this VLAN + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The router object on which the new VLAN should be created. + Router *Hardware `json:"router,omitempty" xmlrpc:"router,omitempty"` + + // The ID of the [[SoftLayer_Hardware_Router]] object on which the new VLAN should be created. + RouterId *int `json:"routerId,omitempty" xmlrpc:"routerId,omitempty"` + + // The collection of subnets associated with this vlan. + Subnets []Container_Product_Order `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` + + // The vlan number. + VlanNumber *int `json:"vlanNumber,omitempty" xmlrpc:"vlanNumber,omitempty"` +} + +// This class contains the collections of public and private VLANs that are available during the ordering process. +type Container_Product_Order_Network_Vlans struct { + Entity + + // The collection of private vlans available during ordering. + PrivateVlans []Container_Product_Order `json:"privateVlans,omitempty" xmlrpc:"privateVlans,omitempty"` + + // The collection of public vlans available during ordering. + PublicVlans []Container_Product_Order `json:"publicVlans,omitempty" xmlrpc:"publicVlans,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder when linking a Bluemix account to a newly created SoftLayer account. 
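Of the SoftLayer_Container_Product_Order_Network_Vlan fields above, name and routerId are the ones a caller typically supplies; description, the hostname parts, and vlanNumber are informational. A sketch populating that container, with a hypothetical router id and the package/price lookup elided:

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	order := datatypes.Container_Product_Order_Network_Vlan{
		Container_Product_Order: datatypes.Container_Product_Order{
			Quantity: sl.Int(1),
			// PackageId and Prices elided; look them up via SoftLayer_Product_Package.
		},
		Name:     sl.String("backend-vlan"), // optional friendly name
		RouterId: sl.Int(987654),            // hypothetical router to host the VLAN
	}
	fmt.Println("ordering vlan", *order.Name)
}
```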
+type Container_Product_Order_NewCustomerSetup struct {
+ Container_Product_Order
+
+ // no documentation yet
+ AuthorizationToken *string `json:"authorizationToken,omitempty" xmlrpc:"authorizationToken,omitempty"`
+
+ // no documentation yet
+ ExternalAccountId *string `json:"externalAccountId,omitempty" xmlrpc:"externalAccountId,omitempty"`
+
+ // no documentation yet
+ ExternalServiceProviderKey *string `json:"externalServiceProviderKey,omitempty" xmlrpc:"externalServiceProviderKey,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order for Private Cloud.
+type Container_Product_Order_Private_Cloud struct {
+ Container_Product_Order
+}
+
+// This is used for storing various items about the order. Currently used for storing additional raid information when ordering servers. This is optional.
+type Container_Product_Order_Property struct {
+ Entity
+
+ // The property name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The property value
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// When an order is placed (SoftLayer_Product_Order::placeOrder), a receipt is returned if the order is created successfully. The information in the receipt describes the order: its order ID and all the data within the order.
+//
+// For PayPal orders, a URL is also returned so that the user can complete the transaction. Users paying with PayPal must continue on to this URL, log in, and pay. When doing this, PayPal will redirect the user back to a SoftLayer page which will then "finalize" the authorization process. From here, Sales will verify the order by contacting the user in some way, unless Sales has already spoken to the user about approving the order.
+//
+// For users paying with a credit card, a receipt means the order has gone to Sales and is awaiting approval.
+type Container_Product_Order_Receipt struct {
+ Entity
+
+ // This URL refers to the location where you will visit to complete the payment authorization for an external service, such as PayPal. This property is associated with externalPaymentToken and will only be populated when purchasing products with an external service.
+ //
+ // Once you visit this location, you will be presented with the options to confirm payment or deny payment. If you confirm payment, you will be redirected back to the receipt for your order. If you deny, you will be redirected back to the cancel order page where you do not need to take any additional action.
+ //
+ // Until you confirm payment with the external service, your products will not be provisioned or accessible for your consumption. Upon successfully confirming payment, our system will be notified and the order approval and provisioning systems will begin processing. After provisioning is complete, your services will be available.
+ ExternalPaymentCheckoutUrl *string `json:"externalPaymentCheckoutUrl,omitempty" xmlrpc:"externalPaymentCheckoutUrl,omitempty"`
+
+ // This token refers to the identifier for the external payment authorization. This token is associated with the externalPaymentCheckoutUrl and is only populated when purchasing products with an external service like PayPal.
+ ExternalPaymentToken *string `json:"externalPaymentToken,omitempty" xmlrpc:"externalPaymentToken,omitempty"`
+
+ // The date when SoftLayer received the order.
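Once a container reaches placeOrder, the receipt described above is what comes back, and for external payments the checkout URL gates provisioning. A sketch, assuming the generated service bindings that usually accompany these datatypes (services/session packages) and hypothetical credentials; the PlaceOrder signature is taken from upstream softlayer-go and may differ in this vendored copy:

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("apiUser", "apiKey")                          // hypothetical credentials
	order := datatypes.Container_Product_Order{PackageId: sl.Int(46)} // abbreviated order

	receipt, err := services.GetProductOrderService(sess).PlaceOrder(&order, sl.Bool(false))
	if err != nil {
		log.Fatal(err)
	}
	if receipt.ExternalPaymentCheckoutUrl != nil {
		// External (e.g. PayPal) flow: nothing provisions until the user
		// confirms payment at this URL.
		fmt.Println("confirm payment at:", *receipt.ExternalPaymentCheckoutUrl)
	} else if receipt.OrderId != nil {
		fmt.Println("order received, id:", *receipt.OrderId)
	}
}
```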
+ OrderDate *Time `json:"orderDate,omitempty" xmlrpc:"orderDate,omitempty"`
+
+ // This is a copy of the order container (SoftLayer_Container_Product_Order) which holds all the data related to an order. This will only return when an order is processed successfully. It will contain all the items in an order as well as the order totals.
+ OrderDetails *Container_Product_Order `json:"orderDetails,omitempty" xmlrpc:"orderDetails,omitempty"`
+
+ // SoftLayer's unique identifier for the order.
+ OrderId *int `json:"orderId,omitempty" xmlrpc:"orderId,omitempty"`
+
+ // Deprecation notice: use externalPaymentCheckoutUrl instead of this property.
+ //
+ // This URL refers to the location where you will visit to complete the payment authorization for PayPal. This property is associated with paypalToken and will only be populated when purchasing products with PayPal.
+ //
+ // Once you visit PayPal's site, you will be presented with the options to confirm payment or deny payment. If you confirm payment, you will be redirected back to the receipt for your order. If you deny, you will be redirected back to the cancel order page where you do not need to take any additional action.
+ //
+ // Until you confirm payment with PayPal, your products will not be provisioned or accessible for your consumption. Upon successfully confirming payment, our system will be notified and the order approval and provisioning systems will begin processing. After provisioning is complete, your services will be available.
+ PaypalCheckoutUrl *string `json:"paypalCheckoutUrl,omitempty" xmlrpc:"paypalCheckoutUrl,omitempty"`
+
+ // Deprecation notice: use externalPaymentToken instead of this property.
+ //
+ // This token refers to the identifier provided when payment is processed via PayPal. This token is associated with the paypalCheckoutUrl.
+ PaypalToken *string `json:"paypalToken,omitempty" xmlrpc:"paypalToken,omitempty"`
+
+ // This is a copy of the order that was successfully placed (SoftLayer_Billing_Order). This will only return when an order is processed successfully.
+ PlacedOrder *Billing_Order `json:"placedOrder,omitempty" xmlrpc:"placedOrder,omitempty"`
+
+ // This is a copy of the quote container (SoftLayer_Billing_Order_Quote) which holds all the data related to a quote. This will only return when a quote is processed successfully.
+ Quote *Billing_Order_Quote `json:"quote,omitempty" xmlrpc:"quote,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype contains everything required to place a secure certificate order with SoftLayer.
+type Container_Product_Order_Security_Certificate struct {
+ Container_Product_Order
+
+ // The administrator contact associated with an SSL certificate. If the contact is not provided, the technical contact will be used. If the address is not provided, the organization information address will be used.
+ AdministrativeContact *Container_Product_Order_Attribute_Contact `json:"administrativeContact,omitempty" xmlrpc:"administrativeContact,omitempty"`
+
+ // The billing contact associated with an SSL certificate. If the contact is not provided, the technical contact will be used. If the address is not provided, the organization information address will be used.
+ BillingContact *Container_Product_Order_Attribute_Contact `json:"billingContact,omitempty" xmlrpc:"billingContact,omitempty"`
+
+ // The base64 encoded string that is sent from an applicant to a certificate authority.
The CSR contains information identifying the applicant and the public key chosen by the applicant. The corresponding private key should not be included.
+ CertificateSigningRequest *string `json:"certificateSigningRequest,omitempty" xmlrpc:"certificateSigningRequest,omitempty"`
+
+ // The email address that can approve a secure certificate order.
+ OrderApproverEmailAddress *string `json:"orderApproverEmailAddress,omitempty" xmlrpc:"orderApproverEmailAddress,omitempty"`
+
+ // The organization information associated with an SSL certificate.
+ OrganizationInformation *Container_Product_Order_Attribute_Organization `json:"organizationInformation,omitempty" xmlrpc:"organizationInformation,omitempty"`
+
+ // Indicates if it is a renewal order of an existing SSL certificate.
+ RenewalFlag *bool `json:"renewalFlag,omitempty" xmlrpc:"renewalFlag,omitempty"`
+
+ // The number of servers.
+ ServerCount *int `json:"serverCount,omitempty" xmlrpc:"serverCount,omitempty"`
+
+ // The server type. This is the name from a [[SoftLayer_Security_Certificate_Request_ServerType]] object.
+ ServerType *string `json:"serverType,omitempty" xmlrpc:"serverType,omitempty"`
+
+ // The technical contact associated with an SSL certificate. If the address is not provided, the organization information address will be used.
+ TechnicalContact *Container_Product_Order_Attribute_Contact `json:"technicalContact,omitempty" xmlrpc:"technicalContact,omitempty"`
+
+ // The period, in months, that an SSL certificate is valid for. For example: 12, 24, 36.
+ ValidityMonths *int `json:"validityMonths,omitempty" xmlrpc:"validityMonths,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder.
+type Container_Product_Order_Service struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a virtual license order with SoftLayer.
+type Container_Product_Order_Software_Component_Virtual struct {
+ Container_Product_Order
+
+ // An array of IP address ids for which a license should be allocated.
+ EndPointIpAddressIds []int `json:"endPointIpAddressIds,omitempty" xmlrpc:"endPointIpAddressIds,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a software license order with SoftLayer.
+type Container_Product_Order_Software_License struct {
+ Container_Product_Order
+}
+
+// This object holds all of the ssh key ids that will allow authentication to a single server.
+type Container_Product_Order_SshKeys struct {
+ Entity
+
+ // An array of SoftLayer_Security_Ssh_Key IDs to assign to a server.
+ SshKeyIds []int `json:"sshKeyIds,omitempty" xmlrpc:"sshKeyIds,omitempty"`
+}
+
+// A single storage group container used for a hardware server order.
+//
+// This object describes a single storage group that can be added to an order container.
+type Container_Product_Order_Storage_Group struct {
+ Entity
+
+ // Size of the array in gigabytes. Must be within the limitations of the smallest drive assigned to the storage group and the storage group type.
+ ArraySize *Float64 `json:"arraySize,omitempty" xmlrpc:"arraySize,omitempty"`
+
+ // The array type id from a [[SoftLayer_Configuration_Storage_Group_Array_Type]] object.
+ ArrayTypeId *int `json:"arrayTypeId,omitempty" xmlrpc:"arrayTypeId,omitempty"`
+
+ // Integer array of drive indexes to use in the storage group.
+ HardDrives []int `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"`
+
+ // If an array should be protected by a hotspare, the drive indexes of the hotspares should be listed here.
+ //
+ // If a drive is a hotspare for all arrays, then a separate storage group with array type GLOBAL_HOT_SPARE should be used.
+ HotSpareDrives []int `json:"hotSpareDrives,omitempty" xmlrpc:"hotSpareDrives,omitempty"`
+
+ // The id for a [[SoftLayer_Hardware_Component_Partition_Template]] object, which will determine the partitions to add to the storage group.
+ //
+ // If this storage group is not a primary storage group, then this will not be used.
+ PartitionTemplateId *int `json:"partitionTemplateId,omitempty" xmlrpc:"partitionTemplateId,omitempty"`
+
+ // Defines the partitions for the storage group.
+ //
+ // If this storage group is not a secondary storage group, then this will not be used.
+ Partitions []Container_Product_Order_Storage_Group_Partition `json:"partitions,omitempty" xmlrpc:"partitions,omitempty"`
+}
+
+// A storage group partition container used for a hardware server order.
+//
+// This object describes the partitions for a single storage group that can be added to an order container.
+type Container_Product_Order_Storage_Group_Partition struct {
+ Entity
+
+ // Is this a grow partition?
+ IsGrow *bool `json:"isGrow,omitempty" xmlrpc:"isGrow,omitempty"`
+
+ // The name of this partition.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The size of this partition.
+ Size *Float64 `json:"size,omitempty" xmlrpc:"size,omitempty"`
+}
+
+// When ordering paid support, this datatype needs to be populated and sent to SoftLayer_Product_Order::placeOrder.
+type Container_Product_Order_Support struct {
+ Container_Product_Order
+}
+
+// This container type is used for placing orders for external authentication, such as phone-based authentication.
+type Container_Product_Order_User_Customer_External_Binding struct {
+ Container_Product_Order
+
+ // The external id for which access to external authentication is being purchased.
+ ExternalId *string `json:"externalId,omitempty" xmlrpc:"externalId,omitempty"`
+
+ // The SoftLayer [[SoftLayer_User_Customer|user]] identifier that an external binding is being purchased for.
+ UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+
+ // The [[SoftLayer_User_Customer_External_Binding_Vendor|vendor]] identifier for the external binding being purchased.
+ VendorId *int `json:"vendorId,omitempty" xmlrpc:"vendorId,omitempty"`
+}
+
+// This is the default container type for Dedicated Virtual Host orders.
+type Container_Product_Order_Virtual_DedicatedHost struct {
+ Container_Product_Order
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place a Portable Storage order with SoftLayer.
+type Container_Product_Order_Virtual_Disk_Image struct {
+ Container_Product_Order
+
+ // Label for the portable storage volume.
+ DiskDescription *string `json:"diskDescription,omitempty" xmlrpc:"diskDescription,omitempty"`
+}
+
+// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer.
+type Container_Product_Order_Virtual_Guest struct {
+ Container_Product_Order_Hardware_Server
+
+ // Identifier of the [[SoftLayer_Virtual_Disk_Image]] to boot from.
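Putting the two storage-group containers above together: the first group in storageGroups is the primary (OS) group and takes a partition template, while secondary groups define their own partitions and may carry hot spares. A sketch with hypothetical array type ids and sizes; the groups only take effect when the 'RAID' disk controller price is selected, per the storageGroups documentation earlier:

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	osSize, dataSize := datatypes.Float64(500), datatypes.Float64(1800)

	// First group is primary (holds the OS); partitions come from a template.
	primary := datatypes.Container_Product_Order_Storage_Group{
		ArrayTypeId:         sl.Int(2), // hypothetical id for RAID 1
		ArraySize:           &osSize,
		HardDrives:          []int{0, 1},
		PartitionTemplateId: sl.Int(1), // hypothetical partition template
	}
	// Secondary data group: RAID 5 over drives 2-4 with drive 5 as hot spare.
	data := datatypes.Container_Product_Order_Storage_Group{
		ArrayTypeId:    sl.Int(3), // hypothetical id for RAID 5
		ArraySize:      &dataSize,
		HardDrives:     []int{2, 3, 4},
		HotSpareDrives: []int{5},
		Partitions: []datatypes.Container_Product_Order_Storage_Group_Partition{
			{Name: sl.String("/data"), IsGrow: sl.Bool(true)},
		},
	}

	order := datatypes.Container_Product_Order{
		StorageGroups: []datatypes.Container_Product_Order_Storage_Group{primary, data},
	}
	fmt.Println("storage groups:", len(order.StorageGroups))
}
```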
+ BootableDiskId *int `json:"bootableDiskId,omitempty" xmlrpc:"bootableDiskId,omitempty"` + + // Identifier of [[SoftLayer_Virtual_DedicatedHost]] to order + HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"` +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Product_Order::placeOrder. This datatype has everything required to place an order with SoftLayer. +type Container_Product_Order_Virtual_Guest_Upgrade struct { + Container_Product_Order_Virtual_Guest +} + +// This is the datatype that needs to be populated and sent to SoftLayer_Provisioning_Maintenance_Window::addCustomerUpgradeWindow. This datatype has everything required to place an order with SoftLayer. +type Container_Provisioning_Maintenance_Window struct { + Entity + + // Maintenance classifications. + ClassificationIds []Provisioning_Maintenance_Classification `json:"classificationIds,omitempty" xmlrpc:"classificationIds,omitempty"` + + // Maintenance classifications. + ItemCategoryIds []Product_Item_Category `json:"itemCategoryIds,omitempty" xmlrpc:"itemCategoryIds,omitempty"` + + // The maintenance window id + MaintenanceWindowId *int `json:"maintenanceWindowId,omitempty" xmlrpc:"maintenanceWindowId,omitempty"` + + // Maintenance window ticket id + TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"` + + // Maintenance window date + WindowMaintenanceDate *Time `json:"windowMaintenanceDate,omitempty" xmlrpc:"windowMaintenanceDate,omitempty"` +} + +// no documentation yet +type Container_Referral_Partner_Commission struct { + Entity + + // no documentation yet + CommissionAmount *Float64 `json:"commissionAmount,omitempty" xmlrpc:"commissionAmount,omitempty"` + + // no documentation yet + CommissionRate *Float64 `json:"commissionRate,omitempty" xmlrpc:"commissionRate,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + ReferralAccountId *int `json:"referralAccountId,omitempty" xmlrpc:"referralAccountId,omitempty"` + + // no documentation yet + ReferralCompanyName *string `json:"referralCompanyName,omitempty" xmlrpc:"referralCompanyName,omitempty"` + + // no documentation yet + ReferralPartnerAccountId *int `json:"referralPartnerAccountId,omitempty" xmlrpc:"referralPartnerAccountId,omitempty"` + + // no documentation yet + ReferralRevenue *Float64 `json:"referralRevenue,omitempty" xmlrpc:"referralRevenue,omitempty"` +} + +// no documentation yet +type Container_Referral_Partner_Payment_Option struct { + Entity + + // no documentation yet + AccountNumber *string `json:"accountNumber,omitempty" xmlrpc:"accountNumber,omitempty"` + + // no documentation yet + AccountType *string `json:"accountType,omitempty" xmlrpc:"accountType,omitempty"` + + // no documentation yet + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // no documentation yet + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + BankTransitNumber *string `json:"bankTransitNumber,omitempty" xmlrpc:"bankTransitNumber,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + FederalTaxId *string `json:"federalTaxId,omitempty" xmlrpc:"federalTaxId,omitempty"` + + 
// no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + PaymentType *string `json:"paymentType,omitempty" xmlrpc:"paymentType,omitempty"` + + // no documentation yet + PaypalEmail *string `json:"paypalEmail,omitempty" xmlrpc:"paypalEmail,omitempty"` + + // no documentation yet + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// no documentation yet +type Container_Referral_Partner_Prospect struct { + Entity + + // no documentation yet + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // no documentation yet + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // no documentation yet + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // no documentation yet + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // no documentation yet + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // no documentation yet + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // no documentation yet + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // no documentation yet + Questions []string `json:"questions,omitempty" xmlrpc:"questions,omitempty"` + + // no documentation yet + Responses []Survey_Response `json:"responses,omitempty" xmlrpc:"responses,omitempty"` + + // no documentation yet + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // no documentation yet + SurveyId *string `json:"surveyId,omitempty" xmlrpc:"surveyId,omitempty"` +} + +// The SoftLayer_Container_RemoteManagement_Graphs_SensorSpeed contains graphs to display speed for each of the server's fans. Fan speeds are gathered from the server's remote management card. +type Container_RemoteManagement_Graphs_SensorSpeed struct { + Entity + + // The graph to display the server's fan speed. + Graph *[]byte `json:"graph,omitempty" xmlrpc:"graph,omitempty"` + + // A title that may be used to display for the graph. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` +} + +// The SoftLayer_Container_RemoteManagement_Graphs_SensorTemperature contains graphs to display the cpu(s) and system temperatures retrieved from the management card using thermometer graphs. +type Container_RemoteManagement_Graphs_SensorTemperature struct { + Entity + + // The graph to display the server's cpu(s) and system temperatures. + Graph *[]byte `json:"graph,omitempty" xmlrpc:"graph,omitempty"` + + // A title that may be used to display for the graph. + Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"` +} + +// The SoftLayer_Container_RemoteManagement_PmInfo contains pminfo information retrieved from a server's remote management card. 
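The Graph fields in the sensor graph containers above are raw image bytes (PNG in practice), so consuming them is just a file write. A sketch, assuming the upstream datatypes import path; the placeholder bytes stand in for a real management-card graph:

```go
package main

import (
	"fmt"
	"os"

	"github.com/softlayer/softlayer-go/datatypes"
)

// savePNG writes a temperature graph's raw PNG bytes to disk. How the
// container is fetched (a sensor-data call on the hardware service) is
// out of scope here.
func savePNG(g datatypes.Container_RemoteManagement_Graphs_SensorTemperature) error {
	if g.Graph == nil {
		return fmt.Errorf("graph has no image data")
	}
	name := "sensors.png"
	if g.Title != nil {
		name = *g.Title + ".png"
	}
	return os.WriteFile(name, *g.Graph, 0o644)
}

func main() {
	title := "cpu-temperature"
	png := []byte{0x89, 'P', 'N', 'G'} // placeholder bytes for the sketch
	g := datatypes.Container_RemoteManagement_Graphs_SensorTemperature{Graph: &png, Title: &title}
	if err := savePNG(g); err != nil {
		fmt.Println(err)
	}
}
```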
+type Container_RemoteManagement_PmInfo struct {
+ Entity
+
+ // PmInfo ID
+ PmInfoId *string `json:"pmInfoId,omitempty" xmlrpc:"pmInfoId,omitempty"`
+
+ // PmInfo Reading
+ PmInfoReading *string `json:"pmInfoReading,omitempty" xmlrpc:"pmInfoReading,omitempty"`
+}
+
+// The SoftLayer_Container_RemoteManagement_SensorReadings contains sensor information retrieved from a server's remote management card.
+type Container_RemoteManagement_SensorReading struct {
+ Entity
+
+ // Lower Critical threshold
+ LowerCritical *string `json:"lowerCritical,omitempty" xmlrpc:"lowerCritical,omitempty"`
+
+ // Lower Non-Critical threshold
+ LowerNonCritical *string `json:"lowerNonCritical,omitempty" xmlrpc:"lowerNonCritical,omitempty"`
+
+ // Lower Non-Recoverable threshold
+ LowerNonRecoverable *string `json:"lowerNonRecoverable,omitempty" xmlrpc:"lowerNonRecoverable,omitempty"`
+
+ // Sensor ID
+ SensorId *string `json:"sensorId,omitempty" xmlrpc:"sensorId,omitempty"`
+
+ // Sensor Reading
+ SensorReading *string `json:"sensorReading,omitempty" xmlrpc:"sensorReading,omitempty"`
+
+ // Sensor Units
+ SensorUnits *string `json:"sensorUnits,omitempty" xmlrpc:"sensorUnits,omitempty"`
+
+ // Sensor Status
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // Upper Critical threshold
+ UpperCritical *string `json:"upperCritical,omitempty" xmlrpc:"upperCritical,omitempty"`
+
+ // Upper Non-Critical threshold
+ UpperNonCritical *string `json:"upperNonCritical,omitempty" xmlrpc:"upperNonCritical,omitempty"`
+
+ // Upper Non-Recoverable threshold
+ UpperNonRecoverable *string `json:"upperNonRecoverable,omitempty" xmlrpc:"upperNonRecoverable,omitempty"`
+}
+
+// The SoftLayer_Container_RemoteManagement_SensorReadingsWithGraphs contains the raw data retrieved from a server's remote management card. Along with the raw data, two sets of graphs will be returned. One set of graphs is used to display, using thermometer graphs, the temperatures (cpu(s) and system) retrieved from the management card. The other set is used to display the speed for each of the server's fans.
+type Container_RemoteManagement_SensorReadingsWithGraphs struct {
+ Entity
+
+ // The raw data returned from the server's remote management card.
+ RawData []Container_RemoteManagement_SensorReading `json:"rawData,omitempty" xmlrpc:"rawData,omitempty"`
+
+ // The graph(s) to display the server's fan speeds.
+ SpeedGraphs []Container_RemoteManagement_Graphs_SensorSpeed `json:"speedGraphs,omitempty" xmlrpc:"speedGraphs,omitempty"`
+
+ // The graph(s) to display the server's cpu(s) and system temperatures.
+ TemperatureGraphs []Container_RemoteManagement_Graphs_SensorTemperature `json:"temperatureGraphs,omitempty" xmlrpc:"temperatureGraphs,omitempty"`
+}
+
+// The metadata service resource container is used to store information about a single service resource.
+type Container_Resource_Metadata_ServiceResource struct {
+ Entity
+
+ // The backend IP address for this resource
+ BackendIpAddress *string `json:"backendIpAddress,omitempty" xmlrpc:"backendIpAddress,omitempty"`
+
+ // The type for this resource
+ Type *Network_Service_Resource_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// This data type is a container that stores information about a single indexed object type. Object type information can be used for discovery of searchable data and for creation or validation of object index search strings.
Each of these containers holds a collection of [[SoftLayer_Container_Search_ObjectType_Property (type)|SoftLayer_Container_Search_ObjectType_Property]] objects, specifying which object properties are exposed for the current user. Refer to the documentation for the [[SoftLayer_Search/search|search()]] method for information on using object types in search strings.
+type Container_Search_ObjectType struct {
+ Entity
+
+ // Name of object type.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A collection of [[SoftLayer_Container_Search_ObjectType_Property|object properties]].
+ Properties []Container_Search_ObjectType_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"`
+}
+
+// This data type is a container that stores information about a single property of a searchable object type. Each [[SoftLayer_Container_Search_ObjectType (type)|SoftLayer_Container_Search_ObjectType]] object holds a collection of these properties. Property information can be used for discovery of searchable data and for the creation or validation of object index search strings. Note that properties are only understood by the [[SoftLayer_Search/advancedSearch|advancedSearch()]] method. Refer to the advancedSearch() method for information on using properties in search strings.
+type Container_Search_ObjectType_Property struct {
+ Entity
+
+ // Name of property.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // Indicates if this property can be sorted.
+ SortableFlag *bool `json:"sortableFlag,omitempty" xmlrpc:"sortableFlag,omitempty"`
+
+ // Property data type. Valid values include 'boolean', 'integer', 'date', 'string' or 'text'.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// The SoftLayer_Container_Search_Result data type represents a result row from an execution of the Search service.
+type Container_Search_Result struct {
+ Entity
+
+ // An array of terms that were matched in the resource object.
+ MatchedTerms []string `json:"matchedTerms,omitempty" xmlrpc:"matchedTerms,omitempty"`
+
+ // The score ratio of the result for relevance to the search criteria.
+ RelevanceScore *Float64 `json:"relevanceScore,omitempty" xmlrpc:"relevanceScore,omitempty"`
+
+ // A search result's resource object that matched the search criteria.
+ Resource *Entity `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+ // The type of the resource object that matched the search criteria.
+ ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"`
+}
+
+// The SoftLayer_Container_Software_Component_HostIps_Policy container holds the title and value of a current host ips policy.
+type Container_Software_Component_HostIps_Policy struct {
+ Entity
+
+ // The value of a host ips category.
+ Policy *string `json:"policy,omitempty" xmlrpc:"policy,omitempty"`
+
+ // The category title of a host ips policy.
+ PolicyTitle *string `json:"policyTitle,omitempty" xmlrpc:"policyTitle,omitempty"`
+}
+
+// These are the results of a tax calculation. The tax calculation was kicked off but allowed to run in the background. This type stores the results so that an interface can be updated with up-to-date information.
+type Container_Tax_Cache struct {
+ Entity
+
+ // The percentage of the final total that should be tax.
+ EffectiveTaxRate *Float64 `json:"effectiveTaxRate,omitempty" xmlrpc:"effectiveTaxRate,omitempty"`
+
+ // The container that holds the four actual tax rates, one for each fee type.
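verifyOrder can return before the background tax calculation finishes; the taxCacheHash on the order container pairs with this Container_Tax_Cache so a caller can poll for the final rates. A small helper sketch (the status string values are documented on the field below):

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

// taxResult reports whether a background tax calculation has finished and,
// if so, the effective rate. Status values per the field documentation:
// PENDING, FAILED, or COMPLETED.
func taxResult(tc datatypes.Container_Tax_Cache) (done bool, rate float64) {
	if tc.Status == nil || *tc.Status != "COMPLETED" {
		return false, 0
	}
	if tc.EffectiveTaxRate != nil {
		rate = float64(*tc.EffectiveTaxRate)
	}
	return true, rate
}

func main() {
	r := datatypes.Float64(0.0825)
	tc := datatypes.Container_Tax_Cache{Status: sl.String("COMPLETED"), EffectiveTaxRate: &r}
	fmt.Println(taxResult(tc))
}
```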
+ Items []Container_Tax_Cache_Item `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // The status of the tax request. This should be PENDING, FAILED, or COMPLETED. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The final amount of tax for the order. + TotalTaxAmount *Float64 `json:"totalTaxAmount,omitempty" xmlrpc:"totalTaxAmount,omitempty"` +} + +// This represents one order item in a tax calculation. +type Container_Tax_Cache_Item struct { + Entity + + // The category code for the referenced product. + CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"` + + // This hash will match to the hash on an order container. + ContainerHash *string `json:"containerHash,omitempty" xmlrpc:"containerHash,omitempty"` + + // The reference to the price for this order item. + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` + + // This is the container containing the individual tax rates. + TaxRates *Container_Tax_Rates `json:"taxRates,omitempty" xmlrpc:"taxRates,omitempty"` +} + +// This contains the four tax rates, one for each fee type. +type Container_Tax_Rates struct { + Entity + + // The tax rate associated with the labor fee. + LaborTaxRate *Float64 `json:"laborTaxRate,omitempty" xmlrpc:"laborTaxRate,omitempty"` + + // A reference to a location. + LocationId *Float64 `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // The tax rate associated with the one-time fee. + OneTimeTaxRate *Float64 `json:"oneTimeTaxRate,omitempty" xmlrpc:"oneTimeTaxRate,omitempty"` + + // The tax rate associated with the recurring fee. + RecurringTaxRate *Float64 `json:"recurringTaxRate,omitempty" xmlrpc:"recurringTaxRate,omitempty"` + + // The tax rate associated with the setup fee. + SetupTaxRate *Float64 `json:"setupTaxRate,omitempty" xmlrpc:"setupTaxRate,omitempty"` +} + +// SoftLayer_Container_Ticket_GraphInputs models a single inbound object for a given ticket graph. +type Container_Ticket_GraphInputs struct { + Entity + + // This is a unix timestamp that represents the stop date/time for a graph. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The front-end or back-end network uplink interface associated with this server. + NetworkInterfaceId *int `json:"networkInterfaceId,omitempty" xmlrpc:"networkInterfaceId,omitempty"` + + // * + Pod *int `json:"pod,omitempty" xmlrpc:"pod,omitempty"` + + // This is a human readable name for the server or rack being graphed. + ServerName *string `json:"serverName,omitempty" xmlrpc:"serverName,omitempty"` + + // This is a unix timestamp that represents the begin date/time for a graph. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} + +// SoftLayer_Container_Ticket_GraphOutputs models a single outbound object for a given bandwidth graph. +type Container_Ticket_GraphOutputs struct { + Entity + + // The raw PNG binary data to be displayed once the graph is drawn. + GraphImage *[]byte `json:"graphImage,omitempty" xmlrpc:"graphImage,omitempty"` + + // The title that ended up being displayed as part of the graph image. + GraphTitle *string `json:"graphTitle,omitempty" xmlrpc:"graphTitle,omitempty"` + + // The maximum date included in this graph. + MaxEndDate *Time `json:"maxEndDate,omitempty" xmlrpc:"maxEndDate,omitempty"` + + // The minimum date included in this graph. 
+ MinStartDate *Time `json:"minStartDate,omitempty" xmlrpc:"minStartDate,omitempty"`
+}
+
+// no documentation yet
+type Container_Ticket_Priority struct {
+ Entity
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // no documentation yet
+ Value *int `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// no documentation yet
+type Container_Ticket_Survey_Preference struct {
+ Entity
+
+ // no documentation yet
+ Applicable *bool `json:"applicable,omitempty" xmlrpc:"applicable,omitempty"`
+
+ // no documentation yet
+ OptedOut *bool `json:"optedOut,omitempty" xmlrpc:"optedOut,omitempty"`
+
+ // no documentation yet
+ OptedOutDate *Time `json:"optedOutDate,omitempty" xmlrpc:"optedOutDate,omitempty"`
+}
+
+// Container class used to hold a user authentication token
+type Container_User_Authentication_Token struct {
+ Entity
+
+ // hash that gets populated for user authentication
+ Hash *string `json:"hash,omitempty" xmlrpc:"hash,omitempty"`
+
+ // the authenticated user object
+ User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+ // the id of the user to authenticate
+ UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// Container class used to hold external authentication information
+type Container_User_Customer_External_Binding struct {
+ Entity
+
+ // The unique token that is created by an external authentication request.
+ AuthenticationToken *string `json:"authenticationToken,omitempty" xmlrpc:"authenticationToken,omitempty"`
+
+ // The OpenID Connect access token which provides access to a resource by the OpenID Connect provider.
+ OpenIdConnectAccessToken *string `json:"openIdConnectAccessToken,omitempty" xmlrpc:"openIdConnectAccessToken,omitempty"`
+
+ // The account to log in to; if not provided, a default will be used.
+ OpenIdConnectAccountId *int `json:"openIdConnectAccountId,omitempty" xmlrpc:"openIdConnectAccountId,omitempty"`
+
+ // The OpenID Connect provider type, as a string.
+ OpenIdConnectProvider *string `json:"openIdConnectProvider,omitempty" xmlrpc:"openIdConnectProvider,omitempty"`
+
+ // Your SoftLayer customer portal user's portal password.
+ Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+ // The answer to your security question.
+ SecurityQuestionAnswer *string `json:"securityQuestionAnswer,omitempty" xmlrpc:"securityQuestionAnswer,omitempty"`
+
+ // A security question you wish to answer when authenticating to the SoftLayer customer portal. This parameter isn't required if no security questions are set on your portal account or if your account is configured not to require answering a security question upon login.
+ SecurityQuestionId *int `json:"securityQuestionId,omitempty" xmlrpc:"securityQuestionId,omitempty"`
+
+ // The username you wish to authenticate to the SoftLayer customer portal with.
+ Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+
+ // The name of the vendor that will be used for external authentication
+ Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"`
+}
+
+// Container class used to hold a portal token
+type Container_User_Customer_External_Binding_Phone struct {
+ Container_User_Customer_External_Binding
+}
+
+// This container can be used to configure the phone authentication mode. By default, "VOICE_CALL" in "STANDARD" mode with no PIN will be used. In the default mode you will receive a phone call from a trusted second-factor vendor during the authentication process; you have to answer the call and follow the instructions in order to complete the authentication.
+//
+// You can also use SMS text message or PhoneFactor mobile app modes (in case you're using PhoneFactor). Additionally, you can set up a PIN. By requiring you to verify your secret PIN, you can ensure that you have possession of your phone.
+type Container_User_Customer_External_Binding_Phone_Mode struct {
+ Entity
+
+ // Authentication mode. Valid modes are: VOICE_CALL, SMS_TEXT, PHONE_APP
+ //
+ //
+ // *VOICE_CALL
+ // In this mode, users will receive a phone call to authenticate. Using a PIN can enhance the security of the phone authentication by requiring the user to enter a PIN during the authentication call. Valid PIN modes are: PIN, VOICE_PRINT, STANDARD
+ //
+ //
+ // **STANDARD: (default) No PIN is used.
+ // **PIN: 4 to 10 digit numeric value
+ // **VOICE_PRINT: The user's voice will be used to identify the user.
+ //
+ //
+ // *SMS_TEXT
+ // SMS Text mode will send an SMS text message to the user's phone to complete the authentication. There are 2 different PIN modes:
+ //
+ //
+ // **OTP: (default) A text message containing a One-Time Passcode (OTP) is sent to the user. The user must reply to the text message with this OTP to complete the authentication.
+ // **OTP_PIN: This mode enhances the security of the authentication by requiring the user to enter the OTP + their PIN in the text reply.
+ //
+ //
+ //
+ //
+ // *PHONE_APP
+ // This mode is applicable for PhoneFactor. Phone App mode results in a notification being sent to the user's PhoneFactor phone app. There are 2 different PIN modes for the mobile app authentication.
+ // **STANDARD: (default) The first authentication is when the user signs on using a username and password.
+ // The second authentication is when the user receives a notification in the PhoneFactor phone app. In Standard mode, users will be prompted to authenticate, deny, or deny and report fraud.
+ // **PIN: This mode enhances the security of the authentication by requiring the user to enter their PIN in the phone app.
+ Mode *string `json:"mode,omitempty" xmlrpc:"mode,omitempty"`
+
+ // Optional authentication PIN.
+ Pin *string `json:"pin,omitempty" xmlrpc:"pin,omitempty"`
+
+ // Available PIN modes are: PIN, VOICE_PRINT, STANDARD. Default: STANDARD (no PIN is used)
+ PinMode *string `json:"pinMode,omitempty" xmlrpc:"pinMode,omitempty"`
+}
+
+// Container class used to hold a portal token
+type Container_User_Customer_External_Binding_Totp struct {
+ Container_User_Customer_External_Binding
+
+ // The security code used to validate a Totp credential.
+ SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"`
+}
+
+// Container class used to hold details about an external authentication vendor.
+type Container_User_Customer_External_Binding_Vendor struct {
+ Entity
+
+ // The keyname used to identify an external authentication vendor.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // The name of an external authentication vendor.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Container class used to hold a portal token
+type Container_User_Customer_External_Binding_Verisign struct {
+ Container_User_Customer_External_Binding
+
+ // A second security code that is only required if your credential has become unsynchronized.
+ SecondSecurityCode *string `json:"secondSecurityCode,omitempty" xmlrpc:"secondSecurityCode,omitempty"`
+
+ // The security code used to validate a VeriSign credential.
+ SecurityCode *string `json:"securityCode,omitempty" xmlrpc:"securityCode,omitempty"`
+}
+
+// no documentation yet
+type Container_User_Customer_OpenIdConnect_LoginAccountInfo struct {
+ Entity
+
+ // The customer account's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The company name associated with an account.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Container_User_Customer_OpenIdConnect_MigrationState struct {
+ Entity
+
+ // The number of days remaining in the grace period for this user's account to
+ DaysToGracePeriodEnd *int `json:"daysToGracePeriodEnd,omitempty" xmlrpc:"daysToGracePeriodEnd,omitempty"`
+
+ // Flag for whether the email address inside this SoftLayer_User_Customer object
+ EmailAlreadyUsedForInvitationToAccount *bool `json:"emailAlreadyUsedForInvitationToAccount,omitempty" xmlrpc:"emailAlreadyUsedForInvitationToAccount,omitempty"`
+
+ // Flag for whether the email address inside this SoftLayer_User_Customer object
+ EmailAlreadyUsedForLinkToAccount *bool `json:"emailAlreadyUsedForLinkToAccount,omitempty" xmlrpc:"emailAlreadyUsedForLinkToAccount,omitempty"`
+
+ // The IBMid email address where an invitation was sent.
+ ExistingInvitationOpenIdConnectName *string `json:"existingInvitationOpenIdConnectName,omitempty" xmlrpc:"existingInvitationOpenIdConnectName,omitempty"`
+
+ // Flag for whether the account is OpenIdConnect authenticated or not.
+ IsAccountOpenIdConnectAuthenticated *bool `json:"isAccountOpenIdConnectAuthenticated,omitempty" xmlrpc:"isAccountOpenIdConnectAuthenticated,omitempty"`
+}
+
+// Container for holding the information necessary for setting and resetting customer passwords
+//
+//
+type Container_User_Customer_PasswordSet struct {
+ Entity
+
+ // id of SoftLayer_User_Security_Question
+ AnsweredSecurityQuestionId *int `json:"answeredSecurityQuestionId,omitempty" xmlrpc:"answeredSecurityQuestionId,omitempty"`
+
+ // the authentication methods required
+ AuthenticationMethods []int `json:"authenticationMethods,omitempty" xmlrpc:"authenticationMethods,omitempty"`
+
+ // the password key provided to the user in the password set URL link sent via email
+ Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"`
+
+ // the user's new password
+ Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+ // answer to the security question provided by the user
+ SecurityAnswer *string `json:"securityAnswer,omitempty" xmlrpc:"securityAnswer,omitempty"`
+
+ // array of SoftLayer_User_Security_Question
+ SecurityQuestions []User_Security_Question `json:"securityQuestions,omitempty" xmlrpc:"securityQuestions,omitempty"`
+
+ // the id of the user to authenticate
+ UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// Container class used to hold a mobile portal token
+type Container_User_Customer_Portal_MobileToken struct {
+ Container_User_Customer_Portal_Token
+
+ // True if this user login required an external binding.
+ HasExternalBinding *bool `json:"hasExternalBinding,omitempty" xmlrpc:"hasExternalBinding,omitempty"`
+}
+
+// Container class used to hold a portal token
+type Container_User_Customer_Portal_Token struct {
+ Entity
+
+ // hash of the logged-in user's session id
+ Hash *string `json:"hash,omitempty" xmlrpc:"hash,omitempty"`
+
+ // the logged in user data
+ User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+ // the id of the logged in user
+ UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// This container holds a user's phone information.
+type Container_User_Data_Phone struct {
+ Entity
+
+ // Country code number for the phone number. Default: 1 (United States & Canada +1)
+ CountryCode *int `json:"countryCode,omitempty" xmlrpc:"countryCode,omitempty"`
+
+ // Phone extension code. Digits, commas, "*", and "#" are allowed.
+ Extension *string `json:"extension,omitempty" xmlrpc:"extension,omitempty"`
+
+ // Phone number can be a mobile phone number, desk phone number, or some other option. The phone number format must match the format selected in the country code.
+ Phone *string `json:"phone,omitempty" xmlrpc:"phone,omitempty"`
+
+ // Type of phone number such as "primary", "office" or "home"
+ PhoneType *string `json:"phoneType,omitempty" xmlrpc:"phoneType,omitempty"`
+}
+
+// Container class used to hold a portal token
+type Container_User_Employee_External_Binding_Verisign struct {
+ Entity
+}
+
+// At times, such as when attaching files to tickets, it is necessary to send files to SoftLayer API methods. The SoftLayer_Container_Utility_File_Attachment data type models a single file to upload to the API.
+type Container_Utility_File_Attachment struct {
+ Entity
+
+ // The contents of a file that is uploaded to the SoftLayer API.
+ Data *[]byte `json:"data,omitempty" xmlrpc:"data,omitempty"`
+
+ // The name of a file that is uploaded to the SoftLayer API.
+ Filename *string `json:"filename,omitempty" xmlrpc:"filename,omitempty"`
+}
+
+// Used to describe a document in the file system on the file server
+type Container_Utility_File_Descriptor struct {
+ Entity
+
+ // The name of a file as it exists on the file server.
+ FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"`
+
+ // The friendly name of a file as it exists on the file server.
+ FriendlyName *string `json:"friendlyName,omitempty" xmlrpc:"friendlyName,omitempty"`
+
+ // The date the file was last modified on the file server.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+}
+
+// The SoftLayer_Container_Utility_File_Entity data type models a single entity on a storage resource. Entities can include anything within a storage volume, including files, folders, directories, and CloudLayer storage projects.
+type Container_Utility_File_Entity struct {
+ Entity
+
+ // A file entity's raw content.
+ Content *[]byte `json:"content,omitempty" xmlrpc:"content,omitempty"`
+
+ // A file entity's MIME content type.
+ ContentType *string `json:"contentType,omitempty" xmlrpc:"contentType,omitempty"`
+
+ // The date a file entity was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The date a CloudLayer storage file entity was moved into the recycle bin. This field applies to files that are pending deletion in the recycle bin.
+ DeleteDate *Time `json:"deleteDate,omitempty" xmlrpc:"deleteDate,omitempty"`
+
+ // Unique identifier for the file. This can be either a number or a GUID.
+ Id *string `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Whether a CloudLayer storage file entity is shared with another CloudLayer user.
+ IsShared *int `json:"isShared,omitempty" xmlrpc:"isShared,omitempty"`
+
+ // The date a file entity was last changed.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A file entity's name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The owner is usually the account that first uploaded or created the file on the resource, or the account that is currently responsible for the file.
+ Owner *string `json:"owner,omitempty" xmlrpc:"owner,omitempty"`
+
+ // The size of a file entity in bytes.
+ Size *uint `json:"size,omitempty" xmlrpc:"size,omitempty"`
+
+ // A CloudLayer storage file entity's type. Types can include "file", "folder", "dir", and "project".
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // The latest revision of a file on a CloudLayer storage volume. This number increments each time a new revision of the file is uploaded.
+ Version *int `json:"version,omitempty" xmlrpc:"version,omitempty"`
+}
+
+// no documentation yet
+type Container_Utility_Message struct {
+ Entity
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"`
+
+ // no documentation yet
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Summary *string `json:"summary,omitempty" xmlrpc:"summary,omitempty"`
+}
+
+// SoftLayer customer servers that are purchased with the Microsoft Windows operating system are configured by default to retrieve updates from SoftLayer's local Windows Server Update Services (WSUS) server. Periodically, these servers synchronize and check for new updates from their local WSUS server. SoftLayer_Container_Utility_Microsoft_Windows_UpdateServices_Status models the results of a server's last synchronization attempt as queried from SoftLayer's WSUS servers.
+type Container_Utility_Microsoft_Windows_UpdateServices_Status struct {
+ Entity
+
+ // The last time a server rebooted due to a Windows Update.
+ LastRebootDate *Time `json:"lastRebootDate,omitempty" xmlrpc:"lastRebootDate,omitempty"`
+
+ // The last time that SoftLayer's local WSUS server received a status update from a customer server.
+ LastStatusDate *Time `json:"lastStatusDate,omitempty" xmlrpc:"lastStatusDate,omitempty"`
+
+ // The last time a server synchronized with SoftLayer's local WSUS server.
+ LastSyncDate *Time `json:"lastSyncDate,omitempty" xmlrpc:"lastSyncDate,omitempty"`
+
+ // This is the private IP address for this server.
+ PrivateIPAddress *string `json:"privateIPAddress,omitempty" xmlrpc:"privateIPAddress,omitempty"`
+
+ // The status message returned from a server's last synchronization with SoftLayer's local WSUS server.
+ SyncStatus *string `json:"syncStatus,omitempty" xmlrpc:"syncStatus,omitempty"`
+
+ // A server's update status, as retrieved from SoftLayer's local WSUS server.
+ UpdateStatus *string `json:"updateStatus,omitempty" xmlrpc:"updateStatus,omitempty"`
+}
+
+// SoftLayer_Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem models a single Microsoft Update as reported by SoftLayer's private Windows Server Update Services (WSUS) services.
All servers purchased with Microsoft Windows retrieve updates from SoftLayer's WSUS servers by default. +type Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem struct { + Entity + + // A short description of a Microsoft Windows Update. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Flag indicating that this patch failed to properly install + Failed *bool `json:"failed,omitempty" xmlrpc:"failed,omitempty"` + + // A Windows Update's knowledge base article number. Every Windows Update can be referenced on the Microsoft Help and Support site at the URL http://support.microsoft.com/kb/
.
+ KbArticleNumber *int `json:"kbArticleNumber,omitempty" xmlrpc:"kbArticleNumber,omitempty"`
+
+ // Flag indicating that the update is entirely optional
+ Optional *bool `json:"optional,omitempty" xmlrpc:"optional,omitempty"`
+
+ // Flag indicating that a reboot is needed for this update to be fully applied
+ RequiresReboot *bool `json:"requiresReboot,omitempty" xmlrpc:"requiresReboot,omitempty"`
+}
+
+// The SoftLayer_Container_Utility_Network_Firewall_Rule_Attribute data type contains information relating to a single firewall rule.
+type Container_Utility_Network_Firewall_Rule_Attribute struct {
+ Entity
+
+ // The valid actions for use with rules.
+ Actions []string `json:"actions,omitempty" xmlrpc:"actions,omitempty"`
+
+ // Maximum allowed number of rules.
+ MaximumRuleCount *int `json:"maximumRuleCount,omitempty" xmlrpc:"maximumRuleCount,omitempty"`
+
+ // The valid protocols for use with rules.
+ Protocols []string `json:"protocols,omitempty" xmlrpc:"protocols,omitempty"`
+
+ // The valid source IP subnet masks for use with rules.
+ SourceIpSubnetMasks []Container_Utility_Network_Subnet_Mask_Generic_Detail `json:"sourceIpSubnetMasks,omitempty" xmlrpc:"sourceIpSubnetMasks,omitempty"`
+}
+
+// The SoftLayer_Container_Utility_Network_Subnet_Mask_Generic_Detail data type contains information relating to a subnet mask and details associated with that object.
+type Container_Utility_Network_Subnet_Mask_Generic_Detail struct {
+ Entity
+
+ // The subnet CIDR prefix.
+ Cidr *string `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"`
+
+ // The subnet mask description.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The subnet mask.
+ Mask *string `json:"mask,omitempty" xmlrpc:"mask,omitempty"`
+}
+
+// This type represents the structure to hold the allocation properties of a [[SoftLayer_Virtual_DedicatedHost]].
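+//
+// A minimal sketch of reading this container in Go (every field is a pointer,
+// so nil checks are required; the variable name "status" is illustrative):
+//
+//    if status.CpuAllocated != nil && status.CpuCount != nil {
+//        fmt.Printf("cores in use: %d of %d\n", *status.CpuAllocated, *status.CpuCount)
+//    }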
+type Container_Virtual_DedicatedHost_AllocationStatus struct { + Entity + + // Number of CPU cores allocated on the specified DedicatedHost + CpuAllocated *int `json:"cpuAllocated,omitempty" xmlrpc:"cpuAllocated,omitempty"` + + // Number of CPU cores available on the specified DedicatedHost + CpuAvailable *int `json:"cpuAvailable,omitempty" xmlrpc:"cpuAvailable,omitempty"` + + // Total number of cpu cores on the DedicatedHost + CpuCount *int `json:"cpuCount,omitempty" xmlrpc:"cpuCount,omitempty"` + + // Amount of disk space allocated on the specified DedicatedHost + DiskAllocated *int `json:"diskAllocated,omitempty" xmlrpc:"diskAllocated,omitempty"` + + // Amount of disk space available on the specified DedicatedHost + DiskAvailable *int `json:"diskAvailable,omitempty" xmlrpc:"diskAvailable,omitempty"` + + // Total amount of disk capacity on the DedicatedHost + DiskCapacity *int `json:"diskCapacity,omitempty" xmlrpc:"diskCapacity,omitempty"` + + // Number of guests allocated on the specified DedicatedHost + GuestCount *int `json:"guestCount,omitempty" xmlrpc:"guestCount,omitempty"` + + // Amount of memory allocated on the specified DedicatedHost + MemoryAllocated *int `json:"memoryAllocated,omitempty" xmlrpc:"memoryAllocated,omitempty"` + + // Amount of memory available on the specified DedicatedHost + MemoryAvailable *int `json:"memoryAvailable,omitempty" xmlrpc:"memoryAvailable,omitempty"` + + // Total amount of memory capacity on the DedicatedHost + MemoryCapacity *int `json:"memoryCapacity,omitempty" xmlrpc:"memoryCapacity,omitempty"` +} + +// The SoftLayer_Container_Virtual_Guest_Block_Device_Template_Configuration data type contains information relating to a template's external location for importing and exporting +type Container_Virtual_Guest_Block_Device_Template_Configuration struct { + Entity + + // + // Optional virtualization boot mode parameter, if set, can mark a template to boot specifically into PV or HVM. + BootMode *string `json:"bootMode,omitempty" xmlrpc:"bootMode,omitempty"` + + // + // Specifies if image is using a customer's software license. + Byol *bool `json:"byol,omitempty" xmlrpc:"byol,omitempty"` + + // + // Specifies if image requires cloud-init. + CloudInit *bool `json:"cloudInit,omitempty" xmlrpc:"cloudInit,omitempty"` + + // The group name to be applied to the imported template + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The note to be applied to the imported template + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // + // The referenceCode of the operating system software description for the imported VHD + OperatingSystemReferenceCode *string `json:"operatingSystemReferenceCode,omitempty" xmlrpc:"operatingSystemReferenceCode,omitempty"` + + // + // The URI for an object storage object (.vhd/.iso file) + // swift://@// + Uri *string `json:"uri,omitempty" xmlrpc:"uri,omitempty"` +} + +// The guest configuration container is used to provide configuration options for creating computing instances. +// +// Each configuration option will include both an itemPrice and a template. +// +// The itemPrice value will provide hourly and monthly costs (if either are applicable), and a description of the option. +// +// The template will provide a fragment of the request with the properties and values that must be sent when creating a computing instance with the option. +// +// The [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] method returns this data structure. 
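+//
+// A usage sketch, assuming a softlayer-go session "sess" and the generated
+// services package (names follow the generated client, but treat this as
+// illustrative; error handling elided):
+//
+//    opts, _ := services.GetVirtualGuestService(sess).GetCreateObjectOptions()
+//    for _, option := range opts.Datacenters {
+//        fmt.Println(*option.Template.Datacenter.Name)
+//    }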
+// +// +type Container_Virtual_Guest_Configuration struct { + Entity + + // + //
+ // Available block device options. + // + // + // A computing instance will have at least one block device represented by a device number of '0'. + // + // + // The blockDevices.device value in the template represents which device the option is for. + // The blockDevices.diskImage.capacity value in the template represents the size, in gigabytes, of the disk. + // The localDiskFlag value in the template represents whether the option is a local or SAN based disk. + // + // + // Note: The block device number '1' is reserved for the SWAP disk attached to the computing instance. + //
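+ // For example (values illustrative), a create request whose first block
+ // device sets device "0" with a diskImage capacity of 25 asks for a 25 GB
+ // primary disk.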
+ BlockDevices []Container_Virtual_Guest_Configuration_Option `json:"blockDevices,omitempty" xmlrpc:"blockDevices,omitempty"` + + // + //
+ // Available datacenter options. + // + // + // The datacenter.name value in the template represents which datacenter the computing instance will be provisioned in. + //
+ Datacenters []Container_Virtual_Guest_Configuration_Option `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"` + + // + //
+ // Available memory options. + // + // + // The maxMemory value in the template represents the amount of memory, in megabytes, allocated to the computing instance. + //
+ Memory []Container_Virtual_Guest_Configuration_Option `json:"memory,omitempty" xmlrpc:"memory,omitempty"` + + // + //
+ // Available network component options. + // + // + // The networkComponent.maxSpeed value in the template represents the link speed, in megabits per second, of the network connections for a computing instance. + //
+ NetworkComponents []Container_Virtual_Guest_Configuration_Option `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // + //
+ // Available operating system options. + // + // + // The operatingSystemReferenceCode value in the template is an identifier for a particular operating system. When provided exactly as shown in the template, that operating system will be used. + // + // + // A reference code is structured as three tokens separated by underscores. The first token represents the product, the second is the version of the product, and the third is whether the OS is 32 or 64bit. + // + // + // When providing an operatingSystemReferenceCode while ordering a computing instance the only token required to match exactly is the product. The version token may be given as 'LATEST', else it will require an exact match as well. When the bits token is not provided, 64 bits will be assumed. + // + // + // Providing the value of 'LATEST' for a version will select the latest release of that product for the operating system. As this may change over time, you should be sure that the release version is irrelevant for your applications. + // + // + // For Windows based operating systems the version will represent both the release version (2008, 2012, etc) and the edition (Standard, Enterprise, etc). For all other operating systems the version will represent the major version (Centos 6, Ubuntu 12, etc) of that operating system, minor versions are not represented in a reference code. + // + // + // Notice - Some operating systems are charged based on the value specified in startCpus. The price which is used can be determined by calling [[SoftLayer_Virtual_Guest/generateOrderTemplate|generateOrderTemplate]] with your desired device specifications. + //
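+ // For example (codes illustrative of the scheme described above),
+ // "UBUNTU_16_64" selects 64-bit Ubuntu 16, while "UBUNTU_LATEST" selects
+ // the newest Ubuntu release, with 64 bits assumed.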
+ OperatingSystems []Container_Virtual_Guest_Configuration_Option `json:"operatingSystems,omitempty" xmlrpc:"operatingSystems,omitempty"` + + // + //
+ // Available processor options. + // + // + // The startCpus value in the template represents the number of cores allocated to the computing instance. + // The dedicatedAccountHostOnlyFlag value in the template represents whether the instance will run on hosts with instances belonging to other accounts. + //
+ Processors []Container_Virtual_Guest_Configuration_Option `json:"processors,omitempty" xmlrpc:"processors,omitempty"`
+}
+
+// An option found within a [[SoftLayer_Container_Virtual_Guest_Configuration (type)]] structure.
+type Container_Virtual_Guest_Configuration_Option struct {
+ Entity
+
+ //
+ // Provides hourly and monthly costs (if either are applicable), and a description of the option.
+ ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"`
+
+ //
+ // Provides a fragment of the request with the properties and values that must be sent when creating a computing instance with the option.
+ Template *Virtual_Guest `json:"template,omitempty" xmlrpc:"template,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/dns.go b/vendor/github.com/softlayer/softlayer-go/datatypes/dns.go
new file mode 100644
index 0000000000..98be7e711b
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/dns.go
@@ -0,0 +1,430 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// The SoftLayer_Dns_Domain data type represents a single DNS domain record hosted on the SoftLayer nameservers. Domains contain general information about the domain name such as name and serial. Individual records such as A, AAAA, CNAME, and MX records are stored in the domain's associated [[SoftLayer_Dns_Domain_ResourceRecord (type)|SoftLayer_Dns_Domain_ResourceRecord]] records.
+type Dns_Domain struct {
+ Entity
+
+ // The SoftLayer customer account that owns a domain.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // A domain record's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A flag indicating that the dns domain record is a managed resource.
+ ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"`
+
+ // A domain's name including top-level domain, for example "example.com".
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A count of the individual records contained within a domain record. These include but are not limited to A, AAAA, MX, CNAME, SPF and TXT records.
+ ResourceRecordCount *uint `json:"resourceRecordCount,omitempty" xmlrpc:"resourceRecordCount,omitempty"`
+
+ // The individual records contained within a domain record. These include but are not limited to A, AAAA, MX, CNAME, SPF and TXT records.
+ ResourceRecords []Dns_Domain_ResourceRecord `json:"resourceRecords" xmlrpc:"resourceRecords"`
+
+ // The secondary DNS record that defines this domain as being managed through zone transfers.
+ Secondary *Dns_Secondary `json:"secondary,omitempty" xmlrpc:"secondary,omitempty"`
+
+ // A unique number denoting the latest revision of a domain. Whenever a domain is changed its corresponding serial number is also changed. Serial numbers typically follow the format yyyymmdd## where yyyy is the current year, mm is the current month, dd is the current day of the month, and ## is the number of the revision for that day. A domain's serial number is automatically updated when edited via the API.
+ Serial *int `json:"serial,omitempty" xmlrpc:"serial,omitempty"`
+
+ // The start of authority (SOA) record contains authoritative and propagation details for a DNS zone. This property is not considered in requests to createObject and editObject.
+ SoaResourceRecord *Dns_Domain_ResourceRecord_SoaType `json:"soaResourceRecord,omitempty" xmlrpc:"soaResourceRecord,omitempty"`
+
+ // The date that this domain record was last updated.
+ UpdateDate *Time `json:"updateDate,omitempty" xmlrpc:"updateDate,omitempty"`
+}
+
+// The SoftLayer_Dns_Domain_Forward data type represents a single DNS domain record hosted on the SoftLayer nameservers. Domains contain general information about the domain name such as name and serial. Individual records such as A, AAAA, CNAME, and MX records are stored in the domain's associated [[SoftLayer_Dns_Domain_ResourceRecord (type)|SoftLayer_Dns_Domain_ResourceRecord]] records.
+type Dns_Domain_Forward struct {
+ Dns_Domain
+}
+
+// The SoftLayer_Dns_Domain_Registration data type represents a domain registration record.
+type Dns_Domain_Registration struct {
+ Entity
+
+ // The SoftLayer customer account that the domain is registered to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The domain registration status.
+ DomainRegistrationStatus *Dns_Domain_Registration_Status `json:"domainRegistrationStatus,omitempty" xmlrpc:"domainRegistrationStatus,omitempty"`
+
+ // no documentation yet
+ DomainRegistrationStatusId *int `json:"domainRegistrationStatusId,omitempty" xmlrpc:"domainRegistrationStatusId,omitempty"`
+
+ // The date that the domain registration will expire.
+ ExpireDate *Time `json:"expireDate,omitempty" xmlrpc:"expireDate,omitempty"`
+
+ // A domain record's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Indicates whether a domain is locked or unlocked.
+ LockedFlag *int `json:"lockedFlag,omitempty" xmlrpc:"lockedFlag,omitempty"`
+
+ // no documentation yet
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A domain's name, for example "example.com".
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The registrant verification status.
+ RegistrantVerificationStatus *Dns_Domain_Registration_Registrant_Verification_Status `json:"registrantVerificationStatus,omitempty" xmlrpc:"registrantVerificationStatus,omitempty"`
+
+ // no documentation yet
+ RegistrantVerificationStatusId *int `json:"registrantVerificationStatusId,omitempty" xmlrpc:"registrantVerificationStatusId,omitempty"`
+
+ // no documentation yet
+ ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"`
+
+ // no documentation yet
+ ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"`
+}
+
+// SoftLayer_Dns_Domain_Registration_Registrant_Verification_Status models the state of the registrant. The following status codes apply:
+//
+//
+// *'''Admin Reviewing''': The registrant data has been submitted and is being reviewed by the compliance team.
+// *'''Pending''': The verification process has been initiated, and a verification email will be sent.
+// *'''Suspended''': The registrant has failed verification and the domain has been suspended.
+// *'''Verified''': The registrant has been validated.
+// *'''Verifying''': The verification process has been initiated and is waiting for a registrant response.
+// *'''Unverified''': The verification process has not been initiated.
+//
+//
+type Dns_Domain_Registration_Registrant_Verification_Status struct {
+ Entity
+
+ // The description of the registrant verification status.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The unique identifier of the registrant verification status
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The unique keyname of the registrant verification status.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // The name of the registrant verification status.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// SoftLayer_Dns_Domain_Registration_Status models the state of a domain name. The following status codes apply:
+//
+//
+// *'''Active''': This domain name is active.
+// *'''Pending Owner Approval''': Pending owner approval for completion of transfer.
+// *'''Pending Admin Review''': Pending admin review for transfer.
+// *'''Pending Registry''': Pending registry for transfer.
+// *'''Expired''': Domain name has expired.
+//
+//
+type Dns_Domain_Registration_Status struct {
+ Entity
+
+ // The description of the domain registration status names.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The unique identifier of the domain registration status
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The unique keyname of the domain registration status.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // The name of the domain registration status.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Dns_Domain_ResourceRecord data type represents a single resource record entry in a SoftLayer hosted domain. Each resource record contains a ''host'' and ''data'' property, defining a resource's name and its target data. Domains contain multiple types of resource records. The ''type'' property separates out resource records by type.
''Type'' can take one of the following values: +// * '''"a"''' for [[SoftLayer_Dns_Domain_ResourceRecord_AType|address]] records +// * '''"aaaa"''' for [[SoftLayer_Dns_Domain_ResourceRecord_AaaaType|address]] records +// * '''"cname"''' for [[SoftLayer_Dns_Domain_ResourceRecord_CnameType|canonical name]] records +// * '''"mx"''' for [[SoftLayer_Dns_Domain_ResourceRecord_MxType|mail exchanger]] records +// * '''"ns"''' for [[SoftLayer_Dns_Domain_ResourceRecord_NsType|name server]] records +// * '''"ptr"''' for [[SoftLayer_Dns_Domain_ResourceRecord_PtrType|pointer]] records in reverse domains +// * '''"soa"''' for a domain's [[SoftLayer_Dns_Domain_ResourceRecord_SoaType|start of authority]] record +// * '''"spf"''' for [[SoftLayer_Dns_Domain_ResourceRecord_SpfType|sender policy framework]] records +// * '''"srv"''' for [[SoftLayer_Dns_Domain_ResourceRecord_SrvType|service]] records +// * '''"txt"''' for [[SoftLayer_Dns_Domain_ResourceRecord_TxtType|text]] records +// +// +// As ''SoftLayer_Dns_Domain_ResourceRecord'' objects are created and loaded, the API verifies the ''type'' property and casts the object as the appropriate type. +type Dns_Domain_ResourceRecord struct { + Entity + + // The value of a domain's resource record. This can be an IP address or a hostname. Fully qualified host and domain name data must end with the "." character. + Data *string `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // The domain that a resource record belongs to. + Domain *Dns_Domain `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // An identifier belonging to the domain that a resource record is associated with. + DomainId *int `json:"domainId,omitempty" xmlrpc:"domainId,omitempty"` + + // The amount of time in seconds that a secondary name server (or servers) will hold a zone before it is no longer considered authoritative. + Expire *int `json:"expire,omitempty" xmlrpc:"expire,omitempty"` + + // The host defined by a resource record. A value of "@" denotes a wildcard. + Host *string `json:"host,omitempty" xmlrpc:"host,omitempty"` + + // A domain resource record's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Whether the address associated with a PTR record is the gateway address of a subnet. + IsGatewayAddress *bool `json:"isGatewayAddress,omitempty" xmlrpc:"isGatewayAddress,omitempty"` + + // The amount of time in seconds that a domain's resource records are valid. This is also known as a minimum TTL, and can be overridden by an individual resource record's TTL. + Minimum *int `json:"minimum,omitempty" xmlrpc:"minimum,omitempty"` + + // Useful in cases where a domain has more than one mail exchanger, the priority property is the priority of the MTA that delivers mail for a domain. A lower number denotes a higher priority, and mail will attempt to deliver through that MTA before moving to lower priority mail servers. Priority is defaulted to 10 upon resource record creation. + MxPriority *int `json:"mxPriority,omitempty" xmlrpc:"mxPriority,omitempty"` + + // The TCP or UDP port on which the service is to be found. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // The priority of the target host, lower value means more preferred. + Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"` + + // The protocol of the desired service; this is usually either TCP or UDP. 
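+ // In the SRV record "_sip._tcp.example.org", for instance, the service is
+ // "_sip" and the protocol is "_tcp".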
+ Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+
+ // The amount of time in seconds that a secondary name server should wait to check for a new copy of a DNS zone from the domain's primary name server. If a zone file has changed then the secondary DNS server will update its copy of the zone to match the primary DNS server's zone.
+ Refresh *int `json:"refresh,omitempty" xmlrpc:"refresh,omitempty"`
+
+ // The email address of the person responsible for a domain, with the "@" replaced with a ".". For instance, if root@example.org is responsible for example.org, then example.org's SOA responsibility is "root.example.org.".
+ ResponsiblePerson *string `json:"responsiblePerson,omitempty" xmlrpc:"responsiblePerson,omitempty"`
+
+ // The amount of time in seconds that a domain's primary name server (or servers) should wait if an attempt to refresh by a secondary name server failed before attempting to refresh a domain's zone with that secondary name server again.
+ Retry *int `json:"retry,omitempty" xmlrpc:"retry,omitempty"`
+
+ // The symbolic name of the desired service
+ Service *string `json:"service,omitempty" xmlrpc:"service,omitempty"`
+
+ // The Time To Live value of a resource record, measured in seconds. TTL is used by a name server to determine how long to cache a resource record. An SOA record's TTL value defines the domain's overall TTL.
+ Ttl *int `json:"ttl,omitempty" xmlrpc:"ttl,omitempty"`
+
+ // A domain resource record's type. A value of "a" denotes an A (address) record, "aaaa" denotes an AAAA (IPv6 address) record, "cname" denotes a CNAME (canonical name) record, "mx" denotes an MX (mail exchanger) record, "ns" denotes an NS (nameserver) record, "ptr" denotes a PTR (pointer/reverse) record, "soa" denotes the SOA (start of authority) record, "spf" denotes a SPF (sender policy framework) record, and "txt" denotes a TXT (text) record. A domain record's type also denotes which class in the SoftLayer API is a best match for extending a resource record.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // A relative weight for records with the same priority.
+ Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"`
+}
+
+// SoftLayer_Dns_Domain_ResourceRecord_AType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "a" and defines a DNS A record on a SoftLayer hosted domain. An A record directs a host name to an IP address. For instance, if the A record for "host.example.org" points to the IP address 10.0.0.1 then the ''host'' property for the A record equals "host" and the ''data'' property equals "10.0.0.1".
+type Dns_Domain_ResourceRecord_AType struct {
+ Dns_Domain_ResourceRecord
+}
+
+// SoftLayer_Dns_Domain_ResourceRecord_AaaaType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "aaaa" and defines a DNS AAAA record on a SoftLayer hosted domain. An AAAA record directs a host name to an IPv6 address. For instance, if the AAAA record for "host.example.org" points to the IPv6 address "fe80:0:0:0:0:0:a00:0" then the ''host'' property for the AAAA record equals "host" and the ''data'' property equals "fe80:0:0:0:0:0:a00:0".
+type Dns_Domain_ResourceRecord_AaaaType struct {
+ Dns_Domain_ResourceRecord
+}
+
+// SoftLayer_Dns_Domain_ResourceRecord_CnameType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "cname" and defines a DNS CNAME record on a SoftLayer hosted domain. A CNAME record directs a host name to another host. For instance, if the CNAME record for "alias.example.org" points to the host "host.example.org" then the ''host'' property equals "alias" and the ''data'' property equals "host.example.org.".
+//
+// DNS entries defined by CNAME should not be used as the data field for an MX record.
+type Dns_Domain_ResourceRecord_CnameType struct {
+ Dns_Domain_ResourceRecord
+}
+
+// SoftLayer_Dns_Domain_ResourceRecord_MxType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "mx" and is used to describe MX resource records. MX records control which hosts are responsible as mail exchangers for a domain. For instance, in the domain example.org, an MX record whose host is "@" and data is "mail" says that the host "mail.example.org" is responsible for handling mail for example.org. That means mail sent to users @example.org is delivered to mail.example.org.
+//
+// Domains can have more than one MX record if they route mail through more than one server. Multiple MX records are denoted by their priority, defined by the mxPriority property.
+//
+// MX records must be defined for hosts with accompanying A or AAAA resource records. They may not point mail towards a host defined by a CNAME record.
+type Dns_Domain_ResourceRecord_MxType struct {
+ Dns_Domain_ResourceRecord
+}
+
+// SoftLayer_Dns_Domain_ResourceRecord_NsType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "ns" and defines a DNS NS record on a SoftLayer hosted domain. An NS record defines the authoritative name server for a domain. All SoftLayer hosted domains contain NS records for "ns1.softlayer.com" and "ns2.softlayer.com". For instance, if example.org is hosted on ns1.softlayer.com, then example.org contains an NS record whose ''host'' property equals "@" and whose ''data'' property equals "ns1.softlayer.com".
+//
+// NS resource records pointing to ns1.softlayer.com or ns2.softlayer.com may not be removed from a SoftLayer hosted domain.
+type Dns_Domain_ResourceRecord_NsType struct {
+ Dns_Domain_ResourceRecord
+}
+
+// SoftLayer_Dns_Domain_ResourceRecord_PtrType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "ptr" and defines a reverse DNS PTR record on the SoftLayer name servers.
+//
+// The format for a reverse DNS PTR record varies based on whether it is for an IPv4 or IPv6 address.
+//
+// For an IPv4 address the ''host'' property for every PTR record is the last octet of the IP address that the PTR record belongs to, while the ''data'' property is the canonical name of the host that the reverse lookup resolves to. Every PTR record belongs to a domain on the SoftLayer name servers named by the first three octets of an IP address in reverse order followed by ".in-addr.arpa".
+//
+// For instance, if the reverse DNS record for 10.0.0.1 is "host.example.org" then its corresponding SoftLayer_Dns_Domain_ResourceRecord_PtrType host is "1", while its data property equals "host.example.org". The full name of the reverse record for host.example.org including the domain name is "1.0.0.10.in-addr.arpa".
+//
+// For an IPv6 address the ''host'' property for every PTR record is the last four octets of the IP address that the PTR record belongs to. The last four octets need to be in reversed order with each digit separated by a period. The ''data'' property is the canonical name of the host that the reverse lookup resolves to. Every PTR record belongs to a domain on the SoftLayer name servers named by the first four octets of an IP address in reverse order, split up by digit with a period, and followed by ".ip6.arpa".
+//
+// For instance, if the reverse DNS record for fe80:0000:0000:0000:0000:0000:0a00:0001 is "host.example.org" then its corresponding SoftLayer_Dns_Domain_ResourceRecord_PtrType host is "1.0.0.0.0.0.a.0.0.0.0.0.0.0.0.0", while its data property equals "host.example.org". The full name of the reverse record for host.example.org including the domain name is "1.0.0.0.0.0.a.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.e.f.ip6.arpa".
+//
+// PTR record host names may not be changed by [[SoftLayer_Dns_Domain_ResourceRecord::editObject]] or [[SoftLayer_Dns_Domain_ResourceRecord::editObjects]].
+type Dns_Domain_ResourceRecord_PtrType struct {
+ Dns_Domain_ResourceRecord
+
+ // Whether the address associated with a PTR record is the gateway address of a subnet.
+ IsGatewayAddress *bool `json:"isGatewayAddress,omitempty" xmlrpc:"isGatewayAddress,omitempty"`
+}
+
+// SoftLayer_Dns_Domain_ResourceRecord_SoaType defines a domain's Start of Authority (or SOA) resource record. A domain's SOA record contains a domain's general and propagation information. Every domain must have one SOA record, and it is not possible to remove a domain's SOA record.
+//
+// SOA records typically contain a domain's serial number, but the SoftLayer API associates a domain's serial number directly with its SoftLayer_Dns_Domain record.
+type Dns_Domain_ResourceRecord_SoaType struct {
+ Dns_Domain_ResourceRecord
+}
+
+// SoftLayer_Dns_Domain_ResourceRecord_SpfType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "spf" and defines a DNS SPF record on a SoftLayer hosted domain. An SPF record provides sender policy framework data for a host. For instance, if defining the SPF record "v=spf1 mx:mail.example.org ~all" for "host.example.org", then the ''host'' property equals "host" and the ''data'' property equals "v=spf1 mx:mail.example.org ~all".
+//
+// SPF records are commonly used in email verification methods such as Sender Policy Framework.
+type Dns_Domain_ResourceRecord_SpfType struct {
+ Dns_Domain_ResourceRecord_TxtType
+}
+
+// SoftLayer_Dns_Domain_ResourceRecord_SrvType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "srv" and defines a DNS SRV record on a SoftLayer hosted domain.
+type Dns_Domain_ResourceRecord_SrvType struct {
+ Dns_Domain_ResourceRecord
+
+ // The TCP or UDP port on which the service is to be found.
+ Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"`
+
+ // The priority of the target host, lower value means more preferred.
+ Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"`
+
+ // The protocol of the desired service; this is usually either TCP or UDP.
+ Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+
+ // The symbolic name of the desired service
+ Service *string `json:"service,omitempty" xmlrpc:"service,omitempty"`
+
+ // A relative weight for records with the same priority.
+ Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"`
+}
+
+// SoftLayer_Dns_Domain_ResourceRecord_TxtType is a SoftLayer_Dns_Domain_ResourceRecord object whose ''type'' property is set to "txt" and defines a DNS TXT record on a SoftLayer hosted domain. A TXT record provides a text description for a host. For instance, if defining the TXT record "My test host" for "host.example.org",
then the ''host'' property equals "host" and the ''data'' property equals "My test host". +// +// TXT records are commonly used in email verification methods such as Sender Policy Framework. +type Dns_Domain_ResourceRecord_TxtType struct { + Dns_Domain_ResourceRecord +} + +// The SoftLayer_Dns_Domain_Reverse data type represents a reverse IP address record. +type Dns_Domain_Reverse struct { + Dns_Domain + + // Network address the domain is associated with. + NetworkAddress *string `json:"networkAddress,omitempty" xmlrpc:"networkAddress,omitempty"` +} + +// The SoftLayer_Dns_Domain_Reverse_Version4 data type represents a reverse IPv4 address record. +type Dns_Domain_Reverse_Version4 struct { + Dns_Domain_Reverse +} + +// The SoftLayer_Dns_Domain_Reverse_Version6 data type represents a reverse IPv6 address record. +type Dns_Domain_Reverse_Version6 struct { + Dns_Domain_Reverse +} + +// The SoftLayer_Dns_Message data type contains information for a single message generated by the SoftLayer DNS system. SoftLayer_Dns_Messages are typically created during the secondary DNS transfer process. +type Dns_Message struct { + Entity + + // The date the message was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The domain that is associated with a message. + Domain *Dns_Domain `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // The internal identifier for a DNS message. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The message text. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // The priority level for a DNS message. The possible levels are 'notice' and 'error'. + Priority *string `json:"priority,omitempty" xmlrpc:"priority,omitempty"` + + // The resource record that is associated with a message. + ResourceRecord *Dns_Domain_ResourceRecord `json:"resourceRecord,omitempty" xmlrpc:"resourceRecord,omitempty"` + + // The secondary DNS record that a message belongs to. + Secondary *Dns_Secondary `json:"secondary,omitempty" xmlrpc:"secondary,omitempty"` +} + +// The SoftLayer_Dns_Secondary data type contains information on a single secondary DNS zone which is managed through SoftLayer's zone transfer service. Domains created via zone transfer may not be modified by the SoftLayer portal or API. +type Dns_Secondary struct { + Entity + + // The SoftLayer account that owns a secondary DNS record. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The date a secondary DNS record was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The domain record created by zone transfer from a secondary DNS record. + Domain *Dns_Domain `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // A count of the error messages created during secondary DNS record transfer. + ErrorMessageCount *uint `json:"errorMessageCount,omitempty" xmlrpc:"errorMessageCount,omitempty"` + + // The error messages created during secondary DNS record transfer. + ErrorMessages []Dns_Message `json:"errorMessages,omitempty" xmlrpc:"errorMessages,omitempty"` + + // The internal identifier for a secondary DNS record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date when the most recent secondary DNS zone transfer took place. + LastUpdate *Time `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"` + + // The IP address of the master name server where a secondary DNS zone is transferred from. 
+ MasterIpAddress *string `json:"masterIpAddress,omitempty" xmlrpc:"masterIpAddress,omitempty"` + + // The current status of the secondary DNS zone. + Status *Dns_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The current status of a secondary DNS record. The status may be one of the following: + // :*'''0''': Disabled + // :*'''1''': Active + // :*'''2''': Transfer Now + // :*'''3''': An error occurred that prevented the zone transfer from being completed. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The textual representation of a secondary DNS zone's status. + StatusText *string `json:"statusText,omitempty" xmlrpc:"statusText,omitempty"` + + // How often, in minutes, a secondary DNS zone should be transferred. + TransferFrequency *int `json:"transferFrequency,omitempty" xmlrpc:"transferFrequency,omitempty"` + + // The name of the zone that is transferred. + ZoneName *string `json:"zoneName,omitempty" xmlrpc:"zoneName,omitempty"` +} + +// The SoftLayer_Dns_Status data type contains information for a DNS status. +type Dns_Status struct { + Entity + + // Internal identifier of a DNS status. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Monitoring DNS status name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +}
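+ +// A small sketch of mapping the documented status IDs above to labels; the statusLabel helper is hypothetical, and the short "Transfer Error" label stands in for the longer description of status 3: +// +//	func statusLabel(s datatypes.Dns_Secondary) string { +//		labels := map[int]string{0: "Disabled", 1: "Active", 2: "Transfer Now", 3: "Transfer Error"} +//		if s.StatusId != nil { +//			if label, ok := labels[*s.StatusId]; ok { +//				return label +//			} +//		} +//		return "Unknown" +//	} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/entity.go b/vendor/github.com/softlayer/softlayer-go/datatypes/entity.go new file mode 100644 index 0000000000..49e32cf358 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/entity.go @@ -0,0 +1,25 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Entity struct { +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/event.go b/vendor/github.com/softlayer/softlayer-go/datatypes/event.go new file mode 100644 index 0000000000..ee0084edbf --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/event.go @@ -0,0 +1,71 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Event_Log data type contains the details of an event that occurred on various SoftLayer resources.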
+type Event_Log struct { + Entity + + // Account id with which the event is associated + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Event creation date in millisecond precision + EventCreateDate *Time `json:"eventCreateDate,omitempty" xmlrpc:"eventCreateDate,omitempty"` + + // Event name such as "reboot", "cancel", "update host" and so on. + EventName *string `json:"eventName,omitempty" xmlrpc:"eventName,omitempty"` + + // The remote IP address that made the request + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // Label or description of the event object + Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"` + + // Metadata for an event, as a JSON string + MetaData *string `json:"metaData,omitempty" xmlrpc:"metaData,omitempty"` + + // Event object id + ObjectId *int `json:"objectId,omitempty" xmlrpc:"objectId,omitempty"` + + // Event object name such as "server", "dns" and so on. + ObjectName *string `json:"objectName,omitempty" xmlrpc:"objectName,omitempty"` + + // OpenIdConnectUserName of the customer who initiated the event + OpenIdConnectUserName *string `json:"openIdConnectUserName,omitempty" xmlrpc:"openIdConnectUserName,omitempty"` + + // A resource object that is associated with the event + Resource *Entity `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // A unique trace id. Multiple events can be grouped by a trace id. + TraceId *string `json:"traceId,omitempty" xmlrpc:"traceId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // Id of the customer who initiated the event + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` + + // Type of user that triggered the event. User type can be CUSTOMER, EMPLOYEE or SYSTEM. + UserType *string `json:"userType,omitempty" xmlrpc:"userType,omitempty"` + + // Customer username who initiated the event + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/flexiblecredit.go b/vendor/github.com/softlayer/softlayer-go/datatypes/flexiblecredit.go new file mode 100644 index 0000000000..23da7fbfec --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/flexiblecredit.go @@ -0,0 +1,110 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type FlexibleCredit_Affiliate struct { + Entity + + // Flexible Credit Program the affiliate belongs to.
+ FlexibleCreditProgram *FlexibleCredit_Program `json:"flexibleCreditProgram,omitempty" xmlrpc:"flexibleCreditProgram,omitempty"` + + // Primary ID for the affiliate + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of this affiliate + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type FlexibleCredit_Company_Type struct { + Entity + + // Description of the company type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Primary ID for the company type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type FlexibleCredit_Enrollment struct { + Entity + + // Account the enrollment belongs to + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Account ID associated with this enrollment + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Affiliate associated with the account enrollment + Affiliate *FlexibleCredit_Affiliate `json:"affiliate,omitempty" xmlrpc:"affiliate,omitempty"` + + // ID of the corresponding Flexible Credit Program Affiliate + AffiliateId *int `json:"affiliateId,omitempty" xmlrpc:"affiliateId,omitempty"` + + // Indicates signing of Flexible Credit agreement (independent from MSA) + AgreementCompleteFlag *int `json:"agreementCompleteFlag,omitempty" xmlrpc:"agreementCompleteFlag,omitempty"` + + // Brief description of the company + CompanyDescription *string `json:"companyDescription,omitempty" xmlrpc:"companyDescription,omitempty"` + + // Category which best describes the company + CompanyType *FlexibleCredit_Company_Type `json:"companyType,omitempty" xmlrpc:"companyType,omitempty"` + + // ID of the Flexible Credit Program Company classification for this enrollment + CompanyTypeId *int `json:"companyTypeId,omitempty" xmlrpc:"companyTypeId,omitempty"` + + // Date when participation in the Flexible Credit program began + EnrollmentDate *Time `json:"enrollmentDate,omitempty" xmlrpc:"enrollmentDate,omitempty"` + + // Discount program the enrollment belongs to + FlexibleCreditProgram *FlexibleCredit_Program `json:"flexibleCreditProgram,omitempty" xmlrpc:"flexibleCreditProgram,omitempty"` + + // Date Flexible Credit Program benefits end. + GraduationDate *Time `json:"graduationDate,omitempty" xmlrpc:"graduationDate,omitempty"` + + // Flag indicating whether an enrollment is active (true) or inactive (false) + IsActiveFlag *bool `json:"isActiveFlag,omitempty" xmlrpc:"isActiveFlag,omitempty"` + + // Amount of monthly credit (USD) given to the account + MonthlyCreditAmount *Float64 `json:"monthlyCreditAmount,omitempty" xmlrpc:"monthlyCreditAmount,omitempty"` + + // Employee overseeing the enrollment + Representative *User_Employee `json:"representative,omitempty" xmlrpc:"representative,omitempty"` + + // ID of the employee representing this account. + RepresentativeEmployeeId *int `json:"representativeEmployeeId,omitempty" xmlrpc:"representativeEmployeeId,omitempty"` +} + +// no documentation yet +type FlexibleCredit_Program struct { + Entity + + // Primary ID of the Flexible Credit Program + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Unique name for the Flexible Credit Program + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // Name of the Flexible Credit Program. 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/hardware.go b/vendor/github.com/softlayer/softlayer-go/datatypes/hardware.go new file mode 100644 index 0000000000..db77f33634 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/hardware.go @@ -0,0 +1,1761 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Hardware data type contains general information relating to a single piece of SoftLayer hardware. +type Hardware struct { + Entity + + // The account associated with a piece of hardware. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A hardware's associated [[SoftLayer_Account|account]] id. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of a piece of hardware's active physical components. + ActiveComponentCount *uint `json:"activeComponentCount,omitempty" xmlrpc:"activeComponentCount,omitempty"` + + // A piece of hardware's active physical components. + ActiveComponents []Hardware_Component `json:"activeComponents,omitempty" xmlrpc:"activeComponents,omitempty"` + + // A piece of hardware's active network monitoring incidents. + ActiveNetworkMonitorIncident []Network_Monitor_Version1_Incident `json:"activeNetworkMonitorIncident,omitempty" xmlrpc:"activeNetworkMonitorIncident,omitempty"` + + // A count of a piece of hardware's active network monitoring incidents. + ActiveNetworkMonitorIncidentCount *uint `json:"activeNetworkMonitorIncidentCount,omitempty" xmlrpc:"activeNetworkMonitorIncidentCount,omitempty"` + + // A count of + AllPowerComponentCount *uint `json:"allPowerComponentCount,omitempty" xmlrpc:"allPowerComponentCount,omitempty"` + + // no documentation yet + AllPowerComponents []Hardware_Power_Component `json:"allPowerComponents,omitempty" xmlrpc:"allPowerComponents,omitempty"` + + // The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists. + AllowedHost *Network_Storage_Allowed_Host `json:"allowedHost,omitempty" xmlrpc:"allowedHost,omitempty"` + + // The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorage []Network_Storage `json:"allowedNetworkStorage,omitempty" xmlrpc:"allowedNetworkStorage,omitempty"` + + // A count of the SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorageCount *uint `json:"allowedNetworkStorageCount,omitempty" xmlrpc:"allowedNetworkStorageCount,omitempty"` + + // A count of the SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to.
+ AllowedNetworkStorageReplicaCount *uint `json:"allowedNetworkStorageReplicaCount,omitempty" xmlrpc:"allowedNetworkStorageReplicaCount,omitempty"` + + // The SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to. + AllowedNetworkStorageReplicas []Network_Storage `json:"allowedNetworkStorageReplicas,omitempty" xmlrpc:"allowedNetworkStorageReplicas,omitempty"` + + // Information regarding an antivirus/spyware software component object. + AntivirusSpywareSoftwareComponent *Software_Component `json:"antivirusSpywareSoftwareComponent,omitempty" xmlrpc:"antivirusSpywareSoftwareComponent,omitempty"` + + // A count of information regarding a piece of hardware's specific attributes. + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // Information regarding a piece of hardware's specific attributes. + Attributes []Hardware_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // The average daily public bandwidth usage for the current billing cycle. + AverageDailyPublicBandwidthUsage *Float64 `json:"averageDailyPublicBandwidthUsage,omitempty" xmlrpc:"averageDailyPublicBandwidthUsage,omitempty"` + + // A count of a piece of hardware's back-end or private network components. + BackendNetworkComponentCount *uint `json:"backendNetworkComponentCount,omitempty" xmlrpc:"backendNetworkComponentCount,omitempty"` + + // A piece of hardware's back-end or private network components. + BackendNetworkComponents []Network_Component `json:"backendNetworkComponents,omitempty" xmlrpc:"backendNetworkComponents,omitempty"` + + // A count of a hardware's backend or private router. + BackendRouterCount *uint `json:"backendRouterCount,omitempty" xmlrpc:"backendRouterCount,omitempty"` + + // A hardware's backend or private router. + BackendRouters []Hardware `json:"backendRouters,omitempty" xmlrpc:"backendRouters,omitempty"` + + // A hardware's allotted bandwidth (measured in GB). + BandwidthAllocation *Float64 `json:"bandwidthAllocation,omitempty" xmlrpc:"bandwidthAllocation,omitempty"` + + // A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments. + BandwidthAllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"bandwidthAllotmentDetail,omitempty" xmlrpc:"bandwidthAllotmentDetail,omitempty"` + + // When true, this flag specifies that a hardware is a Bare Metal Server. Bare Metal Servers are physical bare metal servers that are billed with the same options as Virtual Servers, with monthly and hourly rates. Bare Metal instances are ordered based on processor core count and RAM amount. + BareMetalInstanceFlag *int `json:"bareMetalInstanceFlag,omitempty" xmlrpc:"bareMetalInstanceFlag,omitempty"` + + // A count of information regarding a piece of hardware's benchmark certifications. + BenchmarkCertificationCount *uint `json:"benchmarkCertificationCount,omitempty" xmlrpc:"benchmarkCertificationCount,omitempty"` + + // Information regarding a piece of hardware's benchmark certifications. + BenchmarkCertifications []Hardware_Benchmark_Certification `json:"benchmarkCertifications,omitempty" xmlrpc:"benchmarkCertifications,omitempty"` + + // Information regarding the billing item for a server. + BillingItem *Billing_Item_Hardware `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // A flag indicating that a billing item exists.
+ BillingItemFlag *bool `json:"billingItemFlag,omitempty" xmlrpc:"billingItemFlag,omitempty"` + + // Determines whether the hardware is ineligible for cancellation because it is disconnected. + BlockCancelBecauseDisconnectedFlag *bool `json:"blockCancelBecauseDisconnectedFlag,omitempty" xmlrpc:"blockCancelBecauseDisconnectedFlag,omitempty"` + + // Status indicating whether or not a piece of hardware has business continuance insurance. + BusinessContinuanceInsuranceFlag *bool `json:"businessContinuanceInsuranceFlag,omitempty" xmlrpc:"businessContinuanceInsuranceFlag,omitempty"` + + // Child hardware. + ChildrenHardware []Hardware `json:"childrenHardware,omitempty" xmlrpc:"childrenHardware,omitempty"` + + // A count of child hardware. + ChildrenHardwareCount *uint `json:"childrenHardwareCount,omitempty" xmlrpc:"childrenHardwareCount,omitempty"` + + // A count of a piece of hardware's components. + ComponentCount *uint `json:"componentCount,omitempty" xmlrpc:"componentCount,omitempty"` + + // A piece of hardware's components. + Components []Hardware_Component `json:"components,omitempty" xmlrpc:"components,omitempty"` + + // A continuous data protection/server backup software component object. + ContinuousDataProtectionSoftwareComponent *Software_Component `json:"continuousDataProtectionSoftwareComponent,omitempty" xmlrpc:"continuousDataProtectionSoftwareComponent,omitempty"` + + // The current billable public outbound bandwidth for this hardware for the current billing cycle. + CurrentBillableBandwidthUsage *Float64 `json:"currentBillableBandwidthUsage,omitempty" xmlrpc:"currentBillableBandwidthUsage,omitempty"` + + // Information regarding the datacenter in which a piece of hardware resides. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // The name of the datacenter in which a piece of hardware resides. + DatacenterName *string `json:"datacenterName,omitempty" xmlrpc:"datacenterName,omitempty"` + + // A piece of hardware's local network domain name. + Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // All hardware that has uplink network connections to a piece of hardware. + DownlinkHardware []Hardware `json:"downlinkHardware,omitempty" xmlrpc:"downlinkHardware,omitempty"` + + // A count of all hardware that has uplink network connections to a piece of hardware. + DownlinkHardwareCount *uint `json:"downlinkHardwareCount,omitempty" xmlrpc:"downlinkHardwareCount,omitempty"` + + // All hardware that has uplink network connections to a piece of hardware. + DownlinkNetworkHardware []Hardware `json:"downlinkNetworkHardware,omitempty" xmlrpc:"downlinkNetworkHardware,omitempty"` + + // A count of all hardware that has uplink network connections to a piece of hardware. + DownlinkNetworkHardwareCount *uint `json:"downlinkNetworkHardwareCount,omitempty" xmlrpc:"downlinkNetworkHardwareCount,omitempty"` + + // A count of information regarding all servers attached to a piece of network hardware. + DownlinkServerCount *uint `json:"downlinkServerCount,omitempty" xmlrpc:"downlinkServerCount,omitempty"` + + // Information regarding all servers attached to a piece of network hardware. + DownlinkServers []Hardware `json:"downlinkServers,omitempty" xmlrpc:"downlinkServers,omitempty"` + + // A count of information regarding all virtual guests attached to a piece of network hardware. 
+ DownlinkVirtualGuestCount *uint `json:"downlinkVirtualGuestCount,omitempty" xmlrpc:"downlinkVirtualGuestCount,omitempty"` + + // Information regarding all virtual guests attached to a piece of network hardware. + DownlinkVirtualGuests []Virtual_Guest `json:"downlinkVirtualGuests,omitempty" xmlrpc:"downlinkVirtualGuests,omitempty"` + + // A count of all hardware downstream from a network device. + DownstreamHardwareBindingCount *uint `json:"downstreamHardwareBindingCount,omitempty" xmlrpc:"downstreamHardwareBindingCount,omitempty"` + + // All hardware downstream from a network device. + DownstreamHardwareBindings []Network_Component_Uplink_Hardware `json:"downstreamHardwareBindings,omitempty" xmlrpc:"downstreamHardwareBindings,omitempty"` + + // All network hardware downstream from the selected piece of hardware. + DownstreamNetworkHardware []Hardware `json:"downstreamNetworkHardware,omitempty" xmlrpc:"downstreamNetworkHardware,omitempty"` + + // A count of all network hardware downstream from the selected piece of hardware. + DownstreamNetworkHardwareCount *uint `json:"downstreamNetworkHardwareCount,omitempty" xmlrpc:"downstreamNetworkHardwareCount,omitempty"` + + // A count of all network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. + DownstreamNetworkHardwareWithIncidentCount *uint `json:"downstreamNetworkHardwareWithIncidentCount,omitempty" xmlrpc:"downstreamNetworkHardwareWithIncidentCount,omitempty"` + + // All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. + DownstreamNetworkHardwareWithIncidents []Hardware `json:"downstreamNetworkHardwareWithIncidents,omitempty" xmlrpc:"downstreamNetworkHardwareWithIncidents,omitempty"` + + // A count of information regarding all servers attached downstream to a piece of network hardware. + DownstreamServerCount *uint `json:"downstreamServerCount,omitempty" xmlrpc:"downstreamServerCount,omitempty"` + + // Information regarding all servers attached downstream to a piece of network hardware. + DownstreamServers []Hardware `json:"downstreamServers,omitempty" xmlrpc:"downstreamServers,omitempty"` + + // A count of information regarding all virtual guests attached to a piece of network hardware. + DownstreamVirtualGuestCount *uint `json:"downstreamVirtualGuestCount,omitempty" xmlrpc:"downstreamVirtualGuestCount,omitempty"` + + // Information regarding all virtual guests attached to a piece of network hardware. + DownstreamVirtualGuests []Virtual_Guest `json:"downstreamVirtualGuests,omitempty" xmlrpc:"downstreamVirtualGuests,omitempty"` + + // A count of the drive controllers contained within a piece of hardware. + DriveControllerCount *uint `json:"driveControllerCount,omitempty" xmlrpc:"driveControllerCount,omitempty"` + + // The drive controllers contained within a piece of hardware. + DriveControllers []Hardware_Component `json:"driveControllers,omitempty" xmlrpc:"driveControllers,omitempty"` + + // Information regarding a piece of hardware's associated EVault network storage service account. + EvaultNetworkStorage []Network_Storage `json:"evaultNetworkStorage,omitempty" xmlrpc:"evaultNetworkStorage,omitempty"` + + // A count of information regarding a piece of hardware's associated EVault network storage service account. + EvaultNetworkStorageCount *uint `json:"evaultNetworkStorageCount,omitempty" xmlrpc:"evaultNetworkStorageCount,omitempty"` + + // Information regarding a piece of hardware's firewall services. 
+ FirewallServiceComponent *Network_Component_Firewall `json:"firewallServiceComponent,omitempty" xmlrpc:"firewallServiceComponent,omitempty"` + + // Defines the fixed components in a fixed configuration bare metal server. + FixedConfigurationPreset *Product_Package_Preset `json:"fixedConfigurationPreset,omitempty" xmlrpc:"fixedConfigurationPreset,omitempty"` + + // A count of a piece of hardware's front-end or public network components. + FrontendNetworkComponentCount *uint `json:"frontendNetworkComponentCount,omitempty" xmlrpc:"frontendNetworkComponentCount,omitempty"` + + // A piece of hardware's front-end or public network components. + FrontendNetworkComponents []Network_Component `json:"frontendNetworkComponents,omitempty" xmlrpc:"frontendNetworkComponents,omitempty"` + + // A count of a hardware's frontend or public router. + FrontendRouterCount *uint `json:"frontendRouterCount,omitempty" xmlrpc:"frontendRouterCount,omitempty"` + + // A hardware's frontend or public router. + FrontendRouters []Hardware `json:"frontendRouters,omitempty" xmlrpc:"frontendRouters,omitempty"` + + // A name reflecting the hostname and domain of the hardware. This is created from the combined values of the hardware's hostname and domain name automatically, and thus should not be edited directly. + FullyQualifiedDomainName *string `json:"fullyQualifiedDomainName,omitempty" xmlrpc:"fullyQualifiedDomainName,omitempty"` + + // A hardware's universally unique identifier. + GlobalIdentifier *string `json:"globalIdentifier,omitempty" xmlrpc:"globalIdentifier,omitempty"` + + // A count of the hard drives contained within a piece of hardware. + HardDriveCount *uint `json:"hardDriveCount,omitempty" xmlrpc:"hardDriveCount,omitempty"` + + // The hard drives contained within a piece of hardware. + HardDrives []Hardware_Component `json:"hardDrives,omitempty" xmlrpc:"hardDrives,omitempty"` + + // The chassis that a piece of hardware is housed in. + HardwareChassis *Hardware_Chassis `json:"hardwareChassis,omitempty" xmlrpc:"hardwareChassis,omitempty"` + + // A hardware's function. + HardwareFunction *Hardware_Function `json:"hardwareFunction,omitempty" xmlrpc:"hardwareFunction,omitempty"` + + // A hardware's function. + HardwareFunctionDescription *string `json:"hardwareFunctionDescription,omitempty" xmlrpc:"hardwareFunctionDescription,omitempty"` + + // A hardware's status. + HardwareStatus *Hardware_Status `json:"hardwareStatus,omitempty" xmlrpc:"hardwareStatus,omitempty"` + + // A number reflecting the state of a hardware. + HardwareStatusId *int `json:"hardwareStatusId,omitempty" xmlrpc:"hardwareStatusId,omitempty"` + + // Determines if the hardware object has TPM enabled. + HasTrustedPlatformModuleBillingItemFlag *bool `json:"hasTrustedPlatformModuleBillingItemFlag,omitempty" xmlrpc:"hasTrustedPlatformModuleBillingItemFlag,omitempty"` + + // Information regarding a host IPS software component object. + HostIpsSoftwareComponent *Software_Component `json:"hostIpsSoftwareComponent,omitempty" xmlrpc:"hostIpsSoftwareComponent,omitempty"` + + // A hardware's hostname. + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // A server's hourly billing status. + HourlyBillingFlag *bool `json:"hourlyBillingFlag,omitempty" xmlrpc:"hourlyBillingFlag,omitempty"` + + // A hardware's internal identification number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The sum of all the inbound network traffic data for the last 30 days.
+ InboundBandwidthUsage *Float64 `json:"inboundBandwidthUsage,omitempty" xmlrpc:"inboundBandwidthUsage,omitempty"` + + // The total public inbound bandwidth for this hardware for the current billing cycle. + InboundPublicBandwidthUsage *Float64 `json:"inboundPublicBandwidthUsage,omitempty" xmlrpc:"inboundPublicBandwidthUsage,omitempty"` + + // Information regarding the last transaction a server performed. + LastTransaction *Provisioning_Version1_Transaction `json:"lastTransaction,omitempty" xmlrpc:"lastTransaction,omitempty"` + + // A piece of hardware's latest network monitoring incident. + LatestNetworkMonitorIncident *Network_Monitor_Version1_Incident `json:"latestNetworkMonitorIncident,omitempty" xmlrpc:"latestNetworkMonitorIncident,omitempty"` + + // Where a piece of hardware is located within SoftLayer's location hierarchy. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationPathString *string `json:"locationPathString,omitempty" xmlrpc:"locationPathString,omitempty"` + + // Information regarding a lockbox account associated with a server. + LockboxNetworkStorage *Network_Storage `json:"lockboxNetworkStorage,omitempty" xmlrpc:"lockboxNetworkStorage,omitempty"` + + // A flag indicating that the hardware is a managed resource. + ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"` + + // A hardware's serial number that is supplied by the manufacturer. + ManufacturerSerialNumber *string `json:"manufacturerSerialNumber,omitempty" xmlrpc:"manufacturerSerialNumber,omitempty"` + + // Information regarding a piece of hardware's memory. + Memory []Hardware_Component `json:"memory,omitempty" xmlrpc:"memory,omitempty"` + + // The amount of memory a piece of hardware has, measured in gigabytes. + MemoryCapacity *uint `json:"memoryCapacity,omitempty" xmlrpc:"memoryCapacity,omitempty"` + + // A count of information regarding a piece of hardware's memory. + MemoryCount *uint `json:"memoryCount,omitempty" xmlrpc:"memoryCount,omitempty"` + + // A piece of hardware's metric tracking object. + MetricTrackingObject *Metric_Tracking_Object_HardwareServer `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // A count of information regarding the monitoring agents associated with a piece of hardware. + MonitoringAgentCount *uint `json:"monitoringAgentCount,omitempty" xmlrpc:"monitoringAgentCount,omitempty"` + + // Information regarding the monitoring agents associated with a piece of hardware. + MonitoringAgents []Monitoring_Agent `json:"monitoringAgents,omitempty" xmlrpc:"monitoringAgents,omitempty"` + + // Information regarding the hardware's monitoring robot. + MonitoringRobot *Monitoring_Robot `json:"monitoringRobot,omitempty" xmlrpc:"monitoringRobot,omitempty"` + + // Information regarding a piece of hardware's network monitoring services. + MonitoringServiceComponent *Network_Monitor_Version1_Query_Host_Stratum `json:"monitoringServiceComponent,omitempty" xmlrpc:"monitoringServiceComponent,omitempty"` + + // The monitoring service flag eligibility status for a piece of hardware. + MonitoringServiceEligibilityFlag *bool `json:"monitoringServiceEligibilityFlag,omitempty" xmlrpc:"monitoringServiceEligibilityFlag,omitempty"` + + // The service flag status for a piece of hardware. + MonitoringServiceFlag *bool `json:"monitoringServiceFlag,omitempty" xmlrpc:"monitoringServiceFlag,omitempty"` + + // Information regarding a piece of hardware's motherboard. 
+ Motherboard *Hardware_Component `json:"motherboard,omitempty" xmlrpc:"motherboard,omitempty"` + + // A count of information regarding a piece of hardware's network cards. + NetworkCardCount *uint `json:"networkCardCount,omitempty" xmlrpc:"networkCardCount,omitempty"` + + // Information regarding a piece of hardware's network cards. + NetworkCards []Hardware_Component `json:"networkCards,omitempty" xmlrpc:"networkCards,omitempty"` + + // A count of returns a hardware's network components. + NetworkComponentCount *uint `json:"networkComponentCount,omitempty" xmlrpc:"networkComponentCount,omitempty"` + + // Returns a hardware's network components. + NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // The gateway member if this device is part of a network gateway. + NetworkGatewayMember *Network_Gateway_Member `json:"networkGatewayMember,omitempty" xmlrpc:"networkGatewayMember,omitempty"` + + // Whether or not this device is part of a network gateway. + NetworkGatewayMemberFlag *bool `json:"networkGatewayMemberFlag,omitempty" xmlrpc:"networkGatewayMemberFlag,omitempty"` + + // A piece of hardware's network management IP address. + NetworkManagementIpAddress *string `json:"networkManagementIpAddress,omitempty" xmlrpc:"networkManagementIpAddress,omitempty"` + + // All servers with failed monitoring that are attached downstream to a piece of hardware. + NetworkMonitorAttachedDownHardware []Hardware `json:"networkMonitorAttachedDownHardware,omitempty" xmlrpc:"networkMonitorAttachedDownHardware,omitempty"` + + // A count of all servers with failed monitoring that are attached downstream to a piece of hardware. + NetworkMonitorAttachedDownHardwareCount *uint `json:"networkMonitorAttachedDownHardwareCount,omitempty" xmlrpc:"networkMonitorAttachedDownHardwareCount,omitempty"` + + // A count of virtual guests that are attached downstream to a hardware that have failed monitoring + NetworkMonitorAttachedDownVirtualGuestCount *uint `json:"networkMonitorAttachedDownVirtualGuestCount,omitempty" xmlrpc:"networkMonitorAttachedDownVirtualGuestCount,omitempty"` + + // Virtual guests that are attached downstream to a hardware that have failed monitoring + NetworkMonitorAttachedDownVirtualGuests []Virtual_Guest `json:"networkMonitorAttachedDownVirtualGuests,omitempty" xmlrpc:"networkMonitorAttachedDownVirtualGuests,omitempty"` + + // A count of information regarding a piece of hardware's network monitors. + NetworkMonitorCount *uint `json:"networkMonitorCount,omitempty" xmlrpc:"networkMonitorCount,omitempty"` + + // A count of the status of all of a piece of hardware's network monitoring incidents. + NetworkMonitorIncidentCount *uint `json:"networkMonitorIncidentCount,omitempty" xmlrpc:"networkMonitorIncidentCount,omitempty"` + + // The status of all of a piece of hardware's network monitoring incidents. + NetworkMonitorIncidents []Network_Monitor_Version1_Incident `json:"networkMonitorIncidents,omitempty" xmlrpc:"networkMonitorIncidents,omitempty"` + + // Information regarding a piece of hardware's network monitors. + NetworkMonitors []Network_Monitor_Version1_Query_Host `json:"networkMonitors,omitempty" xmlrpc:"networkMonitors,omitempty"` + + // The value of a hardware's network status attribute. + NetworkStatus *string `json:"networkStatus,omitempty" xmlrpc:"networkStatus,omitempty"` + + // The hardware's related network status attribute. 
+ NetworkStatusAttribute *Hardware_Attribute `json:"networkStatusAttribute,omitempty" xmlrpc:"networkStatusAttribute,omitempty"` + + // Information regarding a piece of hardware's associated network storage service account. + NetworkStorage []Network_Storage `json:"networkStorage,omitempty" xmlrpc:"networkStorage,omitempty"` + + // A count of information regarding a piece of hardware's associated network storage service account. + NetworkStorageCount *uint `json:"networkStorageCount,omitempty" xmlrpc:"networkStorageCount,omitempty"` + + // A count of the network virtual LANs (VLANs) associated with a piece of hardware's network components. + NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"` + + // The network virtual LANs (VLANs) associated with a piece of hardware's network components. + NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"` + + // A hardware's allotted bandwidth for the next billing cycle (measured in GB). + NextBillingCycleBandwidthAllocation *Float64 `json:"nextBillingCycleBandwidthAllocation,omitempty" xmlrpc:"nextBillingCycleBandwidthAllocation,omitempty"` + + // A small note about a piece of hardware to use at your discretion. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // no documentation yet + NotesHistory []Hardware_Note `json:"notesHistory,omitempty" xmlrpc:"notesHistory,omitempty"` + + // A count of + NotesHistoryCount *uint `json:"notesHistoryCount,omitempty" xmlrpc:"notesHistoryCount,omitempty"` + + // Information regarding a piece of hardware's operating system. + OperatingSystem *Software_Component_OperatingSystem `json:"operatingSystem,omitempty" xmlrpc:"operatingSystem,omitempty"` + + // A hardware's operating system software description. + OperatingSystemReferenceCode *string `json:"operatingSystemReferenceCode,omitempty" xmlrpc:"operatingSystemReferenceCode,omitempty"` + + // The sum of all the outbound network traffic data for the last 30 days. + OutboundBandwidthUsage *Float64 `json:"outboundBandwidthUsage,omitempty" xmlrpc:"outboundBandwidthUsage,omitempty"` + + // The total public outbound bandwidth for this hardware for the current billing cycle. + OutboundPublicBandwidthUsage *Float64 `json:"outboundPublicBandwidthUsage,omitempty" xmlrpc:"outboundPublicBandwidthUsage,omitempty"` + + // Parent Hardware. + ParentHardware *Hardware `json:"parentHardware,omitempty" xmlrpc:"parentHardware,omitempty"` + + // Information regarding the Point of Presence (PoP) location in which a piece of hardware resides. + PointOfPresenceLocation *Location `json:"pointOfPresenceLocation,omitempty" xmlrpc:"pointOfPresenceLocation,omitempty"` + + // URI of the script to be downloaded and executed after installation is complete. + PostInstallScriptUri *string `json:"postInstallScriptUri,omitempty" xmlrpc:"postInstallScriptUri,omitempty"` + + // A count of the power components for a hardware object. + PowerComponentCount *uint `json:"powerComponentCount,omitempty" xmlrpc:"powerComponentCount,omitempty"` + + // The power components for a hardware object. + PowerComponents []Hardware_Power_Component `json:"powerComponents,omitempty" xmlrpc:"powerComponents,omitempty"` + + // Information regarding a piece of hardware's power supply. + PowerSupply []Hardware_Component `json:"powerSupply,omitempty" xmlrpc:"powerSupply,omitempty"` + + // A count of information regarding a piece of hardware's power supply. 
+ PowerSupplyCount *uint `json:"powerSupplyCount,omitempty" xmlrpc:"powerSupplyCount,omitempty"` + + // The hardware's primary private IP address. + PrimaryBackendIpAddress *string `json:"primaryBackendIpAddress,omitempty" xmlrpc:"primaryBackendIpAddress,omitempty"` + + // Information regarding the hardware's primary back-end network component. + PrimaryBackendNetworkComponent *Network_Component `json:"primaryBackendNetworkComponent,omitempty" xmlrpc:"primaryBackendNetworkComponent,omitempty"` + + // The hardware's primary public IP address. + PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"` + + // Information regarding the hardware's primary public network component. + PrimaryNetworkComponent *Network_Component `json:"primaryNetworkComponent,omitempty" xmlrpc:"primaryNetworkComponent,omitempty"` + + // Whether the hardware only has access to the private network. + PrivateNetworkOnlyFlag *bool `json:"privateNetworkOnlyFlag,omitempty" xmlrpc:"privateNetworkOnlyFlag,omitempty"` + + // The total number of processor cores, summed from all processors that are attached to a piece of hardware + ProcessorCoreAmount *uint `json:"processorCoreAmount,omitempty" xmlrpc:"processorCoreAmount,omitempty"` + + // A count of information regarding a piece of hardware's processors. + ProcessorCount *uint `json:"processorCount,omitempty" xmlrpc:"processorCount,omitempty"` + + // The total number of physical processor cores, summed from all processors that are attached to a piece of hardware + ProcessorPhysicalCoreAmount *uint `json:"processorPhysicalCoreAmount,omitempty" xmlrpc:"processorPhysicalCoreAmount,omitempty"` + + // Information regarding a piece of hardware's processors. + Processors []Hardware_Component `json:"processors,omitempty" xmlrpc:"processors,omitempty"` + + // no documentation yet + ProvisionDate *Time `json:"provisionDate,omitempty" xmlrpc:"provisionDate,omitempty"` + + // no documentation yet + Rack *Location `json:"rack,omitempty" xmlrpc:"rack,omitempty"` + + // A count of the RAID controllers contained within a piece of hardware. + RaidControllerCount *uint `json:"raidControllerCount,omitempty" xmlrpc:"raidControllerCount,omitempty"` + + // The RAID controllers contained within a piece of hardware. + RaidControllers []Hardware_Component `json:"raidControllers,omitempty" xmlrpc:"raidControllers,omitempty"` + + // A count of recent events that impact this hardware. + RecentEventCount *uint `json:"recentEventCount,omitempty" xmlrpc:"recentEventCount,omitempty"` + + // Recent events that impact this hardware. + RecentEvents []Notification_Occurrence_Event `json:"recentEvents,omitempty" xmlrpc:"recentEvents,omitempty"` + + // A count of user credentials to issue commands and/or interact with the server's remote management card. + RemoteManagementAccountCount *uint `json:"remoteManagementAccountCount,omitempty" xmlrpc:"remoteManagementAccountCount,omitempty"` + + // User credentials to issue commands and/or interact with the server's remote management card. + RemoteManagementAccounts []Hardware_Component_RemoteManagement_User `json:"remoteManagementAccounts,omitempty" xmlrpc:"remoteManagementAccounts,omitempty"` + + // A hardware's associated remote management component. This is normally IPMI. 
+ RemoteManagementComponent *Network_Component `json:"remoteManagementComponent,omitempty" xmlrpc:"remoteManagementComponent,omitempty"` + + // A count of + ResourceConfigurationCount *uint `json:"resourceConfigurationCount,omitempty" xmlrpc:"resourceConfigurationCount,omitempty"` + + // no documentation yet + ResourceConfigurations []Hardware_Resource_Configuration `json:"resourceConfigurations,omitempty" xmlrpc:"resourceConfigurations,omitempty"` + + // A count of the resource groups in which this hardware is a member. + ResourceGroupCount *uint `json:"resourceGroupCount,omitempty" xmlrpc:"resourceGroupCount,omitempty"` + + // A count of + ResourceGroupMemberReferenceCount *uint `json:"resourceGroupMemberReferenceCount,omitempty" xmlrpc:"resourceGroupMemberReferenceCount,omitempty"` + + // no documentation yet + ResourceGroupMemberReferences []Resource_Group_Member `json:"resourceGroupMemberReferences,omitempty" xmlrpc:"resourceGroupMemberReferences,omitempty"` + + // A count of + ResourceGroupRoleCount *uint `json:"resourceGroupRoleCount,omitempty" xmlrpc:"resourceGroupRoleCount,omitempty"` + + // no documentation yet + ResourceGroupRoles []Resource_Group_Role `json:"resourceGroupRoles,omitempty" xmlrpc:"resourceGroupRoles,omitempty"` + + // The resource groups in which this hardware is a member. + ResourceGroups []Resource_Group `json:"resourceGroups,omitempty" xmlrpc:"resourceGroups,omitempty"` + + // A count of a hardware's routers. + RouterCount *uint `json:"routerCount,omitempty" xmlrpc:"routerCount,omitempty"` + + // A hardware's routers. + Routers []Hardware `json:"routers,omitempty" xmlrpc:"routers,omitempty"` + + // A count of collection of scale assets this hardware corresponds to. + ScaleAssetCount *uint `json:"scaleAssetCount,omitempty" xmlrpc:"scaleAssetCount,omitempty"` + + // Collection of scale assets this hardware corresponds to. + ScaleAssets []Scale_Asset `json:"scaleAssets,omitempty" xmlrpc:"scaleAssets,omitempty"` + + // A count of information regarding a piece of hardware's vulnerability scan requests. + SecurityScanRequestCount *uint `json:"securityScanRequestCount,omitempty" xmlrpc:"securityScanRequestCount,omitempty"` + + // Information regarding a piece of hardware's vulnerability scan requests. + SecurityScanRequests []Network_Security_Scanner_Request `json:"securityScanRequests,omitempty" xmlrpc:"securityScanRequests,omitempty"` + + // A hardware's serial number that is supplied by SoftLayer. + SerialNumber *string `json:"serialNumber,omitempty" xmlrpc:"serialNumber,omitempty"` + + // Information regarding the server room in which the hardware is located. + ServerRoom *Location `json:"serverRoom,omitempty" xmlrpc:"serverRoom,omitempty"` + + // Information regarding the piece of hardware's service provider. + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // no documentation yet + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // A hardware's internal identification number at its service provider + ServiceProviderResourceId *int `json:"serviceProviderResourceId,omitempty" xmlrpc:"serviceProviderResourceId,omitempty"` + + // A count of information regarding a piece of hardware's installed software. + SoftwareComponentCount *uint `json:"softwareComponentCount,omitempty" xmlrpc:"softwareComponentCount,omitempty"` + + // Information regarding a piece of hardware's installed software. 
+ SoftwareComponents []Software_Component `json:"softwareComponents,omitempty" xmlrpc:"softwareComponents,omitempty"` + + // Information regarding the billing item for a spare pool server. + SparePoolBillingItem *Billing_Item_Hardware `json:"sparePoolBillingItem,omitempty" xmlrpc:"sparePoolBillingItem,omitempty"` + + // A count of SSH keys to be installed on the server during provisioning or an OS reload. + SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"` + + // SSH keys to be installed on the server during provisioning or an OS reload. + SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"` + + // A count of + StorageNetworkComponentCount *uint `json:"storageNetworkComponentCount,omitempty" xmlrpc:"storageNetworkComponentCount,omitempty"` + + // no documentation yet + StorageNetworkComponents []Network_Component `json:"storageNetworkComponents,omitempty" xmlrpc:"storageNetworkComponents,omitempty"` + + // A count of + TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"` + + // no documentation yet + TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"` + + // no documentation yet + TopLevelLocation *Location `json:"topLevelLocation,omitempty" xmlrpc:"topLevelLocation,omitempty"` + + // An account's associated upgrade request object, if any. + UpgradeRequest *Product_Upgrade_Request `json:"upgradeRequest,omitempty" xmlrpc:"upgradeRequest,omitempty"` + + // The network device connected to a piece of hardware. + UplinkHardware *Hardware `json:"uplinkHardware,omitempty" xmlrpc:"uplinkHardware,omitempty"` + + // A count of information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. + UplinkNetworkComponentCount *uint `json:"uplinkNetworkComponentCount,omitempty" xmlrpc:"uplinkNetworkComponentCount,omitempty"` + + // Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. + UplinkNetworkComponents []Network_Component `json:"uplinkNetworkComponents,omitempty" xmlrpc:"uplinkNetworkComponents,omitempty"` + + // A string containing custom user data for a hardware order. + UserData []Hardware_Attribute `json:"userData,omitempty" xmlrpc:"userData,omitempty"` + + // A count of a string containing custom user data for a hardware order. + UserDataCount *uint `json:"userDataCount,omitempty" xmlrpc:"userDataCount,omitempty"` + + // Information regarding the virtual chassis for a piece of hardware. + VirtualChassis *Hardware_Group `json:"virtualChassis,omitempty" xmlrpc:"virtualChassis,omitempty"` + + // A count of information regarding the virtual chassis siblings for a piece of hardware. + VirtualChassisSiblingCount *uint `json:"virtualChassisSiblingCount,omitempty" xmlrpc:"virtualChassisSiblingCount,omitempty"` + + // Information regarding the virtual chassis siblings for a piece of hardware. + VirtualChassisSiblings []Hardware `json:"virtualChassisSiblings,omitempty" xmlrpc:"virtualChassisSiblings,omitempty"` + + // A piece of hardware's virtual host record. + VirtualHost *Virtual_Host `json:"virtualHost,omitempty" xmlrpc:"virtualHost,omitempty"` + + // A count of information regarding a piece of hardware's virtual software licenses. + VirtualLicenseCount *uint `json:"virtualLicenseCount,omitempty" xmlrpc:"virtualLicenseCount,omitempty"` + + // Information regarding a piece of hardware's virtual software licenses.
+ VirtualLicenses []Software_VirtualLicense `json:"virtualLicenses,omitempty" xmlrpc:"virtualLicenses,omitempty"` + + // Information regarding the bandwidth allotment to which a piece of hardware belongs. + VirtualRack *Network_Bandwidth_Version1_Allotment `json:"virtualRack,omitempty" xmlrpc:"virtualRack,omitempty"` + + // The id of the bandwidth allotment belonging to a piece of hardware. + VirtualRackId *int `json:"virtualRackId,omitempty" xmlrpc:"virtualRackId,omitempty"` + + // The name of the bandwidth allotment belonging to a piece of hardware. + VirtualRackName *string `json:"virtualRackName,omitempty" xmlrpc:"virtualRackName,omitempty"` + + // A piece of hardware's virtualization platform software. + VirtualizationPlatform *Software_Component `json:"virtualizationPlatform,omitempty" xmlrpc:"virtualizationPlatform,omitempty"` +} + +// The SoftLayer_Hardware_Attribute type contains general information for a hardware attribute. Hardware attributes can be assigned to specific hardware objects to describe relatively arbitrary information. +type Hardware_Attribute struct { + Entity + + // The type of hardware attribute that this represents. + HardwareAttributeType *Hardware_Attribute_Type `json:"hardwareAttributeType,omitempty" xmlrpc:"hardwareAttributeType,omitempty"` + + // The unique identifier of a hardware attribute's type. + HardwareAttributeTypeId *int `json:"hardwareAttributeTypeId,omitempty" xmlrpc:"hardwareAttributeTypeId,omitempty"` + + // A hardware attribute's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware attribute's value. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Retrieve attributes associated with a hardware object. +type Hardware_Attribute_Type struct { + Entity + + // The attribute type key name or code. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // The attribute type name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Hardware_Attribute_UserData struct { + Hardware_Attribute +} + +// The SoftLayer_Hardware_Benchmark_Certification data type contains general information relating to a single SoftLayer hardware benchmark certification document. +type Hardware_Benchmark_Certification struct { + Entity + + // Information regarding a benchmark certification result's associated SoftLayer customer account. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The internal identifier of the SoftLayer customer account associated with a benchmark certification result. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The date that a benchmark certification result was generated. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Information regarding the piece of hardware on which a benchmark certification test was performed. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A benchmark certification result's associated hardware's internal identification number. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` +}
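+ +// Since every field on these generated types is a pointer (so unset values can be omitted from API payloads), callers need a nil check before dereferencing; a minimal sketch, assuming hw is a datatypes.Hardware value obtained elsewhere: +// +//	if hw.FullyQualifiedDomainName != nil { +//		fmt.Println(*hw.FullyQualifiedDomainName) +//	} + +// Every piece of hardware in SoftLayer's datacenters, including customer servers, is housed in one of many hardware chassis. The SoftLayer_Hardware_Chassis data type defines these chassis.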
+type Hardware_Chassis struct { + Entity + + // no documentation yet + BackplaneCapacity *string `json:"backplaneCapacity,omitempty" xmlrpc:"backplaneCapacity,omitempty"` + + // no documentation yet + BayCapacity *string `json:"bayCapacity,omitempty" xmlrpc:"bayCapacity,omitempty"` + + // no documentation yet + BookCapacity *string `json:"bookCapacity,omitempty" xmlrpc:"bookCapacity,omitempty"` + + // no documentation yet + DriveCapacity *string `json:"driveCapacity,omitempty" xmlrpc:"driveCapacity,omitempty"` + + // no documentation yet + DriveControllerCapacity *string `json:"driveControllerCapacity,omitempty" xmlrpc:"driveControllerCapacity,omitempty"` + + // A hardware form factor internal identifier. + FormFactorId *int `json:"formFactorId,omitempty" xmlrpc:"formFactorId,omitempty"` + + // no documentation yet + GpuCapacity *string `json:"gpuCapacity,omitempty" xmlrpc:"gpuCapacity,omitempty"` + + // A hardware's function. + HardwareFunction *Hardware_Function `json:"hardwareFunction,omitempty" xmlrpc:"hardwareFunction,omitempty"` + + // A hardware chassis' internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware chassis' manufacturer. + Manufacturer *string `json:"manufacturer,omitempty" xmlrpc:"manufacturer,omitempty"` + + // A hardware chassis' name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + PowerCapacity *string `json:"powerCapacity,omitempty" xmlrpc:"powerCapacity,omitempty"` + + // The physical size of a hardware chassis. Currently this relates to the 'U' size of a chassis by default. + UnitSize *int `json:"unitSize,omitempty" xmlrpc:"unitSize,omitempty"` + + // A hardware chassis' revision number. + Version *string `json:"version,omitempty" xmlrpc:"version,omitempty"` +} + +// The SoftLayer_Hardware_Component data type abstracts information related to a hardware component. +type Hardware_Component struct { + Entity + + // A component's capacity. + Capacity *Float64 `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"` + + // A component's sub-components. Devices that are usually integrated or in some way attached to a component. + Children []Hardware_Component `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of a component's sub-components. Devices that are usually integrated or in some way attached to a component. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // A count of + DownlinkHardwareComponentCount *uint `json:"downlinkHardwareComponentCount,omitempty" xmlrpc:"downlinkHardwareComponentCount,omitempty"` + + // no documentation yet + DownlinkHardwareComponents []Hardware_Component `json:"downlinkHardwareComponents,omitempty" xmlrpc:"downlinkHardwareComponents,omitempty"` + + // The hardware object that this component belongs to. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The general group of component models. + HardwareComponentModel *Hardware_Component_Model `json:"hardwareComponentModel,omitempty" xmlrpc:"hardwareComponentModel,omitempty"` + + // The internal identifier of a hardware component's component model. + HardwareComponentModelId *int `json:"hardwareComponentModelId,omitempty" xmlrpc:"hardwareComponentModelId,omitempty"` + + // A component's type. + HardwareComponentType *Hardware_Component_Type `json:"hardwareComponentType,omitempty" xmlrpc:"hardwareComponentType,omitempty"` + + // The internal identifier of the hardware that a hardware component resides inside.
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // A hardware component's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date that a hardware component was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A count of the module's hardware components + ModuleComponentCount *uint `json:"moduleComponentCount,omitempty" xmlrpc:"moduleComponentCount,omitempty"` + + // The module's hardware components + ModuleComponents []Hardware_Component `json:"moduleComponents,omitempty" xmlrpc:"moduleComponents,omitempty"` + + // A count of the module's hardware components + ModuleHardwareComponentCount *uint `json:"moduleHardwareComponentCount,omitempty" xmlrpc:"moduleHardwareComponentCount,omitempty"` + + // The module's hardware components + ModuleHardwareComponents []Hardware_Component `json:"moduleHardwareComponents,omitempty" xmlrpc:"moduleHardwareComponents,omitempty"` + + // A count of the module's network components + ModuleNetworkComponentCount *uint `json:"moduleNetworkComponentCount,omitempty" xmlrpc:"moduleNetworkComponentCount,omitempty"` + + // The module's network components + ModuleNetworkComponents []Hardware_Component `json:"moduleNetworkComponents,omitempty" xmlrpc:"moduleNetworkComponents,omitempty"` + + // The name of this component as referenced by the operating system. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the component's local ethernet and remote management interfaces + NetworkComponentCount *uint `json:"networkComponentCount,omitempty" xmlrpc:"networkComponentCount,omitempty"` + + // The component's local ethernet and remote management interfaces + NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // The account this component belongs to. + Owner *Account `json:"owner,omitempty" xmlrpc:"owner,omitempty"` + + // A component's parent. Devices that are usually integrated or in some way attached to a component. + Parent *Hardware_Component `json:"parent,omitempty" xmlrpc:"parent,omitempty"` + + // no documentation yet + ParentModule *Hardware_Component `json:"parentModule,omitempty" xmlrpc:"parentModule,omitempty"` + + // no documentation yet + PrefixAttribute *Hardware_Component_Model_Generic_Attribute `json:"prefixAttribute,omitempty" xmlrpc:"prefixAttribute,omitempty"` + + // A RAID controller's RAID mode. + RaidMode *string `json:"raidMode,omitempty" xmlrpc:"raidMode,omitempty"` + + // The component serial number. + SerialNumber *string `json:"serialNumber,omitempty" xmlrpc:"serialNumber,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // A hardware's internal identification number at its service provider + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // A count of + UplinkHardwareComponentCount *uint `json:"uplinkHardwareComponentCount,omitempty" xmlrpc:"uplinkHardwareComponentCount,omitempty"` + + // no documentation yet + UplinkHardwareComponents []Hardware_Component `json:"uplinkHardwareComponents,omitempty" xmlrpc:"uplinkHardwareComponents,omitempty"` +} + +// The SoftLayer_Hardware_Component_Attribute data type contains general information relating to a single hardware setting or attribute for a component model. For example, a RAID controller may be set up for many different RAID configurations.
A RAID controller with a configuration of RAID-1 will have a single attribute for this RAID setting. +type Hardware_Component_Attribute struct { + Entity + + // A hardware component attribute's associated [[SoftLayer_Hardware_Component|Hardware Component]]. + HardwareComponent *Hardware_Component `json:"hardwareComponent,omitempty" xmlrpc:"hardwareComponent,omitempty"` + + // A hardware component attribute's associated [[SoftLayer_Hardware_Component_Attribute_Type|type]]. + HardwareComponentAttributeType *Hardware_Component_Attribute_Type `json:"hardwareComponentAttributeType,omitempty" xmlrpc:"hardwareComponentAttributeType,omitempty"` + + // A hardware component attribute's associated [[SoftLayer_Hardware_Component_Attribute_Type|type]] Id. + HardwareComponentAttributeTypeId *int `json:"hardwareComponentAttributeTypeId,omitempty" xmlrpc:"hardwareComponentAttributeTypeId,omitempty"` + + // A hardware component attribute's associated [[SoftLayer_Hardware_Component|hardware component]] Id. + HardwareComponentId *int `json:"hardwareComponentId,omitempty" xmlrpc:"hardwareComponentId,omitempty"` + + // A hardware component attribute's value. A value can have many different values depending on the attributes [[SoftLayer_Hardware_Component_Attribute_Type|type]]. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Hardware_Component_Attribute_Type data type contains general information for the type of an attribute for a hardware component. +type Hardware_Component_Attribute_Type struct { + Entity + + // The description for the date that a hardware component attribute type's [[SoftLayer_Hardware_Component_Attribute|Attribute]] contains. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A hardware component attribute type's Id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware component attribute type's unique name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A hardware component attribute type's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Hardware_Component_DriveController data type abstracts information related to a drive controller. +type Hardware_Component_DriveController struct { + Hardware_Component +} + +// The SoftLayer_Hardware_Component_HardDrive data type abstracts information related to a hard drive. +type Hardware_Component_HardDrive struct { + Hardware_Component + + // A count of the attached component partitions. + PartitionCount *uint `json:"partitionCount,omitempty" xmlrpc:"partitionCount,omitempty"` + + // The attached component partitions. + Partitions []Hardware_Component_Partition `json:"partitions,omitempty" xmlrpc:"partitions,omitempty"` +} + +// The SoftLayer_Hardware_Component_Model data type contains general information relating to a single SoftLayer component model. A component model represents a vendor specific representation of a hardware component. Every piece of hardware on a server will have a specific hardware component model. 
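Since every field on these generated types is an `omitempty` pointer, callers can distinguish "absent" from a zero value, but they must also build and read values through pointers. The sketch below illustrates that pattern with the attribute type above; it assumes the `sl` helper package that ships alongside these datatypes (its `String`/`Int` functions return pointers), and the attribute values are invented for illustration.

```go
// Illustrative sketch only, not part of the generated file.
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	// sl.Int and sl.String return pointers, which the omitempty pointer
	// fields expect. The component id and RAID value here are made up.
	attr := datatypes.Hardware_Component_Attribute{
		HardwareComponentId: sl.Int(12345),
		Value:               sl.String("RAID-1"),
	}

	// Read defensively: any field may be nil when the API omitted it.
	if attr.Value != nil {
		fmt.Println("attribute value:", *attr.Value)
	}
}
```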
+// The SoftLayer_Hardware_Component_Model data type contains general information relating to a single SoftLayer component model. A component model represents a vendor specific representation of a hardware component. Every piece of hardware on a server will have a specific hardware component model.
+type Hardware_Component_Model struct {
+	Entity
+
+	// no documentation yet
+	ArchitectureType *Hardware_Component_Model_Architecture_Type `json:"architectureType,omitempty" xmlrpc:"architectureType,omitempty"`
+
+	// no documentation yet
+	ArchitectureTypeId *string `json:"architectureTypeId,omitempty" xmlrpc:"architectureTypeId,omitempty"`
+
+	// A count of
+	AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+	// no documentation yet
+	Attributes []Hardware_Component_Model_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+	// A component model's capacity. The capacity of a component model depends on the model itself. For Example: Hard drives have a capacity that reflects the amount of data that hard drive can store.
+	Capacity *Float64 `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"`
+
+	// A count of
+	CompatibleArrayTypeCount *uint `json:"compatibleArrayTypeCount,omitempty" xmlrpc:"compatibleArrayTypeCount,omitempty"`
+
+	// no documentation yet
+	CompatibleArrayTypes []Configuration_Storage_Group_Array_Type `json:"compatibleArrayTypes,omitempty" xmlrpc:"compatibleArrayTypes,omitempty"`
+
+	// A count of all the component models that are compatible with a hardware component model.
+	CompatibleChildComponentModelCount *uint `json:"compatibleChildComponentModelCount,omitempty" xmlrpc:"compatibleChildComponentModelCount,omitempty"`
+
+	// All the component models that are compatible with a hardware component model.
+	CompatibleChildComponentModels []Hardware_Component_Model `json:"compatibleChildComponentModels,omitempty" xmlrpc:"compatibleChildComponentModels,omitempty"`
+
+	// A count of all the component models that a hardware component model is compatible with.
+	CompatibleParentComponentModelCount *uint `json:"compatibleParentComponentModelCount,omitempty" xmlrpc:"compatibleParentComponentModelCount,omitempty"`
+
+	// All the component models that a hardware component model is compatible with.
+	CompatibleParentComponentModels []Hardware_Component_Model `json:"compatibleParentComponentModels,omitempty" xmlrpc:"compatibleParentComponentModels,omitempty"`
+
+	// A colon delimited list of hardware component model attributes.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A hardware component model's physical components in inventory.
+	HardwareComponents []Hardware_Component `json:"hardwareComponents,omitempty" xmlrpc:"hardwareComponents,omitempty"`
+
+	// The non-vendor specific generic component model for a hardware component model.
+	HardwareGenericComponentModel *Hardware_Component_Model_Generic `json:"hardwareGenericComponentModel,omitempty" xmlrpc:"hardwareGenericComponentModel,omitempty"`
+
+	// The internal identifier of the generic component model for a component model.
+	HardwareGenericComponentModelId *int `json:"hardwareGenericComponentModelId,omitempty" xmlrpc:"hardwareGenericComponentModelId,omitempty"`
+
+	// A hardware component model's internal identifier number.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	InfinibandCompatibleAttribute *Hardware_Component_Model_Attribute `json:"infinibandCompatibleAttribute,omitempty" xmlrpc:"infinibandCompatibleAttribute,omitempty"`
+
+	// no documentation yet
+	IsFlexSkuCompatible *bool `json:"isFlexSkuCompatible,omitempty" xmlrpc:"isFlexSkuCompatible,omitempty"`
+
+	// no documentation yet
+	IsInfinibandCompatible *bool `json:"isInfinibandCompatible,omitempty" xmlrpc:"isInfinibandCompatible,omitempty"`
+
+	// no documentation yet
+	LongDescription *string `json:"longDescription,omitempty" xmlrpc:"longDescription,omitempty"`
+
+	// A hardware component model's manufacturer.
+	Manufacturer *string `json:"manufacturer,omitempty" xmlrpc:"manufacturer,omitempty"`
+
+	// The model name of a hardware component model.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// A motherboard's average reboot time.
+	RebootTime *Hardware_Component_Motherboard_Reboot_Time `json:"rebootTime,omitempty" xmlrpc:"rebootTime,omitempty"`
+
+	// A hardware component model's type.
+	Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// A count of the types of attributes that are allowed for a given hardware component model.
+	ValidAttributeTypeCount *uint `json:"validAttributeTypeCount,omitempty" xmlrpc:"validAttributeTypeCount,omitempty"`
+
+	// The types of attributes that are allowed for a given hardware component model.
+	ValidAttributeTypes []Hardware_Component_Model_Attribute_Type `json:"validAttributeTypes,omitempty" xmlrpc:"validAttributeTypes,omitempty"`
+
+	// The model number or model description of a hardware component model.
+	Version *string `json:"version,omitempty" xmlrpc:"version,omitempty"`
+}
+
+// no documentation yet
+type Hardware_Component_Model_Architecture_Type struct {
+	Entity
+
+	// no documentation yet
+	Children []Hardware_Component_Model_Architecture_Type `json:"children,omitempty" xmlrpc:"children,omitempty"`
+
+	// A count of
+	ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// no documentation yet
+	Parent *Hardware_Component_Model_Architecture_Type `json:"parent,omitempty" xmlrpc:"parent,omitempty"`
+
+	// no documentation yet
+	ParentId *string `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_Model_Attribute data type contains general information relating to a single hardware setting or attribute for a component model.
+type Hardware_Component_Model_Attribute struct {
+	Entity
+
+	// A hardware component model attribute's associated [[SoftLayer_Hardware_Component_Model_Attribute_Type|type]] Id.
+	AttributeTypeId *int `json:"attributeTypeId,omitempty" xmlrpc:"attributeTypeId,omitempty"`
+
+	// no documentation yet
+	HardwareComponent *Hardware_Component_Model `json:"hardwareComponent,omitempty" xmlrpc:"hardwareComponent,omitempty"`
+
+	// no documentation yet
+	HardwareComponentAttributeType *Hardware_Component_Model_Attribute_Type `json:"hardwareComponentAttributeType,omitempty" xmlrpc:"hardwareComponentAttributeType,omitempty"`
+
+	// A hardware component model attribute's associated [[SoftLayer_Hardware_Component_Model|hardware component model]] Id.
+	HardwareComponentModelId *int `json:"hardwareComponentModelId,omitempty" xmlrpc:"hardwareComponentModelId,omitempty"`
+
+	// A hardware component model attribute's value. A value can have many different values depending on the attribute's [[SoftLayer_Hardware_Component_Model_Attribute_Type|type]].
+	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_Model_Attribute_Type data type contains general information for the type of an attribute for a hardware component model.
+type Hardware_Component_Model_Attribute_Type struct {
+	Entity
+
+	// The description for the data that a hardware component model type's [[SoftLayer_Hardware_Component_Model_Attribute|Attribute]] contains.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A hardware component model attribute type's Id.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A hardware component model attribute type's unique name.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// A hardware component model attribute type's name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// A count of
+	ValidComponentTypeCount *uint `json:"validComponentTypeCount,omitempty" xmlrpc:"validComponentTypeCount,omitempty"`
+
+	// no documentation yet
+	ValidComponentTypes []Hardware_Component_Type `json:"validComponentTypes,omitempty" xmlrpc:"validComponentTypes,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_Model_Generic data type contains general information relating to a single SoftLayer generic component model. A generic component model represents a non-vendor specific representation of a hardware component. Frequently SoftLayer utilizes components from various vendors in the servers they provision. For Example: Several different vendors produce 6GB DDR2 memory. The generic component model for the 6GB stick of RAM encompasses every instance of this component regardless of make and model.
+type Hardware_Component_Model_Generic struct {
+	Entity
+
+	// A generic component model's capacity. The capacity of a generic component model depends on the model itself. For Example: Hard drives have a capacity that reflects the amount of data that hard drive can store.
+	Capacity *Float64 `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"`
+
+	// A brief description for a generic component model that typically defines its function.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A count of a generic component model's hardware component models.
+	HardwareComponentModelCount *uint `json:"hardwareComponentModelCount,omitempty" xmlrpc:"hardwareComponentModelCount,omitempty"`
+
+	// A generic component model's hardware component models.
+	HardwareComponentModels []Hardware_Component_Model `json:"hardwareComponentModels,omitempty" xmlrpc:"hardwareComponentModels,omitempty"`
+
+	// A generic component model's type.
+	HardwareComponentType *Hardware_Component_Type `json:"hardwareComponentType,omitempty" xmlrpc:"hardwareComponentType,omitempty"`
+
+	// The internal identifier of the component type for a generic component model.
+	HardwareComponentTypeId *int `json:"hardwareComponentTypeId,omitempty" xmlrpc:"hardwareComponentTypeId,omitempty"`
+
+	// A generic component model's internal identification number.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A list of features that a generic component model can provide.
+	MarketingFeatures *Hardware_Component_Model_Generic_MarketingFeature `json:"marketingFeatures,omitempty" xmlrpc:"marketingFeatures,omitempty"`
+
+	// The unit of measurement for the capacity of a generic component model.
+	Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"`
+
+	// A generic component model's upgrade priority. The upgrade priority indicates the order a generic component model should be considered over other generic component models. A higher number indicates that a generic component model receives a higher upgrade preference in comparison to a generic component model with a lower priority number.
+	UpgradePriority *int `json:"upgradePriority,omitempty" xmlrpc:"upgradePriority,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_Model_Generic_Attribute data type contains information relating to a single SoftLayer generic component model. Generic component model attributes can hold any information to describe functionality of the model. For Example: The number of cores that a processor has.
+type Hardware_Component_Model_Generic_Attribute struct {
+	Entity
+
+	// An attribute's generic component model.
+	HardwareGenericComponentModel *Hardware_Component_Model_Generic `json:"hardwareGenericComponentModel,omitempty" xmlrpc:"hardwareGenericComponentModel,omitempty"`
+
+	// A generic component model attribute's value.
+	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_Model_Generic_MarketingFeature data type contains general information relating to all the advertising features of a single SoftLayer hardware generic component model.
+type Hardware_Component_Model_Generic_MarketingFeature struct {
+	Entity
+
+	// An HTML formatted list of all features.
+	Features *string `json:"features,omitempty" xmlrpc:"features,omitempty"`
+
+	// The generic component model for a list of advertising or marketing features
+	HardwareGenericComponentModel *Hardware_Component_Model_Generic `json:"hardwareGenericComponentModel,omitempty" xmlrpc:"hardwareGenericComponentModel,omitempty"`
+
+	// A hardware component's upgrade price.
+	Price *string `json:"price,omitempty" xmlrpc:"price,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_Motherboard data type abstracts information related to a motherboard.
+type Hardware_Component_Motherboard struct {
+	Hardware_Component
+}
+
+// The SoftLayer_Hardware_Component_Motherboard_Reboot_Time contains the average reboot times for motherboards. There are two types of average times: one for motherboards without RAID, and one for motherboards with RAID. These times are based on averages and have been gathered through numerous test cases.
+type Hardware_Component_Motherboard_Reboot_Time struct {
+	Entity
+
+	// Motherboard's specifications (manufacturer, version, etc.)
+	HardwareComponentModel *Hardware_Component_Model `json:"hardwareComponentModel,omitempty" xmlrpc:"hardwareComponentModel,omitempty"`
+
+	// Average reboot time in seconds for the motherboard when RAID is installed.
+	WithRaid *int `json:"withRaid,omitempty" xmlrpc:"withRaid,omitempty"`
+
+	// Average reboot time in seconds for the motherboard when NO RAID is installed.
+	WithoutRaid *int `json:"withoutRaid,omitempty" xmlrpc:"withoutRaid,omitempty"`
+}
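To make the reboot-time averages concrete, here is a small nil-safe helper around the type just defined. The helper function, its name, and the sample numbers are our own illustration, not part of the generated bindings; only the `WithRaid`/`WithoutRaid` fields come from the type above.

```go
// Illustrative sketch only, not part of the generated file.
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

// expectedRebootSeconds picks the relevant average from a reboot-time record,
// falling back to a caller-supplied default when the data is absent.
func expectedRebootSeconds(rt *datatypes.Hardware_Component_Motherboard_Reboot_Time, raid bool, fallback int) int {
	if rt == nil {
		return fallback
	}
	if raid && rt.WithRaid != nil {
		return *rt.WithRaid
	}
	if !raid && rt.WithoutRaid != nil {
		return *rt.WithoutRaid
	}
	return fallback
}

func main() {
	with, without := 900, 600 // hypothetical averages, in seconds
	rt := &datatypes.Hardware_Component_Motherboard_Reboot_Time{
		WithRaid:    &with,
		WithoutRaid: &without,
	}
	fmt.Println(expectedRebootSeconds(rt, true, 1200)) // prints 900
}
```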
+// The SoftLayer_Hardware_Component_NetworkCard data type abstracts information related to a network card.
+type Hardware_Component_NetworkCard struct {
+	Hardware_Component
+}
+
+// The SoftLayer_Hardware_Component_Partition data type contains general information relating to a single hard drive partition.
+type Hardware_Component_Partition struct {
+	Entity
+
+	// A hardware component partition's order in the [[SoftLayer_Hardware_Hardware|hardware]].
+	DiskNumber *int `json:"diskNumber,omitempty" xmlrpc:"diskNumber,omitempty"`
+
+	// A flag indicating if a partition is the grow partition. The grow partition will grow to fill all remaining space on a disk. There can only be one.
+	Grow *int `json:"grow,omitempty" xmlrpc:"grow,omitempty"`
+
+	// A hardware component partition's associated [[SoftLayer_Hardware_Component|Hardware Component]]. Likely to be a [[SoftLayer_Hardware_Component_HardDrive|Hard Drive]].
+	HardwareComponent *Hardware_Component `json:"hardwareComponent,omitempty" xmlrpc:"hardwareComponent,omitempty"`
+
+	// A hardware component partition's associated [[SoftLayer_Hardware_Component|hardware component]] Id.
+	HardwareComponentId *int `json:"hardwareComponentId,omitempty" xmlrpc:"hardwareComponentId,omitempty"`
+
+	// A hardware component partition's minimum size (GB).
+	MinimumSize *Float64 `json:"minimumSize,omitempty" xmlrpc:"minimumSize,omitempty"`
+
+	// A hardware component partition's name. On a server with Windows this may be 'C' and on Linux this may be '/var'.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_Partition_OperatingSystem data type contains general information relating to a single SoftLayer Operating System Partition Template.
+type Hardware_Component_Partition_OperatingSystem struct {
+	Entity
+
+	// A partition template operating system's description. Typically the title of the Operating System.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A partition template operating system's id.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Information about the kinds of partition templates assigned to this operating system.
+	Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+	// A count of information regarding an operating system's [[SoftLayer_Hardware_Component_Partition_Template|Partition Templates]].
+	PartitionTemplateCount *uint `json:"partitionTemplateCount,omitempty" xmlrpc:"partitionTemplateCount,omitempty"`
+
+	// Information regarding an operating system's [[SoftLayer_Hardware_Component_Partition_Template|Partition Templates]].
+	PartitionTemplates []Hardware_Component_Partition_Template `json:"partitionTemplates,omitempty" xmlrpc:"partitionTemplates,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_Partition_Template data type contains general information relating to a single SoftLayer partition template. Partition templates group 1 or more partition configurations that can be used to predefine how a hard drive's partitions will be configured.
+type Hardware_Component_Partition_Template struct {
+	Entity
+
+	// A partition template's associated [[SoftLayer_Account|Account]].
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// A partition template's owner. The [[SoftLayer_Account|Account]] that a template was created by.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// An individual partition for a partition template. This is identical to 'partitionTemplatePartition' except this will sort unix partitions.
+	Data []Hardware_Component_Partition_Template_Partition `json:"data,omitempty" xmlrpc:"data,omitempty"`
+
+	// A count of an individual partition for a partition template. This is identical to 'partitionTemplatePartition' except this will sort unix partitions.
+	DataCount *uint `json:"dataCount,omitempty" xmlrpc:"dataCount,omitempty"`
+
+	// A partition template's description.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// no documentation yet
+	ExpireDate *string `json:"expireDate,omitempty" xmlrpc:"expireDate,omitempty"`
+
+	// A partition template's id.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A partition template's associated [[SoftLayer_Hardware_Component_Partition_OperatingSystem|Operating System]].
+	PartitionOperatingSystem *Hardware_Component_Partition_OperatingSystem `json:"partitionOperatingSystem,omitempty" xmlrpc:"partitionOperatingSystem,omitempty"`
+
+	// A partition template's associated [[SoftLayer_Hardware_Component_Partition_OperatingSystem|Operating System]] Id.
+	PartitionOperatingSystemId *int `json:"partitionOperatingSystemId,omitempty" xmlrpc:"partitionOperatingSystemId,omitempty"`
+
+	// An individual partition for a partition template.
+	PartitionTemplatePartition []Hardware_Component_Partition_Template_Partition `json:"partitionTemplatePartition,omitempty" xmlrpc:"partitionTemplatePartition,omitempty"`
+
+	// A count of an individual partition for a partition template.
+	PartitionTemplatePartitionCount *uint `json:"partitionTemplatePartitionCount,omitempty" xmlrpc:"partitionTemplatePartitionCount,omitempty"`
+
+	// A partition template's status code. ACTIVE, INACTIVE.
+	StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"`
+
+	// A partition template's type. SYSTEM - templates generated by SoftLayer. CUSTOM - templates created by SoftLayer customers.
+	TemplateType *string `json:"templateType,omitempty" xmlrpc:"templateType,omitempty"`
+}
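As an illustration of how a template groups its partitions (see the `Hardware_Component_Partition_Template_Partition` type defined immediately below), the following hypothetical sketch assembles a template with a fixed root partition and a single grow partition. In practice these objects are returned by the SoftLayer API rather than built by hand; the names and sizes here are invented, and the sketch assumes the package's `Float64` type is a plain float alias and that the `sl` helpers are available.

```go
// Illustrative sketch only, not part of the generated file.
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	rootSize := datatypes.Float64(100) // GB; invented value
	tpl := datatypes.Hardware_Component_Partition_Template{
		Description: sl.String("Linux basic layout (hypothetical)"),
		Data: []datatypes.Hardware_Component_Partition_Template_Partition{
			{PartitionName: sl.String("/"), PartitionSize: &rootSize},
			// The grow partition fills whatever space remains on the drive.
			{PartitionName: sl.String("/var"), IsGrow: sl.Bool(true)},
		},
	}

	for _, p := range tpl.Data {
		if p.PartitionName != nil {
			fmt.Println(*p.PartitionName)
		}
	}
}
```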
+// The SoftLayer_Hardware_Component_Partition_Template_Partition data type contains general information relating to a single SoftLayer Template Partition.
+type Hardware_Component_Partition_Template_Partition struct {
+	Entity
+
+	// The filesystem type of a partition
+	FilesystemType *Configuration_Storage_Filesystem_Type `json:"filesystemType,omitempty" xmlrpc:"filesystemType,omitempty"`
+
+	// A partition's id.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A flag indicating if a partition will be the grow partition. The grow partition will have its size adjusted to fill all available space on a hard drive.
+	IsGrow *bool `json:"isGrow,omitempty" xmlrpc:"isGrow,omitempty"`
+
+	// A partition's default name.
+	PartitionName *string `json:"partitionName,omitempty" xmlrpc:"partitionName,omitempty"`
+
+	// A partition's default size.
+	PartitionSize *Float64 `json:"partitionSize,omitempty" xmlrpc:"partitionSize,omitempty"`
+
+	// A partition's [[SoftLayer_Hardware_Component_Partition_Template|Partition Template]].
+	PartitionTemplate *Hardware_Component_Partition_Template `json:"partitionTemplate,omitempty" xmlrpc:"partitionTemplate,omitempty"`
+
+	// A partition's associated [[SoftLayer_Hardware_Component_Partition_Template|Partition Template]] Id.
+	PartitionTemplateId *int `json:"partitionTemplateId,omitempty" xmlrpc:"partitionTemplateId,omitempty"`
+
+	// The volume the partition will be put on
+	VolumeNumber *int `json:"volumeNumber,omitempty" xmlrpc:"volumeNumber,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_Processor data type abstracts information related to a processor.
+type Hardware_Component_Processor struct {
+	Hardware_Component
+}
+
+// The SoftLayer_Hardware_Component_Ram data type abstracts information related to RAM.
+type Hardware_Component_Ram struct {
+	Hardware_Component
+}
+
+// This class adds remote management functionality to the base SoftLayer_Hardware_Component class for servers (all server hardware).
+type Hardware_Component_RemoteManagement struct {
+	Hardware_Component
+
+	// A network component data type.
+	NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_RemoteManagement_Command data type contains the names of the remote management commands. Currently, only the reboot and power commands for the remote management card exist.
+type Hardware_Component_RemoteManagement_Command struct {
+	Entity
+
+	// The name of the remote management command.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// A count of all requests issued for the remote management command.
+	RequestCount *uint `json:"requestCount,omitempty" xmlrpc:"requestCount,omitempty"`
+
+	// All requests issued for the remote management command.
+	Requests []Hardware_Component_RemoteManagement_Command_Request `json:"requests,omitempty" xmlrpc:"requests,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_RemoteManagement_Command_Request contains details for remote management commands issued to a server's remote management card. Details for remote management commands such as powerOn, powerOff, powerCycle, rebootDefault, rebootSoft, and rebootHard can be retrieved, including the user who issued the command, the id of the remote management card the command was issued to, and when the command was issued.
+type Hardware_Component_RemoteManagement_Command_Request struct {
+	Entity
+
+	// The timestamp the remote management command was issued.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The id of the hardware to perform the remote management or powerstrip command on.
+	Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+	// The hardware id the command was issued for.
+	HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+	// The timestamp recorded when the remote management command returned a status of the command issued.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// A hardware's network components. Network components are hardware components such as IPMI cards or Ethernet cards.
+	NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"`
+
+	// Execution status of the remote management command. True is successful. False is failure.
+	Processed *bool `json:"processed,omitempty" xmlrpc:"processed,omitempty"`
+
+	// The remote management command issued.
+	RemoteManagementCommand *Hardware_Component_RemoteManagement_Command `json:"remoteManagementCommand,omitempty" xmlrpc:"remoteManagementCommand,omitempty"`
+
+	// Information regarding the user who issued the remote management command.
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+}
+
+// The credentials used for remote management, such as username, password, etc.
+type Hardware_Component_RemoteManagement_User struct {
+	Entity
+
+	// no documentation yet
+	Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+	// no documentation yet
+	NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"`
+
+	// The password used for this remote management command.
+	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+	// The username used for this remote management command.
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// The SoftLayer_Hardware_Component_SecurityDevice is used to determine the security devices attached to the hardware component.
+type Hardware_Component_SecurityDevice struct {
+	Hardware_Component
+}
+
+// The SoftLayer_Hardware_Component_SecurityDevice_Infineon is used to determine the Infineon security device attached to the hardware component.
+type Hardware_Component_SecurityDevice_Infineon struct {
+	Hardware_Component_SecurityDevice
+}
+
+// The SoftLayer_Hardware_Component_Type data type provides details on the type of component requested
+type Hardware_Component_Type struct {
+	Entity
+
+	// A count of the generic component model description for this component type object.
+	HardwareGenericComponentModelCount *uint `json:"hardwareGenericComponentModelCount,omitempty" xmlrpc:"hardwareGenericComponentModelCount,omitempty"`
+
+	// The generic component model description for this component type object.
+	HardwareGenericComponentModels []Hardware_Component_Model_Generic `json:"hardwareGenericComponentModels,omitempty" xmlrpc:"hardwareGenericComponentModels,omitempty"`
+
+	// The ID associated with this component type.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The hardware component type key name or code.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The type associated with this component type.
+	Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// The parent component type object for this component type object.
+	TypeParent *Hardware_Component_Type `json:"typeParent,omitempty" xmlrpc:"typeParent,omitempty"`
+
+	// The parent id associated with this component type.
+	TypeParentId *int `json:"typeParentId,omitempty" xmlrpc:"typeParentId,omitempty"`
+}
+
+// The SoftLayer_Hardware_Firewall data type contains general information relating to a single SoftLayer firewall.
+type Hardware_Firewall struct {
+	Hardware_Switch
+
+	// A count of a list of users that have access to this hardware firewall.
+	UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"`
+
+	// A list of users that have access to this hardware firewall.
+	Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"`
+}
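Tying the remote-management types above together: a command request pairs a command key name with a `Processed` flag, so checking the outcome of, say, a soft reboot is plain nil-safe field inspection. A hypothetical sketch (the key name value and outcome are invented; real requests come back from the API):

```go
// Illustrative sketch only, not part of the generated file.
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	req := datatypes.Hardware_Component_RemoteManagement_Command_Request{
		RemoteManagementCommand: &datatypes.Hardware_Component_RemoteManagement_Command{
			KeyName: sl.String("rebootSoft"),
		},
		Processed: sl.Bool(true),
	}

	// Processed reports execution status: true is success, false is failure.
	if req.RemoteManagementCommand != nil && req.RemoteManagementCommand.KeyName != nil {
		fmt.Printf("%s succeeded: %v\n", *req.RemoteManagementCommand.KeyName,
			req.Processed != nil && *req.Processed)
	}
}
```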
+// The SoftLayer_Hardware_Function data type contains a generic object type for a piece of hardware, like switch, firewall, server, etc.
+type Hardware_Function struct {
+	Entity
+
+	// The code associated with this hardware function.
+	Code *string `json:"code,omitempty" xmlrpc:"code,omitempty"`
+
+	// The description for a hardware function.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The id associated with a hardware function.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+}
+
+// no documentation yet
+type Hardware_Group struct {
+	Entity
+
+	// no documentation yet
+	Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"`
+
+	// A count of all servers attached to a network hardware.
+	DownlinkServerCount *uint `json:"downlinkServerCount,omitempty" xmlrpc:"downlinkServerCount,omitempty"`
+
+	// All servers attached to a network hardware.
+	DownlinkServers []Hardware `json:"downlinkServers,omitempty" xmlrpc:"downlinkServers,omitempty"`
+
+	// A count of all virtual guests attached to a network hardware.
+	DownlinkVirtualGuestCount *uint `json:"downlinkVirtualGuestCount,omitempty" xmlrpc:"downlinkVirtualGuestCount,omitempty"`
+
+	// All virtual guests attached to a network hardware.
+	DownlinkVirtualGuests []Virtual_Guest `json:"downlinkVirtualGuests,omitempty" xmlrpc:"downlinkVirtualGuests,omitempty"`
+
+	// All network hardware downstream from this hardware.
+	DownstreamNetworkHardware []Hardware `json:"downstreamNetworkHardware,omitempty" xmlrpc:"downstreamNetworkHardware,omitempty"`
+
+	// A count of all network hardware downstream from this hardware.
+	DownstreamNetworkHardwareCount *uint `json:"downstreamNetworkHardwareCount,omitempty" xmlrpc:"downstreamNetworkHardwareCount,omitempty"`
+
+	// A count of all network hardware with monitoring warnings or errors downstream from this hardware.
+	DownstreamNetworkHardwareWithIncidentCount *uint `json:"downstreamNetworkHardwareWithIncidentCount,omitempty" xmlrpc:"downstreamNetworkHardwareWithIncidentCount,omitempty"`
+
+	// All network hardware with monitoring warnings or errors downstream from this hardware.
+	DownstreamNetworkHardwareWithIncidents []Hardware `json:"downstreamNetworkHardwareWithIncidents,omitempty" xmlrpc:"downstreamNetworkHardwareWithIncidents,omitempty"`
+
+	// The chassis that a piece of hardware is housed in.
+	HardwareChassis *Hardware_Chassis `json:"hardwareChassis,omitempty" xmlrpc:"hardwareChassis,omitempty"`
+
+	// no documentation yet
+	Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// All servers attached downstream to a hardware that have failed monitoring
+	NetworkMonitorAttachedDownHardware []Hardware `json:"networkMonitorAttachedDownHardware,omitempty" xmlrpc:"networkMonitorAttachedDownHardware,omitempty"`
+
+	// A count of all servers attached downstream to a hardware that have failed monitoring
+	NetworkMonitorAttachedDownHardwareCount *uint `json:"networkMonitorAttachedDownHardwareCount,omitempty" xmlrpc:"networkMonitorAttachedDownHardwareCount,omitempty"`
+
+	// A count of virtual guests that are attached downstream to a hardware that have failed monitoring
+	NetworkMonitorAttachedDownVirtualGuestCount *uint `json:"networkMonitorAttachedDownVirtualGuestCount,omitempty" xmlrpc:"networkMonitorAttachedDownVirtualGuestCount,omitempty"`
+
+	// Virtual guests that are attached downstream to a hardware that have failed monitoring
+	NetworkMonitorAttachedDownVirtualGuests []Virtual_Guest `json:"networkMonitorAttachedDownVirtualGuests,omitempty" xmlrpc:"networkMonitorAttachedDownVirtualGuests,omitempty"`
+
+	// The value of a hardware's network status attribute.
+ NetworkStatus *string `json:"networkStatus,omitempty" xmlrpc:"networkStatus,omitempty"` +} + +// no documentation yet +type Hardware_LoadBalancer struct { + Hardware + + // no documentation yet + ModelFamily *string `json:"modelFamily,omitempty" xmlrpc:"modelFamily,omitempty"` + + // A count of a list of users that have access to this hardware load balancer. + UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"` + + // A list of users that have access to this hardware load balancer. + Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"` +} + +// no documentation yet +type Hardware_Note struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"` + + // no documentation yet + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // no documentation yet + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // no documentation yet + Type *Hardware_Note_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // no documentation yet + UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"` +} + +// no documentation yet +type Hardware_Note_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// no documentation yet +type Hardware_Power_Component struct { + Entity + + // no documentation yet + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // no documentation yet + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Hardware_Resource_Configuration struct { + Entity + + // no documentation yet + ConfigurationTypeId *int `json:"configurationTypeId,omitempty" xmlrpc:"configurationTypeId,omitempty"` + + // no documentation yet + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // no documentation yet + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + Properties []Hardware_Resource_Configuration_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // A count of + PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"` + + // no documentation yet + Type *Hardware_Resource_Configuration_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Hardware_Resource_Configuration_Property struct { + Entity + + // no documentation yet + Configuration *Hardware_Resource_Configuration `json:"configuration,omitempty" xmlrpc:"configuration,omitempty"` + + // no documentation yet + ConfigurationId *int `json:"configurationId,omitempty" xmlrpc:"configurationId,omitempty"` + + // no documentation yet + ConfigurationPropertyTypeId *int 
`json:"configurationPropertyTypeId,omitempty" xmlrpc:"configurationPropertyTypeId,omitempty"` + + // no documentation yet + Type *Hardware_Resource_Configuration_Property_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Hardware_Resource_Configuration_Property_Type struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Properties []Hardware_Resource_Configuration_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // A count of + PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"` + + // no documentation yet + Unit *string `json:"unit,omitempty" xmlrpc:"unit,omitempty"` +} + +// no documentation yet +type Hardware_Resource_Configuration_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Hardware_Router data type contains general information relating to a single SoftLayer router. +type Hardware_Router struct { + Hardware_Switch + + // A count of associated subnets for a router object. + BoundSubnetCount *uint `json:"boundSubnetCount,omitempty" xmlrpc:"boundSubnetCount,omitempty"` + + // Associated subnets for a router object. + BoundSubnets []Network_Subnet `json:"boundSubnets,omitempty" xmlrpc:"boundSubnets,omitempty"` + + // A flag indicating that a VLAN on the router can be assigned to a host that has local disk functionality. + LocalDiskStorageCapabilityFlag *bool `json:"localDiskStorageCapabilityFlag,omitempty" xmlrpc:"localDiskStorageCapabilityFlag,omitempty"` + + // A flag indicating that a VLAN on the router can be assigned to a host that has SAN disk functionality. + SanStorageCapabilityFlag *bool `json:"sanStorageCapabilityFlag,omitempty" xmlrpc:"sanStorageCapabilityFlag,omitempty"` +} + +// The SoftLayer_Hardware_Router_Backend data type contains general information relating to a single SoftLayer router item for hardware. +type Hardware_Router_Backend struct { + Hardware_Router +} + +// The SoftLayer_Hardware_Router_Frontend data type contains general information relating to a single SoftLayer router item for hardware. +type Hardware_Router_Frontend struct { + Hardware_Router +} + +// no documentation yet +type Hardware_SecurityModule struct { + Hardware_Server +} + +// The SoftLayer_Hardware_Server data type contains general information relating to a single SoftLayer server. +type Hardware_Server struct { + Hardware + + // The billing item for a server's attached network firewall. + ActiveNetworkFirewallBillingItem *Billing_Item `json:"activeNetworkFirewallBillingItem,omitempty" xmlrpc:"activeNetworkFirewallBillingItem,omitempty"` + + // A count of + ActiveTicketCount *uint `json:"activeTicketCount,omitempty" xmlrpc:"activeTicketCount,omitempty"` + + // no documentation yet + ActiveTickets []Ticket `json:"activeTickets,omitempty" xmlrpc:"activeTickets,omitempty"` + + // Transaction currently running for server. + ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"` + + // A count of any active transaction(s) that are currently running for the server (example: os reload). 
+ ActiveTransactionCount *uint `json:"activeTransactionCount,omitempty" xmlrpc:"activeTransactionCount,omitempty"` + + // Any active transaction(s) that are currently running for the server (example: os reload). + ActiveTransactions []Provisioning_Version1_Transaction `json:"activeTransactions,omitempty" xmlrpc:"activeTransactions,omitempty"` + + // An object that stores the maximum level for the monitoring query types and response types. + AvailableMonitoring []Network_Monitor_Version1_Query_Host_Stratum `json:"availableMonitoring,omitempty" xmlrpc:"availableMonitoring,omitempty"` + + // A count of an object that stores the maximum level for the monitoring query types and response types. + AvailableMonitoringCount *uint `json:"availableMonitoringCount,omitempty" xmlrpc:"availableMonitoringCount,omitempty"` + + // The average daily total bandwidth usage for the current billing cycle. + AverageDailyBandwidthUsage *Float64 `json:"averageDailyBandwidthUsage,omitempty" xmlrpc:"averageDailyBandwidthUsage,omitempty"` + + // The average daily private bandwidth usage for the current billing cycle. + AverageDailyPrivateBandwidthUsage *Float64 `json:"averageDailyPrivateBandwidthUsage,omitempty" xmlrpc:"averageDailyPrivateBandwidthUsage,omitempty"` + + // The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. + BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"` + + // A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. + BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"` + + // The raw private bandwidth usage data for the current billing cycle. + BillingCyclePrivateBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"` + + // The raw public bandwidth usage data for the current billing cycle. + BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // no documentation yet + ContainsSolidStateDrivesFlag *bool `json:"containsSolidStateDrivesFlag,omitempty" xmlrpc:"containsSolidStateDrivesFlag,omitempty"` + + // A server's control panel. + ControlPanel *Software_Component_ControlPanel `json:"controlPanel,omitempty" xmlrpc:"controlPanel,omitempty"` + + // The total cost of a server, measured in US Dollars ($USD). + Cost *Float64 `json:"cost,omitempty" xmlrpc:"cost,omitempty"` + + // An object that provides commonly used bandwidth summary components for the current billing cycle. + CurrentBandwidthSummary *Metric_Tracking_Object_Bandwidth_Summary `json:"currentBandwidthSummary,omitempty" xmlrpc:"currentBandwidthSummary,omitempty"` + + // Indicates if a server has a Customer Installed OS + CustomerInstalledOperatingSystemFlag *bool `json:"customerInstalledOperatingSystemFlag,omitempty" xmlrpc:"customerInstalledOperatingSystemFlag,omitempty"` + + // Indicates if a server is a customer owned device. + CustomerOwnedFlag *bool `json:"customerOwnedFlag,omitempty" xmlrpc:"customerOwnedFlag,omitempty"` + + // The total private inbound bandwidth for this hardware for the current billing cycle. 
+	InboundPrivateBandwidthUsage *Float64 `json:"inboundPrivateBandwidthUsage,omitempty" xmlrpc:"inboundPrivateBandwidthUsage,omitempty"`
+
+	// The last transaction in which a server's operating system was loaded.
+	LastOperatingSystemReload *Provisioning_Version1_Transaction `json:"lastOperatingSystemReload,omitempty" xmlrpc:"lastOperatingSystemReload,omitempty"`
+
+	// The metric tracking object id for this server.
+	MetricTrackingObjectId *int `json:"metricTrackingObjectId,omitempty" xmlrpc:"metricTrackingObjectId,omitempty"`
+
+	// The monitoring notification objects for this hardware. Each object links this hardware instance to a user account that will be notified if monitoring on this hardware object fails.
+	MonitoringUserNotification []User_Customer_Notification_Hardware `json:"monitoringUserNotification,omitempty" xmlrpc:"monitoringUserNotification,omitempty"`
+
+	// A count of the monitoring notification objects for this hardware. Each object links this hardware instance to a user account that will be notified if monitoring on this hardware object fails.
+	MonitoringUserNotificationCount *uint `json:"monitoringUserNotificationCount,omitempty" xmlrpc:"monitoringUserNotificationCount,omitempty"`
+
+	// An open ticket requesting cancellation of this server, if one exists.
+	OpenCancellationTicket *Ticket `json:"openCancellationTicket,omitempty" xmlrpc:"openCancellationTicket,omitempty"`
+
+	// The total private outbound bandwidth for this hardware for the current billing cycle.
+	OutboundPrivateBandwidthUsage *Float64 `json:"outboundPrivateBandwidthUsage,omitempty" xmlrpc:"outboundPrivateBandwidthUsage,omitempty"`
+
+	// Whether the bandwidth usage for this hardware for the current billing cycle exceeds the allocation.
+	OverBandwidthAllocationFlag *int `json:"overBandwidthAllocationFlag,omitempty" xmlrpc:"overBandwidthAllocationFlag,omitempty"`
+
+	// A server's primary private IP address.
+	PrivateIpAddress *string `json:"privateIpAddress,omitempty" xmlrpc:"privateIpAddress,omitempty"`
+
+	// Whether the bandwidth usage for this hardware for the current billing cycle is projected to exceed the allocation.
+	ProjectedOverBandwidthAllocationFlag *int `json:"projectedOverBandwidthAllocationFlag,omitempty" xmlrpc:"projectedOverBandwidthAllocationFlag,omitempty"`
+
+	// The projected public outbound bandwidth for this hardware for the current billing cycle.
+	ProjectedPublicBandwidthUsage *Float64 `json:"projectedPublicBandwidthUsage,omitempty" xmlrpc:"projectedPublicBandwidthUsage,omitempty"`
+
+	// A count of the last five commands issued to the server's remote management card.
+	RecentRemoteManagementCommandCount *uint `json:"recentRemoteManagementCommandCount,omitempty" xmlrpc:"recentRemoteManagementCommandCount,omitempty"`
+
+	// The last five commands issued to the server's remote management card.
+	RecentRemoteManagementCommands []Hardware_Component_RemoteManagement_Command_Request `json:"recentRemoteManagementCommands,omitempty" xmlrpc:"recentRemoteManagementCommands,omitempty"`
+
+	// no documentation yet
+	RegionalInternetRegistry *Network_Regional_Internet_Registry `json:"regionalInternetRegistry,omitempty" xmlrpc:"regionalInternetRegistry,omitempty"`
+
+	// A server's remote management card.
+	RemoteManagement *Hardware_Component_RemoteManagement `json:"remoteManagement,omitempty" xmlrpc:"remoteManagement,omitempty"`
+
+	// A count of user(s) who have access to issue commands and/or interact with the server's remote management card.
+	RemoteManagementUserCount *uint `json:"remoteManagementUserCount,omitempty" xmlrpc:"remoteManagementUserCount,omitempty"`
+
+	// User(s) who have access to issue commands and/or interact with the server's remote management card.
+	RemoteManagementUsers []Hardware_Component_RemoteManagement_User `json:"remoteManagementUsers,omitempty" xmlrpc:"remoteManagementUsers,omitempty"`
+
+	// A server's remote management card used for statistics.
+	StatisticsRemoteManagement *Hardware_Component_RemoteManagement `json:"statisticsRemoteManagement,omitempty" xmlrpc:"statisticsRemoteManagement,omitempty"`
+
+	// A count of a list of users that have access to this computing instance.
+	UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"`
+
+	// A list of users that have access to this computing instance.
+	Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"`
+
+	// A count of a hardware server's virtual servers.
+	VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"`
+
+	// A hardware server's virtual servers.
+	VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"`
+}
+
+// SoftLayer_Hardware_Status models the inventory state of any piece of hardware in SoftLayer's inventory. Most of these statuses are used by SoftLayer while a server is not provisioned or undergoing provisioning. SoftLayer uses the following status codes:
+//
+//
+// *'''ACTIVE''': This server is active and in use.
+// *'''DEPLOY''': Used during server provisioning.
+// *'''DEPLOY2''': Used during server provisioning.
+// *'''MACWAIT''': Used during server provisioning.
+// *'''RECLAIM''': This server has been reclaimed by SoftLayer and is awaiting de-provisioning.
+//
+//
+// Servers in production and in use should stay in the ACTIVE state. If a server's status ever reads anything else, please contact SoftLayer support.
+type Hardware_Status struct {
+	Entity
+
+	// A hardware status' internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A hardware's status code. See the SoftLayer_Hardware_Status Overview for the ''status'' property's possible values.
+	Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+}
+
+// The SoftLayer_Hardware_Switch object extends the base functionality of the SoftLayer_Hardware service.
+type Hardware_Switch struct {
+	Hardware
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/layout.go b/vendor/github.com/softlayer/softlayer-go/datatypes/layout.go
new file mode 100644
index 0000000000..880585bfc5
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/layout.go
@@ -0,0 +1,245 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Layout_Container contains definitions for default page layouts +type Layout_Container struct { + Entity + + // The internal identifier of a layout container + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the layout container, used primarily for programmatic purposes + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // The type of the layout container object + LayoutContainerType *Layout_Container_Type `json:"layoutContainerType,omitempty" xmlrpc:"layoutContainerType,omitempty"` + + // The internal identifier of the related [[SoftLayer_Layout_Container_Type]] + LayoutContainerTypeId *int `json:"layoutContainerTypeId,omitempty" xmlrpc:"layoutContainerTypeId,omitempty"` + + // A count of the layout items assigned to this layout container + LayoutItemCount *uint `json:"layoutItemCount,omitempty" xmlrpc:"layoutItemCount,omitempty"` + + // The layout items assigned to this layout container + LayoutItems []Layout_Item `json:"layoutItems,omitempty" xmlrpc:"layoutItems,omitempty"` + + // The friendly name of the layout container + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Layout_Container_Type contains definitions for container types +type Layout_Container_Type struct { + Entity + + // The internal identifier of the container type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the container type, used primarily for programmatic purposes + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // The friendly name of the container type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Layout_Item contains definitions for default layout items +type Layout_Item struct { + Entity + + // The internal identifier of a layout item + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the layout item, used primarily for programmatic purposes + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // A count of the layout preferences assigned to this layout item + LayoutItemPreferenceCount *uint `json:"layoutItemPreferenceCount,omitempty" xmlrpc:"layoutItemPreferenceCount,omitempty"` + + // The layout preferences assigned to this layout item + LayoutItemPreferences []Layout_Preference `json:"layoutItemPreferences,omitempty" xmlrpc:"layoutItemPreferences,omitempty"` + + // The type of the layout item object + LayoutItemType *Layout_Item_Type `json:"layoutItemType,omitempty" xmlrpc:"layoutItemType,omitempty"` + + // The internal identifier of the related [[SoftLayer_Layout_Item_Type]] + LayoutItemTypeId *int `json:"layoutItemTypeId,omitempty" xmlrpc:"layoutItemTypeId,omitempty"` + + // The friendly name of the layout item + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Layout_Item_Type contains definitions for item types +type Layout_Item_Type struct { + Entity + + // The internal identifier of the item type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the item type, used primarily for programmatic purposes + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // The friendly name of the item type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Layout_Preference contains definitions for default layout item preferences +type 
Layout_Preference struct { + Entity + + // The internal identifier of a layout preference + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The type of the preference object + LayoutPreferenceType *Layout_Preference_Type `json:"layoutPreferenceType,omitempty" xmlrpc:"layoutPreferenceType,omitempty"` + + // The internal identifier of the related [[SoftLayer_Layout_Preference_Type]] + LayoutPreferenceTypeId *int `json:"layoutPreferenceTypeId,omitempty" xmlrpc:"layoutPreferenceTypeId,omitempty"` + + // The default value of the preference + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Layout_Preference_Type contains definitions for preference types +type Layout_Preference_Type struct { + Entity + + // The internal identifier of the item type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The unique key name of the item type, used primarily for programmatic purposes + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // The friendly name of the item type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A regular expression used to validate the related [[SoftLayer_Layout_Preference]] + ValueExpression *string `json:"valueExpression,omitempty" xmlrpc:"valueExpression,omitempty"` +} + +// The SoftLayer_Layout_Profile contains the definition of the layout profile +type Layout_Profile struct { + Entity + + // Active status of the layout profile + ActiveFlag *int `json:"activeFlag,omitempty" xmlrpc:"activeFlag,omitempty"` + + // Timestamp of when the layout profile was created + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The internal identifier of a layout profile + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of + LayoutContainerCount *uint `json:"layoutContainerCount,omitempty" xmlrpc:"layoutContainerCount,omitempty"` + + // no documentation yet + LayoutContainers []Layout_Container `json:"layoutContainers,omitempty" xmlrpc:"layoutContainers,omitempty"` + + // A count of + LayoutPreferenceCount *uint `json:"layoutPreferenceCount,omitempty" xmlrpc:"layoutPreferenceCount,omitempty"` + + // no documentation yet + LayoutPreferences []Layout_Profile_Preference `json:"layoutPreferences,omitempty" xmlrpc:"layoutPreferences,omitempty"` + + // Timestamp of when the layout profile was last updated + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The friendly name of the layout profile + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The [[SoftLayer_User_Customer]] owning this layout profile + UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"` +} + +// no documentation yet +type Layout_Profile_Containers struct { + Entity + + // Timestamp of when the reference was created + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The internal identifier of the container reference + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The id of the referenced [[SoftLayer_Layout_Container]] + LayoutContainerId *int `json:"layoutContainerId,omitempty" xmlrpc:"layoutContainerId,omitempty"` + + // The container to be contained + LayoutContainerType *Layout_Container `json:"layoutContainerType,omitempty" xmlrpc:"layoutContainerType,omitempty"` + + // The profile containing this container + LayoutProfile *Layout_Profile `json:"layoutProfile,omitempty" xmlrpc:"layoutProfile,omitempty"` + + // The id of the referenced 
[[SoftLayer_Layout_Profile]] + LayoutProfileId *int `json:"layoutProfileId,omitempty" xmlrpc:"layoutProfileId,omitempty"` + + // Timestamp of when the reference was last updated + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` +} + +// no documentation yet +type Layout_Profile_Customer struct { + Layout_Profile + + // no documentation yet + UserRecord *User_Customer `json:"userRecord,omitempty" xmlrpc:"userRecord,omitempty"` +} + +// The SoftLayer_Layout_Profile_Preference contains definitions for layout preferences +type Layout_Profile_Preference struct { + Entity + + // Timestamp of when the preference was created + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Indicates whether this is a default value or not + DefaultValueFlag *int `json:"defaultValueFlag,omitempty" xmlrpc:"defaultValueFlag,omitempty"` + + // no documentation yet + LayoutContainer *Layout_Container `json:"layoutContainer,omitempty" xmlrpc:"layoutContainer,omitempty"` + + // The id of the related [[SoftLayer_Layout_Container]] + LayoutContainerId *int `json:"layoutContainerId,omitempty" xmlrpc:"layoutContainerId,omitempty"` + + // no documentation yet + LayoutItem *Layout_Item `json:"layoutItem,omitempty" xmlrpc:"layoutItem,omitempty"` + + // The id of the related [[SoftLayer_Layout_Item]] + LayoutItemId *int `json:"layoutItemId,omitempty" xmlrpc:"layoutItemId,omitempty"` + + // no documentation yet + LayoutPreference *Layout_Preference `json:"layoutPreference,omitempty" xmlrpc:"layoutPreference,omitempty"` + + // The internal identifier of the overridden [[SoftLayer_Layout_Preference]] + LayoutPreferenceId *int `json:"layoutPreferenceId,omitempty" xmlrpc:"layoutPreferenceId,omitempty"` + + // no documentation yet + LayoutProfile *Layout_Profile `json:"layoutProfile,omitempty" xmlrpc:"layoutProfile,omitempty"` + + // The internal identifier of the related [[SoftLayer_Layout_Profile]] + LayoutProfileId *int `json:"layoutProfileId,omitempty" xmlrpc:"layoutProfileId,omitempty"` + + // Timestamp of when the preference was last updated + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The value overriding the default value + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/legal.go b/vendor/github.com/softlayer/softlayer-go/datatypes/legal.go new file mode 100644 index 0000000000..f3bb064ba7 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/legal.go @@ -0,0 +1,58 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Legal_RegulatedWorkload struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + EnabledFlag *bool `json:"enabledFlag,omitempty" xmlrpc:"enabledFlag,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Type *Legal_RegulatedWorkload_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + WorkloadTypeId *int `json:"workloadTypeId,omitempty" xmlrpc:"workloadTypeId,omitempty"` +} + +// no documentation yet +type Legal_RegulatedWorkload_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/locale.go b/vendor/github.com/softlayer/softlayer-go/datatypes/locale.go new file mode 100644 index 0000000000..a7920bea03 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/locale.go @@ -0,0 +1,95 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Locale struct { + Entity + + // no documentation yet + FriendlyName *string `json:"friendlyName,omitempty" xmlrpc:"friendlyName,omitempty"` + + // Internal identification number of a locale + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LanguageTag *string `json:"languageTag,omitempty" xmlrpc:"languageTag,omitempty"` + + // Locale name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Locale_Country struct { + Entity + + // Binary flag denoting if this country is part of the European Union + IsEuropeanUnionFlag *int `json:"isEuropeanUnionFlag,omitempty" xmlrpc:"isEuropeanUnionFlag,omitempty"` + + // no documentation yet + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // no documentation yet + PostalCodeFormat *string `json:"postalCodeFormat,omitempty" xmlrpc:"postalCodeFormat,omitempty"` + + // no documentation yet + PostalCodeRequiredFlag *int `json:"postalCodeRequiredFlag,omitempty" xmlrpc:"postalCodeRequiredFlag,omitempty"` + + // no documentation yet + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` + + // A count of states that belong to this country. + StateCount *uint `json:"stateCount,omitempty" xmlrpc:"stateCount,omitempty"` + + // States that belong to this country. 
+ States []Locale_StateProvince `json:"states,omitempty" xmlrpc:"states,omitempty"` +} + +// This object represents a state or province for a country. +type Locale_StateProvince struct { + Entity + + // no documentation yet + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // no documentation yet + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` +} + +// Each User is assigned a timezone allowing for a precise local timestamp. +type Locale_Timezone struct { + Entity + + // A timezone's identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A timezone's long name. For example, "(GMT-06:00) America/Dallas - CST". + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // A timezone's name. For example, "America/Dallas". + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A timezone's offset based on the GMT standard. For example, Central Standard Time's offset is "-0600" from GMT=0000. + Offset *string `json:"offset,omitempty" xmlrpc:"offset,omitempty"` + + // A timezone's common abbreviation. For example, Central Standard Time's abbreviation is "CST". + ShortName *string `json:"shortName,omitempty" xmlrpc:"shortName,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/location.go b/vendor/github.com/softlayer/softlayer-go/datatypes/location.go new file mode 100644 index 0000000000..f0d1d865c9 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/location.go @@ -0,0 +1,431 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// Every piece of hardware and network connection owned by SoftLayer is tracked physically by location and stored in the SoftLayer_Location data type. SoftLayer locations exist in parent/child relationships, a convenient way to track equipment from its city, datacenter, server room, rack, then slot. Network backbones are tied to datacenters only, not to a room, rack, or slot. +type Location struct { + Entity + + // A count of + BackboneDependentCount *uint `json:"backboneDependentCount,omitempty" xmlrpc:"backboneDependentCount,omitempty"` + + // no documentation yet + BackboneDependents []Network_Backbone_Location_Dependent `json:"backboneDependents,omitempty" xmlrpc:"backboneDependents,omitempty"` + + // A count of a location can be a member of 1 or more groups. This will show the groups to which a location belongs. + GroupCount *uint `json:"groupCount,omitempty" xmlrpc:"groupCount,omitempty"` + + // A location can be a member of 1 or more groups. This will show the groups to which a location belongs. 
+ Groups []Location_Group `json:"groups,omitempty" xmlrpc:"groups,omitempty"` + + // A count of + HardwareFirewallCount *uint `json:"hardwareFirewallCount,omitempty" xmlrpc:"hardwareFirewallCount,omitempty"` + + // no documentation yet + HardwareFirewalls []Hardware `json:"hardwareFirewalls,omitempty" xmlrpc:"hardwareFirewalls,omitempty"` + + // The unique identifier of a specific location. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A location's physical address. + LocationAddress *Account_Address `json:"locationAddress,omitempty" xmlrpc:"locationAddress,omitempty"` + + // A location's Dedicated Rack member + LocationReservationMember *Location_Reservation_Rack_Member `json:"locationReservationMember,omitempty" xmlrpc:"locationReservationMember,omitempty"` + + // The current location's status. + LocationStatus *Location_Status `json:"locationStatus,omitempty" xmlrpc:"locationStatus,omitempty"` + + // A longer location description. + LongName *string `json:"longName,omitempty" xmlrpc:"longName,omitempty"` + + // A short location description. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + NetworkConfigurationAttribute *Hardware_Attribute `json:"networkConfigurationAttribute,omitempty" xmlrpc:"networkConfigurationAttribute,omitempty"` + + // The total number of users online using SoftLayer's PPTP VPN service for a location. + OnlinePptpVpnUserCount *int `json:"onlinePptpVpnUserCount,omitempty" xmlrpc:"onlinePptpVpnUserCount,omitempty"` + + // The total number of users online using SoftLayer's SSL VPN service for a location. + OnlineSslVpnUserCount *int `json:"onlineSslVpnUserCount,omitempty" xmlrpc:"onlineSslVpnUserCount,omitempty"` + + // no documentation yet + PathString *string `json:"pathString,omitempty" xmlrpc:"pathString,omitempty"` + + // A count of a location can be a member of 1 or more Price Groups. This will show the groups to which a location belongs. + PriceGroupCount *uint `json:"priceGroupCount,omitempty" xmlrpc:"priceGroupCount,omitempty"` + + // A location can be a member of 1 or more Price Groups. This will show the groups to which a location belongs. + PriceGroups []Location_Group `json:"priceGroups,omitempty" xmlrpc:"priceGroups,omitempty"` + + // A count of a location can be a member of 1 or more regions. This will show the regions to which a location belongs. + RegionCount *uint `json:"regionCount,omitempty" xmlrpc:"regionCount,omitempty"` + + // A location can be a member of 1 or more regions. This will show the regions to which a location belongs. + Regions []Location_Region `json:"regions,omitempty" xmlrpc:"regions,omitempty"` + + // no documentation yet + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // no documentation yet + Timezone *Locale_Timezone `json:"timezone,omitempty" xmlrpc:"timezone,omitempty"` + + // A location can be a member of 1 Bandwidth Pooling Group. This will show the group to which a location belongs. + VdrGroup *Location_Group_Location_CrossReference `json:"vdrGroup,omitempty" xmlrpc:"vdrGroup,omitempty"` +} + +// SoftLayer_Location_Datacenter extends the [[SoftLayer_Location]] data type to include datacenter-specific properties. 
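+//
+// A minimal usage sketch, assuming this library's companion session and services packages, the fmt and log imports, and placeholder credentials ("username"/"apiKey"): it lists datacenters through the generated SoftLayer_Location_Datacenter service and reads the pointer fields defined in this file with nil checks.
+//
+//	sess := session.New("username", "apiKey")
+//	svc := services.GetLocationDatacenterService(sess)
+//	dcs, err := svc.GetDatacenters()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, dc := range dcs {
+//		if dc.Name != nil && dc.LongName != nil {
+//			fmt.Printf("%s: %s\n", *dc.Name, *dc.LongName)
+//		}
+//	}
+//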
+type Location_Datacenter struct { + Location + + // A count of + ActiveItemPresaleEventCount *uint `json:"activeItemPresaleEventCount,omitempty" xmlrpc:"activeItemPresaleEventCount,omitempty"` + + // no documentation yet + ActiveItemPresaleEvents []Sales_Presale_Event `json:"activeItemPresaleEvents,omitempty" xmlrpc:"activeItemPresaleEvents,omitempty"` + + // A count of + ActivePresaleEventCount *uint `json:"activePresaleEventCount,omitempty" xmlrpc:"activePresaleEventCount,omitempty"` + + // no documentation yet + ActivePresaleEvents []Sales_Presale_Event `json:"activePresaleEvents,omitempty" xmlrpc:"activePresaleEvents,omitempty"` + + // A count of + BackendHardwareRouterCount *uint `json:"backendHardwareRouterCount,omitempty" xmlrpc:"backendHardwareRouterCount,omitempty"` + + // no documentation yet + BackendHardwareRouters []Hardware `json:"backendHardwareRouters,omitempty" xmlrpc:"backendHardwareRouters,omitempty"` + + // A count of subnets which are directly bound to one or more routers in a given datacenter, and currently allow routing. + BoundSubnetCount *uint `json:"boundSubnetCount,omitempty" xmlrpc:"boundSubnetCount,omitempty"` + + // Subnets which are directly bound to one or more routers in a given datacenter, and currently allow routing. + BoundSubnets []Network_Subnet `json:"boundSubnets,omitempty" xmlrpc:"boundSubnets,omitempty"` + + // A count of this references relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain. + BrandCountryRestrictionCount *uint `json:"brandCountryRestrictionCount,omitempty" xmlrpc:"brandCountryRestrictionCount,omitempty"` + + // This references relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain. + BrandCountryRestrictions []Brand_Restriction_Location_CustomerCountry `json:"brandCountryRestrictions,omitempty" xmlrpc:"brandCountryRestrictions,omitempty"` + + // A count of + FrontendHardwareRouterCount *uint `json:"frontendHardwareRouterCount,omitempty" xmlrpc:"frontendHardwareRouterCount,omitempty"` + + // no documentation yet + FrontendHardwareRouters []Hardware `json:"frontendHardwareRouters,omitempty" xmlrpc:"frontendHardwareRouters,omitempty"` + + // A count of + HardwareRouterCount *uint `json:"hardwareRouterCount,omitempty" xmlrpc:"hardwareRouterCount,omitempty"` + + // no documentation yet + HardwareRouters []Hardware `json:"hardwareRouters,omitempty" xmlrpc:"hardwareRouters,omitempty"` + + // A count of + PresaleEventCount *uint `json:"presaleEventCount,omitempty" xmlrpc:"presaleEventCount,omitempty"` + + // no documentation yet + PresaleEvents []Sales_Presale_Event `json:"presaleEvents,omitempty" xmlrpc:"presaleEvents,omitempty"` + + // The regional group this datacenter belongs to. + RegionalGroup *Location_Group_Regional `json:"regionalGroup,omitempty" xmlrpc:"regionalGroup,omitempty"` + + // no documentation yet + RegionalInternetRegistry *Network_Regional_Internet_Registry `json:"regionalInternetRegistry,omitempty" xmlrpc:"regionalInternetRegistry,omitempty"` + + // A count of retrieve all subnets that are eligible to be routed; those which the account has permission to associate with a vlan. 
+ RoutableBoundSubnetCount *uint `json:"routableBoundSubnetCount,omitempty" xmlrpc:"routableBoundSubnetCount,omitempty"` + + // Retrieve all subnets that are eligible to be routed; those which the account has permission to associate with a vlan. + RoutableBoundSubnets []Network_Subnet `json:"routableBoundSubnets,omitempty" xmlrpc:"routableBoundSubnets,omitempty"` +} + +// no documentation yet +type Location_Group struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the locations in a group. + LocationCount *uint `json:"locationCount,omitempty" xmlrpc:"locationCount,omitempty"` + + // The type for this location group. + LocationGroupType *Location_Group_Type `json:"locationGroupType,omitempty" xmlrpc:"locationGroupType,omitempty"` + + // no documentation yet + LocationGroupTypeId *int `json:"locationGroupTypeId,omitempty" xmlrpc:"locationGroupTypeId,omitempty"` + + // The locations in a group. + Locations []Location `json:"locations,omitempty" xmlrpc:"locations,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + SecurityLevelId *int `json:"securityLevelId,omitempty" xmlrpc:"securityLevelId,omitempty"` +} + +// no documentation yet +type Location_Group_Location_CrossReference struct { + Entity + + // no documentation yet + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationGroup *Location_Group `json:"locationGroup,omitempty" xmlrpc:"locationGroup,omitempty"` + + // no documentation yet + LocationGroupId *int `json:"locationGroupId,omitempty" xmlrpc:"locationGroupId,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // If set, this is the priority of this cross reference record in the group. + Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"` +} + +// no documentation yet +type Location_Group_Pricing struct { + Location_Group + + // A count of the prices that this pricing location group limits. All of these prices will only be available in the locations defined by this pricing location group. + PriceCount *uint `json:"priceCount,omitempty" xmlrpc:"priceCount,omitempty"` + + // The prices that this pricing location group limits. All of these prices will only be available in the locations defined by this pricing location group. + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` +} + +// no documentation yet +type Location_Group_Regional struct { + Location_Group + + // A count of the datacenters in a group. + DatacenterCount *uint `json:"datacenterCount,omitempty" xmlrpc:"datacenterCount,omitempty"` + + // The datacenters in a group. + Datacenters []Location `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"` + + // The preferred datacenters of a group. + PreferredDatacenter *Location_Datacenter `json:"preferredDatacenter,omitempty" xmlrpc:"preferredDatacenter,omitempty"` +} + +// no documentation yet +type Location_Group_Type struct { + Entity + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Location_Inventory_Room extends the [[SoftLayer_Location]] data type to include inventory room-specific properties. 
+type Location_Inventory_Room struct { + Location +} + +// SoftLayer_Location_Network_Operations_Center extends the [[SoftLayer_Location]] data type to include network operation center-specific properties. +type Location_Network_Operations_Center struct { + Location +} + +// SoftLayer_Location_Office extends the [[SoftLayer_Location]] data type to include office-specific properties. +type Location_Office struct { + Location +} + +// SoftLayer_Location_Rack extends the [[SoftLayer_Location]] data type to include rack-specific properties. +type Location_Rack struct { + Location +} + +// A region is made up of a keyname and a description of that region. A region keyname can be used as part of an order. Check the SoftLayer_Product_Order service for more details. +type Location_Region struct { + Entity + + // A short description of a region's name. This description is seen on the order forms. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A unique key name for a region. Provided for easy debugging. This is to be sent in with an order. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // Each region can have many datacenter locations tied to it. However, this is the location we currently provision to for a region. This location is the current valid location for a region. + Location *Location_Region_Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // An integer representing the order in which this element is displayed. + SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` +} + +// The SoftLayer_Location_Region_Location is very specific to the location where services will actually be provisioned. When accessed through a package, this location is the top priority location for a region. All new servers and services are provisioned at this location. When a server is ordered and a region is selected, this is the location within that region where the server will actually exist and have software/services installed. +type Location_Region_Location struct { + Entity + + // The SoftLayer_Location tied to a region's location. This provides more information about the location, including specific datacenter information. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // A count of a region's location also has delivery information as well as other information to be determined. For now, availability is provided and could weigh into the decision of where to have a server provisioned. + LocationPackageDetailCount *uint `json:"locationPackageDetailCount,omitempty" xmlrpc:"locationPackageDetailCount,omitempty"` + + // A region's location also has delivery information as well as other information to be determined. For now, availability is provided and could weigh into the decision of where to have a server provisioned. + LocationPackageDetails []Product_Package_Locations `json:"locationPackageDetails,omitempty" xmlrpc:"locationPackageDetails,omitempty"` + + // The region to which this location belongs. + Region *Location_Region `json:"region,omitempty" xmlrpc:"region,omitempty"` +} + +// no documentation yet +type Location_Reservation struct { + Entity + + // The account that a billing item belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The bandwidth allotment that the reservation belongs to. 
+ Allotment *Network_Bandwidth_Version1_Allotment `json:"allotment,omitempty" xmlrpc:"allotment,omitempty"` + + // no documentation yet + AllotmentId *int `json:"allotmentId,omitempty" xmlrpc:"allotmentId,omitempty"` + + // The billing item for the reservation. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The datacenter location that the reservation belongs to. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // Rack information for the reservation + LocationReservationRack *Location_Reservation_Rack `json:"locationReservationRack,omitempty" xmlrpc:"locationReservationRack,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` +} + +// no documentation yet +type Location_Reservation_Rack struct { + Entity + + // The bandwidth allotment that the reservation belongs to. + Allotment *Network_Bandwidth_Version1_Allotment `json:"allotment,omitempty" xmlrpc:"allotment,omitempty"` + + // Members of the rack. + Children []Location_Reservation_Rack_Member `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of members of the rack. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // no documentation yet + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // no documentation yet + LocationReservation *Location_Reservation `json:"locationReservation,omitempty" xmlrpc:"locationReservation,omitempty"` + + // no documentation yet + LocationReservationId *int `json:"locationReservationId,omitempty" xmlrpc:"locationReservationId,omitempty"` + + // no documentation yet + NetworkConnectionCapacity *int `json:"networkConnectionCapacity,omitempty" xmlrpc:"networkConnectionCapacity,omitempty"` + + // no documentation yet + NetworkConnectionReservation *int `json:"networkConnectionReservation,omitempty" xmlrpc:"networkConnectionReservation,omitempty"` + + // no documentation yet + PowerConnectionCapacity *int `json:"powerConnectionCapacity,omitempty" xmlrpc:"powerConnectionCapacity,omitempty"` + + // no documentation yet + PowerConnectionReservation *int `json:"powerConnectionReservation,omitempty" xmlrpc:"powerConnectionReservation,omitempty"` + + // no documentation yet + SlotCapacity *int `json:"slotCapacity,omitempty" xmlrpc:"slotCapacity,omitempty"` + + // no documentation yet + SlotReservation *int `json:"slotReservation,omitempty" xmlrpc:"slotReservation,omitempty"` +} + +// no documentation yet +type Location_Reservation_Rack_Member struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Location relation for the rack member + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // no documentation yet + LocationReservationRack *Location_Reservation `json:"locationReservationRack,omitempty" xmlrpc:"locationReservationRack,omitempty"` +} + +// SoftLayer_Location_Root extends the [[SoftLayer_Location]] data 
type to include root-specific properties. +type Location_Root struct { + Location +} + +// SoftLayer_Location_Server_Room extends the [[SoftLayer_Location]] data type to include server room-specific properties. +type Location_Server_Room struct { + Location +} + +// SoftLayer_Location_Slot extends the [[SoftLayer_Location]] data type to include slot-specific properties. +type Location_Slot struct { + Location +} + +// SoftLayer_Location_Status models the state of any location. SoftLayer uses the following status codes: +// +// +// *'''ACTIVE''': The location is currently active and available for public usage. +// *'''PLANNED''': Used when a location is planned but not yet active. +// *'''RETIRED''': Used when a location has been retired and is no longer active. +// +// +// Locations in use should stay in the ACTIVE state. If a location's status ever reads anything else and it contains active hardware, please contact SoftLayer support. +type Location_Status struct { + Entity + + // A location status's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A location's status code. See the SoftLayer_Location_Status overview for the possible ''status'' values. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// SoftLayer_Location_Storage_Room extends the [[SoftLayer_Location]] data type to include storage room-specific properties. +type Location_Storage_Room struct { + Location +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/marketplace.go b/vendor/github.com/softlayer/softlayer-go/datatypes/marketplace.go new file mode 100644 index 0000000000..9e5b5a1c38 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/marketplace.go @@ -0,0 +1,195 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Marketplace_EmailDistribution struct { + Entity + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Marketplace_Partner struct { + Entity + + // no documentation yet + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // no documentation yet + AttachedFiles []Marketplace_Partner_Attachment `json:"attachedFiles,omitempty" xmlrpc:"attachedFiles,omitempty"` + + // A count of + AttachmentCount *uint `json:"attachmentCount,omitempty" xmlrpc:"attachmentCount,omitempty"` + + // no documentation yet + Attachments []Marketplace_Partner_Attachment `json:"attachments,omitempty" xmlrpc:"attachments,omitempty"` + + // no documentation yet + CompanyDescription *string `json:"companyDescription,omitempty" xmlrpc:"companyDescription,omitempty"` + + // no documentation yet + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // no documentation yet + HeadlineDescription *string `json:"headlineDescription,omitempty" xmlrpc:"headlineDescription,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + LinkFreeTrial *string `json:"linkFreeTrial,omitempty" xmlrpc:"linkFreeTrial,omitempty"` + + // no documentation yet + LinkOrderPage *string `json:"linkOrderPage,omitempty" xmlrpc:"linkOrderPage,omitempty"` + + // no documentation yet + LinkWebsite *string `json:"linkWebsite,omitempty" xmlrpc:"linkWebsite,omitempty"` + + // no documentation yet + LogoMedium *Marketplace_Partner_Attachment `json:"logoMedium,omitempty" xmlrpc:"logoMedium,omitempty"` + + // no documentation yet + LogoMediumTemp *Marketplace_Partner_Attachment `json:"logoMediumTemp,omitempty" xmlrpc:"logoMediumTemp,omitempty"` + + // no documentation yet + LogoSmall *Marketplace_Partner_Attachment `json:"logoSmall,omitempty" xmlrpc:"logoSmall,omitempty"` + + // no documentation yet + LogoSmallTemp *Marketplace_Partner_Attachment `json:"logoSmallTemp,omitempty" xmlrpc:"logoSmallTemp,omitempty"` + + // no documentation yet + MetaDescription *string `json:"metaDescription,omitempty" xmlrpc:"metaDescription,omitempty"` + + // no documentation yet + MetaKeywords *string `json:"metaKeywords,omitempty" xmlrpc:"metaKeywords,omitempty"` + + // no documentation yet + ProductBenefits *string `json:"productBenefits,omitempty" xmlrpc:"productBenefits,omitempty"` + + // no documentation yet + ProductCategoryId *int `json:"productCategoryId,omitempty" xmlrpc:"productCategoryId,omitempty"` + + // no documentation yet + ProductDescriptionLong *string `json:"productDescriptionLong,omitempty" xmlrpc:"productDescriptionLong,omitempty"` + + // no documentation yet + ProductDescriptionShort *string `json:"productDescriptionShort,omitempty" xmlrpc:"productDescriptionShort,omitempty"` + + // no documentation yet + ProductFeatures *string `json:"productFeatures,omitempty" xmlrpc:"productFeatures,omitempty"` + + // no documentation yet + ProductName *string `json:"productName,omitempty" xmlrpc:"productName,omitempty"` + + // no documentation yet + ProductTitle *string `json:"productTitle,omitempty" xmlrpc:"productTitle,omitempty"` + + // no documentation yet + UrlIdentifier *string `json:"urlIdentifier,omitempty" xmlrpc:"urlIdentifier,omitempty"` +} + +// no documentation 
yet +type Marketplace_Partner_Attachment struct { + Entity + + // no documentation yet + AttachmentType *Marketplace_Partner_Attachment_Type `json:"attachmentType,omitempty" xmlrpc:"attachmentType,omitempty"` + + // no documentation yet + AttachmentTypeId *int `json:"attachmentTypeId,omitempty" xmlrpc:"attachmentTypeId,omitempty"` + + // no documentation yet + BaseName *string `json:"baseName,omitempty" xmlrpc:"baseName,omitempty"` + + // no documentation yet + DisplayName *string `json:"displayName,omitempty" xmlrpc:"displayName,omitempty"` + + // no documentation yet + FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + MarketplacePartnerId *int `json:"marketplacePartnerId,omitempty" xmlrpc:"marketplacePartnerId,omitempty"` + + // no documentation yet + SaveAsName *string `json:"saveAsName,omitempty" xmlrpc:"saveAsName,omitempty"` +} + +// no documentation yet +type Marketplace_Partner_Attachment_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Marketplace_Partner_File struct { + Entity + + // no documentation yet + Attributes *Marketplace_Partner_File_Attributes `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // no documentation yet + Contents *[]byte `json:"contents,omitempty" xmlrpc:"contents,omitempty"` +} + +// no documentation yet +type Marketplace_Partner_File_Attributes struct { + Entity + + // no documentation yet + Bits *int `json:"bits,omitempty" xmlrpc:"bits,omitempty"` + + // no documentation yet + Channels *int `json:"channels,omitempty" xmlrpc:"channels,omitempty"` + + // no documentation yet + Height *int `json:"height,omitempty" xmlrpc:"height,omitempty"` + + // no documentation yet + HtmlAttributes *string `json:"htmlAttributes,omitempty" xmlrpc:"htmlAttributes,omitempty"` + + // no documentation yet + ImageType *int `json:"imageType,omitempty" xmlrpc:"imageType,omitempty"` + + // no documentation yet + IsImage *bool `json:"isImage,omitempty" xmlrpc:"isImage,omitempty"` + + // no documentation yet + MimeType *string `json:"mimeType,omitempty" xmlrpc:"mimeType,omitempty"` + + // no documentation yet + Width *int `json:"width,omitempty" xmlrpc:"width,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/mcafee.go b/vendor/github.com/softlayer/softlayer-go/datatypes/mcafee.go new file mode 100644 index 0000000000..dbb99d5c20 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/mcafee.go @@ -0,0 +1,319 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The McAfee_Epolicy_Orchestrator_Version36_Agent_Details data type represents a virus scan agent and contains details about its version. +type McAfee_Epolicy_Orchestrator_Version36_Agent_Details struct { + Entity + + // Version number of the anti-virus scan agent. + AgentVersion *string `json:"agentVersion,omitempty" xmlrpc:"agentVersion,omitempty"` + + // The current anti-virus policy of an agent. + CurrentPolicy *McAfee_Epolicy_Orchestrator_Version36_Agent_Parent_Details `json:"currentPolicy,omitempty" xmlrpc:"currentPolicy,omitempty"` + + // The date of the last time the anti-virus agent checked in. + LastUpdate *string `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Agent_Parent_Details data type contains the name of an anti-virus policy. +type McAfee_Epolicy_Orchestrator_Version36_Agent_Parent_Details struct { + Entity + + // The current anti-virus policy of an agent. + CurrentPolicy *McAfee_Epolicy_Orchestrator_Version36_Agent_Parent_Details `json:"currentPolicy,omitempty" xmlrpc:"currentPolicy,omitempty"` + + // The name of a policy. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event data type represents a single anti-virus event. It contains details about the event such as the date the event occurred, the virus that is detected and the action that is taken. +type McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event struct { + Entity + + // The date when an anti-virus event occurs. + EventLocalDateTime *Time `json:"eventLocalDateTime,omitempty" xmlrpc:"eventLocalDateTime,omitempty"` + + // Name of the file found to be infected. + Filename *string `json:"filename,omitempty" xmlrpc:"filename,omitempty"` + + // The action taken when a virus is detected. + VirusActionTaken *McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_Filter_Description `json:"virusActionTaken,omitempty" xmlrpc:"virusActionTaken,omitempty"` + + // The name of a virus that is found. + VirusName *string `json:"virusName,omitempty" xmlrpc:"virusName,omitempty"` + + // The type of virus that is found. + VirusType *string `json:"virusType,omitempty" xmlrpc:"virusType,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_AccessProtection data type represents an access protection event. It contains details about the event such as when it occurs, the process that caused it, and the rule that triggered the event. +type McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_AccessProtection struct { + Entity + + // The date that an access protection event occurs. + EventLocalDateTime *Time `json:"eventLocalDateTime,omitempty" xmlrpc:"eventLocalDateTime,omitempty"` + + // The name of the file that was protected from access. + Filename *string `json:"filename,omitempty" xmlrpc:"filename,omitempty"` + + // The name of the process that was protected from access. + ProcessName *string `json:"processName,omitempty" xmlrpc:"processName,omitempty"` + + // The name of the rule that triggered an access protection event. + RuleName *string `json:"ruleName,omitempty" xmlrpc:"ruleName,omitempty"` + + // The IP address that caused an access protection event. 
+ Source *string `json:"source,omitempty" xmlrpc:"source,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_Filter_Description data type contains the name of the rule that was triggered by an anti-virus event. +type McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_Filter_Description struct { + Entity + + // The name of the rule that triggered an anti-virus event. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_BlockedApplicationEvent data type contains a single blocked application event. The details of the event are the time the event occurred, the process that generated the event and a brief description of the application that was blocked. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_BlockedApplicationEvent struct { + Entity + + // A brief description of the application that is blocked. + ApplicationDescription *string `json:"applicationDescription,omitempty" xmlrpc:"applicationDescription,omitempty"` + + // The time that an application is blocked. + IncidentTime *Time `json:"incidentTime,omitempty" xmlrpc:"incidentTime,omitempty"` + + // The name of a process that is blocked. + ProcessName *string `json:"processName,omitempty" xmlrpc:"processName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_Event_Signature data type contains the signature name of a rule that generated an IPS event. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_Event_Signature struct { + Entity + + // The name of a rule that triggered an IPS event. + SignatureName *string `json:"signatureName,omitempty" xmlrpc:"signatureName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_IPSEvent data type represents a single IPS event. It contains details about the event such as the date the event occurred, the process that generated it, the severity of the event, and the action taken. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_IPSEvent struct { + Entity + + // The time when an IPS event occurred. + IncidentTime *Time `json:"incidentTime,omitempty" xmlrpc:"incidentTime,omitempty"` + + // Name of the process that generated an IPS event. + ProcessName *string `json:"processName,omitempty" xmlrpc:"processName,omitempty"` + + // The action taken because of an IPS event. + ReactionText *string `json:"reactionText,omitempty" xmlrpc:"reactionText,omitempty"` + + // The IP address that generated an IPS event. + RemoteIpAddress *string `json:"remoteIpAddress,omitempty" xmlrpc:"remoteIpAddress,omitempty"` + + // The severity level for an IPS event. + SeverityText *string `json:"severityText,omitempty" xmlrpc:"severityText,omitempty"` + + // The signature that generated an IPS event. + Signature *McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_Event_Signature `json:"signature,omitempty" xmlrpc:"signature,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_BlockedApplicationEvent data type contains a single blocked application event. The details of the event are the time the event occurred, the process that generated the event and a brief description of the application that was blocked. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_BlockedApplicationEvent struct { + Entity + + // A brief description of the application that is blocked. + ApplicationDescription *string `json:"applicationDescription,omitempty" xmlrpc:"applicationDescription,omitempty"` + + // The time that an application is blocked. 
+ IncidentTime *Time `json:"incidentTime,omitempty" xmlrpc:"incidentTime,omitempty"` + + // The name of a process that is blocked. + ProcessName *string `json:"processName,omitempty" xmlrpc:"processName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_Event_Signature data type contains the signature name of a rule that generated an IPS event. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_Event_Signature struct { + Entity + + // The name of a rule that triggered an IPS event. + SignatureName *string `json:"signatureName,omitempty" xmlrpc:"signatureName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_IPSEvent data type represents a single IPS event. It contains details about the event such as the date the event occurred, the process that generated it, the severity of the event, and the action taken. +type McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_IPSEvent struct { + Entity + + // The time when an IPS event occurred. + IncidentTime *Time `json:"incidentTime,omitempty" xmlrpc:"incidentTime,omitempty"` + + // Name of the process that generated an IPS event. + ProcessName *string `json:"processName,omitempty" xmlrpc:"processName,omitempty"` + + // The action taken because of an IPS event. + ReactionText *string `json:"reactionText,omitempty" xmlrpc:"reactionText,omitempty"` + + // The IP address that generated an IPS event. + RemoteIpAddress *string `json:"remoteIpAddress,omitempty" xmlrpc:"remoteIpAddress,omitempty"` + + // The severity level for an IPS event. + SeverityText *string `json:"severityText,omitempty" xmlrpc:"severityText,omitempty"` + + // The signature that generated an IPS event. + Signature *McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_Event_Signature `json:"signature,omitempty" xmlrpc:"signature,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Policy_Object data type contains the name of a policy that may be assigned to a server. +type McAfee_Epolicy_Orchestrator_Version36_Policy_Object struct { + Entity + + // The name of a policy. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version36_Product_Properties data type contains the virus definition file version. +type McAfee_Epolicy_Orchestrator_Version36_Product_Properties struct { + Entity + + // The virus definition file version. + DatVersion *string `json:"datVersion,omitempty" xmlrpc:"datVersion,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Agent_Details data type represents a virus scan agent and contains details about its version. +type McAfee_Epolicy_Orchestrator_Version45_Agent_Details struct { + Entity + + // Version number of the anti-virus scan agent. + AgentVersion *string `json:"agentVersion,omitempty" xmlrpc:"agentVersion,omitempty"` + + // The date of the last time the anti-virus agent checked in. + LastUpdate *Time `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Agent_Parent_Details data type contains the name of an anti-virus policy. +type McAfee_Epolicy_Orchestrator_Version45_Agent_Parent_Details struct { + Entity + + // Additional information about an agent. + AgentDetails *McAfee_Epolicy_Orchestrator_Version45_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // The name of a policy. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The current anti-virus policy of an agent. 
+ Policies []McAfee_Epolicy_Orchestrator_Version45_Agent_Parent_Details `json:"policies,omitempty" xmlrpc:"policies,omitempty"` + + // A count of the current anti-virus policy of an agent. + PolicyCount *uint `json:"policyCount,omitempty" xmlrpc:"policyCount,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Event data type represents a single event. It contains details about the event such as the date the event occurred, the virus or intrusion that is detected and the action that is taken. +type McAfee_Epolicy_Orchestrator_Version45_Event struct { + Entity + + // Additional information about an agent. + AgentDetails *McAfee_Epolicy_Orchestrator_Version45_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // The time that an event was detected. + DetectedUtc *Time `json:"detectedUtc,omitempty" xmlrpc:"detectedUtc,omitempty"` + + // The IP address of the source that generated an event. + SourceIpv4 *string `json:"sourceIpv4,omitempty" xmlrpc:"sourceIpv4,omitempty"` + + // The name of the process that generated an event. + SourceProcessName *string `json:"sourceProcessName,omitempty" xmlrpc:"sourceProcessName,omitempty"` + + // The name of the file that was the target of the event. + TargetFilename *string `json:"targetFilename,omitempty" xmlrpc:"targetFilename,omitempty"` + + // The action taken regarding a threat. + ThreatActionTaken *string `json:"threatActionTaken,omitempty" xmlrpc:"threatActionTaken,omitempty"` + + // The name of the threat. + ThreatName *string `json:"threatName,omitempty" xmlrpc:"threatName,omitempty"` + + // The textual representation of the severity level. + ThreatSeverityLabel *string `json:"threatSeverityLabel,omitempty" xmlrpc:"threatSeverityLabel,omitempty"` + + // The type of threat. + ThreatType *string `json:"threatType,omitempty" xmlrpc:"threatType,omitempty"` + + // The action taken when a virus is detected. + VirusActionTaken *McAfee_Epolicy_Orchestrator_Version45_Event_Filter_Description `json:"virusActionTaken,omitempty" xmlrpc:"virusActionTaken,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Event_Filter_Description data type contains the name of the rule that was triggered by an event. +type McAfee_Epolicy_Orchestrator_Version45_Event_Filter_Description struct { + Entity + + // The name of the rule that triggered an event. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Event_Version7 data type represents a single event. It contains details about the event such as the date the event occurred, the virus or intrusion that is detected and the action that is taken. +type McAfee_Epolicy_Orchestrator_Version45_Event_Version7 struct { + McAfee_Epolicy_Orchestrator_Version45_Event + + // The signature information for an event. + Signature *McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version7 `json:"signature,omitempty" xmlrpc:"signature,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Event_Version8 data type represents a single event. It contains details about the event such as the date the event occurred, the virus or intrusion that is detected and the action that is taken. +type McAfee_Epolicy_Orchestrator_Version45_Event_Version8 struct { + McAfee_Epolicy_Orchestrator_Version45_Event + + // The signature information for an event. 
+ Signature *McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version8 `json:"signature,omitempty" xmlrpc:"signature,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version7 data type contains the signature name of a rule that generated an IPS event. +type McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version7 struct { + Entity + + // The name of a rule that triggered an IPS event. + SignatureName *string `json:"signatureName,omitempty" xmlrpc:"signatureName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version8 data type contains the signature name of a rule that generated an IPS event. +type McAfee_Epolicy_Orchestrator_Version45_Hips_Event_Signature_Version8 struct { + Entity + + // The name of a rule that triggered an IPS event. + SignatureName *string `json:"signatureName,omitempty" xmlrpc:"signatureName,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Policy_Object data type contains the name of a policy that may be assigned to a server. +type McAfee_Epolicy_Orchestrator_Version45_Policy_Object struct { + Entity + + // The name of a policy. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The McAfee_Epolicy_Orchestrator_Version45_Product_Properties data type contains the virus definition file version. +type McAfee_Epolicy_Orchestrator_Version45_Product_Properties struct { + Entity + + // The virus definition file version. + DatVersion *string `json:"datVersion,omitempty" xmlrpc:"datVersion,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/metric.go b/vendor/github.com/softlayer/softlayer-go/datatypes/metric.go new file mode 100644 index 0000000000..46cfef55ac --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/metric.go @@ -0,0 +1,207 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// Metric tracking objects provide a common interface to all metrics provided by SoftLayer. These metrics range from network component traffic for a server to aggregated Bandwidth Pooling traffic and more. Every object within SoftLayer's range of objects that has data that can be tracked over time has an associated tracking object. Use the [[SoftLayer_Metric_Tracking_Object]] service to retrieve raw and graph data from a tracking object. +type Metric_Tracking_Object struct { + Entity + + // The data recorded by a tracking object. + Data []Metric_Tracking_Object_Data `json:"data,omitempty" xmlrpc:"data,omitempty"` + + // A tracking object's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Tracking object label + Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"` + + // The identifier of the existing resource this object is attempting to track. 
+ ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` + + // The date this tracker began tracking this particular resource. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` + + // The type of data that a tracking object polls. + Type *Metric_Tracking_Object_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// SoftLayer_Metric_Tracking_Object_Abstract models a generic tracking object type. Typically a tracking object with a specific purpose has its own data type defined within the SoftLayer API. +type Metric_Tracking_Object_Abstract struct { + Metric_Tracking_Object +} + +// This data type provides commonly used bandwidth summary components for the current billing cycle. +type Metric_Tracking_Object_Bandwidth_Summary struct { + Entity + + // This is the amount of bandwidth (measured in gigabytes) allocated for this tracking object. + AllocationAmount *Float64 `json:"allocationAmount,omitempty" xmlrpc:"allocationAmount,omitempty"` + + // no documentation yet + AllocationId *int `json:"allocationId,omitempty" xmlrpc:"allocationId,omitempty"` + + // The amount of outbound bandwidth (measured in gigabytes) currently used this billing period. Same as $outboundBandwidthAmount. Aliased for backward compatibility. + AmountOut *Float64 `json:"amountOut,omitempty" xmlrpc:"amountOut,omitempty"` + + // The daily average amount of outbound bandwidth usage. + AverageDailyUsage *Float64 `json:"averageDailyUsage,omitempty" xmlrpc:"averageDailyUsage,omitempty"` + + // A flag that tells whether or not this tracking object's bandwidth usage is already over the allocation. 1 means yes, 0 means no. + CurrentlyOverAllocationFlag *int `json:"currentlyOverAllocationFlag,omitempty" xmlrpc:"currentlyOverAllocationFlag,omitempty"` + + // The metric tracking id for this resource. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The amount of outbound bandwidth (measured in gigabytes) currently used this billing period + OutboundBandwidthAmount *Float64 `json:"outboundBandwidthAmount,omitempty" xmlrpc:"outboundBandwidthAmount,omitempty"` + + // The amount of bandwidth (measured in gigabytes) of projected usage, using a basic average calculation of daily usage. + ProjectedBandwidthUsage *Float64 `json:"projectedBandwidthUsage,omitempty" xmlrpc:"projectedBandwidthUsage,omitempty"` + + // A flag that tells whether or not this tracking object's bandwidth usage is projected to go over the allocation, based on daily average usage. 1 means yes, 0 means no. + ProjectedOverAllocationFlag *int `json:"projectedOverAllocationFlag,omitempty" xmlrpc:"projectedOverAllocationFlag,omitempty"` +} + +// SoftLayer_Metric_Tracking_Object_Data models an individual unit of data tracked by a SoftLayer tracking object, including the type of data polled, the date it was polled at, and the counter value that was measured at polling time. +type Metric_Tracking_Object_Data struct { + Entity + + // The value stored for a data record. + Counter *Float64 `json:"counter,omitempty" xmlrpc:"counter,omitempty"` + + // The time a data record was stored. + DateTime *Time `json:"dateTime,omitempty" xmlrpc:"dateTime,omitempty"` + + // The type of data held in a record. + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// SoftLayer_Metric_Tracking_Object_Data_Network_ContentDelivery_Account models usage data polled from the CDN system. 
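+//
+// A hedged sketch of pulling raw Metric_Tracking_Object_Data records through the [[SoftLayer_Metric_Tracking_Object]] service mentioned at the top of this file. It assumes the companion services and sl helper packages, that the generated method for getBandwidthData takes the pointer arguments shown, and that trackingId, the one-day date range, the "public" type, and the 300-second rollup are placeholder values:
+//
+//	svc := services.GetMetricTrackingObjectService(sess).Id(trackingId)
+//	start := datatypes.Time{Time: time.Now().AddDate(0, 0, -1)}
+//	end := datatypes.Time{Time: time.Now()}
+//	data, err := svc.GetBandwidthData(&start, &end, sl.String("public"), sl.Int(300))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, d := range data {
+//		if d.DateTime != nil && d.Counter != nil {
+//			fmt.Printf("%v: %v\n", d.DateTime, *d.Counter)
+//		}
+//	}
+//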
+type Metric_Tracking_Object_Data_Network_ContentDelivery_Account struct { + Metric_Tracking_Object_Data + + // The name of a file. This value is only populated in file-based bandwidth reports. + FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"` + + // The internal identifier of a CDN POP (Points of Presence). + PopId *int `json:"popId,omitempty" xmlrpc:"popId,omitempty"` +} + +// SoftLayer_Metric_Tracking_Object_HardwareServer models tracking objects specific to physical hardware and the data that are recorded by those servers. +type Metric_Tracking_Object_HardwareServer struct { + Metric_Tracking_Object_Abstract + + // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to. + BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"` + + // A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to. + BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"` + + // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to. + BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"` + + // A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to. + BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"` + + // The total private inbound bandwidth for this item's resource for the current billing cycle. + BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"` + + // The total private outbound bandwidth for this item's resource for the current billing cycle. + BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"` + + // The total private bandwidth for this item's resource for the current billing cycle. + BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"` + + // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to. + BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // The total public inbound bandwidth for this item's resource for the current billing cycle. + BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"` + + // The total public outbound bandwidth for this item's resource for the current billing cycle. + BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"` + + // The total public bandwidth for this item's resource for the current billing cycle. + BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"` + + // The server that this tracking object tracks. 
+ Resource *Hardware_Server `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// SoftLayer [[SoftLayer_Metric_Tracking_Object|tracking objects]] can model various kinds of measured data, from server and virtual server bandwidth usage to CPU use to remote storage usage. SoftLayer_Metric_Tracking_Object_Type models one of these types and is referred to in tracking objects to reflect what type of data they track.
+type Metric_Tracking_Object_Type struct {
+ Entity
+
+ // A tracking object type's key name. This is a shorter description of what kind of data a tracking object group is polling.
+ Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"`
+
+ // A tracking object type's name. This describes what kind of data a tracking object group is polling.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// SoftLayer_Metric_Tracking_Object_VirtualDedicatedRack models tracking objects specific to virtual dedicated racks. Bandwidth Pooling aggregates the bandwidth used by multiple servers within the rack.
+type Metric_Tracking_Object_VirtualDedicatedRack struct {
+ Metric_Tracking_Object_Abstract
+
+ // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to.
+ BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"`
+
+ // A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to.
+ BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"`
+
+ // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to.
+ BillingCyclePrivateBandwidthUsage []Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"`
+
+ // A count of the raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to.
+ BillingCyclePrivateBandwidthUsageCount *uint `json:"billingCyclePrivateBandwidthUsageCount,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsageCount,omitempty"`
+
+ // The total private inbound bandwidth for this item's resource for the current billing cycle.
+ BillingCyclePrivateUsageIn *Float64 `json:"billingCyclePrivateUsageIn,omitempty" xmlrpc:"billingCyclePrivateUsageIn,omitempty"`
+
+ // The total private outbound bandwidth for this item's resource for the current billing cycle.
+ BillingCyclePrivateUsageOut *Float64 `json:"billingCyclePrivateUsageOut,omitempty" xmlrpc:"billingCyclePrivateUsageOut,omitempty"`
+
+ // The total private bandwidth for this item's resource for the current billing cycle.
+ BillingCyclePrivateUsageTotal *uint `json:"billingCyclePrivateUsageTotal,omitempty" xmlrpc:"billingCyclePrivateUsageTotal,omitempty"`
+
+ // The raw bandwidth usage data for the current billing cycle. One object is returned for each network this server is attached to.
+ BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"`
+
+ // The total public inbound bandwidth for this item's resource for the current billing cycle.
+ BillingCyclePublicUsageIn *Float64 `json:"billingCyclePublicUsageIn,omitempty" xmlrpc:"billingCyclePublicUsageIn,omitempty"` + + // The total public outbound bandwidth for this item's resource for the current billing cycle. + BillingCyclePublicUsageOut *Float64 `json:"billingCyclePublicUsageOut,omitempty" xmlrpc:"billingCyclePublicUsageOut,omitempty"` + + // The total public bandwidth for this item's resource for the current billing cycle. + BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"` + + // The virtual rack that this tracking object tracks. + Resource *Network_Bandwidth_Version1_Allotment `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Metric_Tracking_Object_Virtual_Storage_Repository struct { + Metric_Tracking_Object_Abstract + + // The virtual storage repository that this tracking object tracks. + Resource *Virtual_Storage_Repository `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/monitoring.go b/vendor/github.com/softlayer/softlayer-go/datatypes/monitoring.go new file mode 100644 index 0000000000..174f63b054 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/monitoring.go @@ -0,0 +1,248 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// A monitoring agent object contains information describing the agent. +type Monitoring_Agent struct { + Entity + + // The current status of the corresponding agent + AgentStatus *Monitoring_Agent_Status `json:"agentStatus,omitempty" xmlrpc:"agentStatus,omitempty"` + + // A count of all custom configuration profiles associated with the corresponding agent + ConfigurationProfileCount *uint `json:"configurationProfileCount,omitempty" xmlrpc:"configurationProfileCount,omitempty"` + + // All custom configuration profiles associated with the corresponding agent + ConfigurationProfiles []Configuration_Template_Section_Profile `json:"configurationProfiles,omitempty" xmlrpc:"configurationProfiles,omitempty"` + + // A template of an agent's current configuration which contains information about the structure of the configuration values. + ConfigurationTemplate *Configuration_Template `json:"configurationTemplate,omitempty" xmlrpc:"configurationTemplate,omitempty"` + + // Internal identifier of a configuration template that is used to configure this agent + ConfigurationTemplateId *int `json:"configurationTemplateId,omitempty" xmlrpc:"configurationTemplateId,omitempty"` + + // A count of the values associated with the corresponding Agent configuration. + ConfigurationValueCount *uint `json:"configurationValueCount,omitempty" xmlrpc:"configurationValueCount,omitempty"` + + // The values associated with the corresponding Agent configuration. 
+ ConfigurationValues []Monitoring_Agent_Configuration_Value `json:"configurationValues,omitempty" xmlrpc:"configurationValues,omitempty"`
+
+ // Created date
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // SoftLayer hardware related to the agent.
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // Internal identifier of a monitoring agent
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Last modified date
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // Monitoring agent name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // Contains general information relating to a single SoftLayer product.
+ ProductItem *Product_Item `json:"productItem,omitempty" xmlrpc:"productItem,omitempty"`
+
+ // Indicates if this monitoring agent resides on your local box or on a SoftLayer monitoring cluster.
+ RemoteMonitoringAgentFlag *bool `json:"remoteMonitoringAgentFlag,omitempty" xmlrpc:"remoteMonitoringAgentFlag,omitempty"`
+
+ // Internal identifier of a monitoring robot that this agent belongs to
+ RobotId *int `json:"robotId,omitempty" xmlrpc:"robotId,omitempty"`
+
+ // A description for a specific installation of a Software Component
+ SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"`
+
+ // Internal identifier of a monitoring agent status
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+ // Monitoring agent status name.
+ StatusName *string `json:"statusName,omitempty" xmlrpc:"statusName,omitempty"`
+
+ // The Softlayer_Virtual_Guest object related to the monitoring agent; this is the virtual guest that the running agent resides on.
+ VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"`
+}
+
+// The SoftLayer_Monitoring_Agent_Configuration_Template_Group class consists of configuration templates for agents in a monitoring package.
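+// As a sketch of how an agent's template and values are typically read with
+// this library's generated service wrappers (the mask and ids here are
+// illustrative assumptions, not part of the generated documentation):
+//
+//   sess := session.New(username, apiKey)
+//   svc := services.GetMonitoringAgentService(sess)
+//   agent, err := svc.Id(agentId).Mask("configurationTemplate;configurationValues").GetObject()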
+type Monitoring_Agent_Configuration_Template_Group struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Internal identifier of a SoftLayer account that this configuration template belongs to + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of + ConfigurationTemplateCount *uint `json:"configurationTemplateCount,omitempty" xmlrpc:"configurationTemplateCount,omitempty"` + + // A count of + ConfigurationTemplateReferenceCount *uint `json:"configurationTemplateReferenceCount,omitempty" xmlrpc:"configurationTemplateReferenceCount,omitempty"` + + // no documentation yet + ConfigurationTemplateReferences []Monitoring_Agent_Configuration_Template_Group_Reference `json:"configurationTemplateReferences,omitempty" xmlrpc:"configurationTemplateReferences,omitempty"` + + // no documentation yet + ConfigurationTemplates []Configuration_Template `json:"configurationTemplates,omitempty" xmlrpc:"configurationTemplates,omitempty"` + + // Created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Description of a monitoring agent configuration group + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a monitoring agent configuration group + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // Internal identifier of a configuration template type + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // Last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Name of a monitoring agent configuration group + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference class holds the reference information, essentially a SQL join, between a monitoring configuration group and agent configuration templates. 
+type Monitoring_Agent_Configuration_Template_Group_Reference struct { + Entity + + // no documentation yet + ConfigurationTemplate *Configuration_Template `json:"configurationTemplate,omitempty" xmlrpc:"configurationTemplate,omitempty"` + + // Internal identifier of a configuration template + ConfigurationTemplateId *int `json:"configurationTemplateId,omitempty" xmlrpc:"configurationTemplateId,omitempty"` + + // Internal identifier of a configuration group reference record + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + TemplateGroup *Monitoring_Agent_Configuration_Template_Group `json:"templateGroup,omitempty" xmlrpc:"templateGroup,omitempty"` + + // Internal identifier of a monitoring agent configuration group + TemplateGroupId *int `json:"templateGroupId,omitempty" xmlrpc:"templateGroupId,omitempty"` +} + +// Monitoring agent configuration value +type Monitoring_Agent_Configuration_Value struct { + Entity + + // Internal identifier of a monitoring Agent that this configuration value + AgentId *int `json:"agentId,omitempty" xmlrpc:"agentId,omitempty"` + + // Internal identifier of a monitoring configuration definition by which + ConfigurationDefinitionId *int `json:"configurationDefinitionId,omitempty" xmlrpc:"configurationDefinitionId,omitempty"` + + // no documentation yet + Definition *Configuration_Template_Section_Definition `json:"definition,omitempty" xmlrpc:"definition,omitempty"` + + // User-friendly description of a configuration value. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a monitoring configuration value. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The metric data type used to retrieve metric data currently being tracked. + MetricDataType *Container_Metric_Data_Type `json:"metricDataType,omitempty" xmlrpc:"metricDataType,omitempty"` + + // no documentation yet + MonitoringAgent *Monitoring_Agent `json:"monitoringAgent,omitempty" xmlrpc:"monitoringAgent,omitempty"` + + // no documentation yet + Profile *Configuration_Template_Section_Profile `json:"profile,omitempty" xmlrpc:"profile,omitempty"` + + // Internal identifier of a configuration profile. Configuration profile is associated with a configuration section type of "Template section". + // + // A "Template section" defines skeleton configuration definitions. For instance, if you want to monitor additional hard disks with "CPU/Memory/Disk Monitoring Agent", you will have to add a new configuration profiles. + ProfileId *int `json:"profileId,omitempty" xmlrpc:"profileId,omitempty"` + + // Configuration value + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Monitoring agent status +type Monitoring_Agent_Status struct { + Entity + + // Description of a monitoring agent status + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a monitoring agent status + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Monitoring agent status name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Monitoring_Robot data type contains general information relating to a monitoring robot. +type Monitoring_Robot struct { + Entity + + // The account associated with the corresponding robot. 
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // Internal identifier of a SoftLayer account that this robot belongs to
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // Internal identifier of a monitoring robot
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of the programs (monitoring agents) that gather details of a system or application, report the metric data, and trigger alarms for predefined events.
+ MonitoringAgentCount *uint `json:"monitoringAgentCount,omitempty" xmlrpc:"monitoringAgentCount,omitempty"`
+
+ // The programs (monitoring agents) that gather details of a system or application, report the metric data, and trigger alarms for predefined events.
+ MonitoringAgents []Monitoring_Agent `json:"monitoringAgents,omitempty" xmlrpc:"monitoringAgents,omitempty"`
+
+ // Robot name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The current status of the robot.
+ RobotStatus *Monitoring_Robot_Status `json:"robotStatus,omitempty" xmlrpc:"robotStatus,omitempty"`
+
+ // The SoftLayer_Software_Component that corresponds to the robot installation on the server.
+ SoftwareComponent *Software_Component `json:"softwareComponent,omitempty" xmlrpc:"softwareComponent,omitempty"`
+
+ // Internal identifier of a monitoring robot status
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+}
+
+// Your monitoring robot will be in "Active" status under normal circumstances. If you perform an OS reload, your robot will be in "Reclaim" status until it's reloaded on your server or virtual server.
+//
+// The advanced monitoring system requires the "Nimsoft Monitoring (Advanced)" service to be running and TCP ports 48000 - 48020 to be open on your server or virtual server. Monitoring agents cannot be managed, nor can the usage data be updated, if these ports are closed. Your monitoring robot will be in "Limited Connectivity" status if our monitoring management system cannot communicate with your system.
+//
+// See the [[SoftLayer_Monitoring_Robot::resetStatus|resetStatus]] service for more details.
+type Monitoring_Robot_Status struct {
+ Entity
+
+ // Monitoring robot status description
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Internal identifier of a monitoring robot status
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Monitoring robot status name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/network.go b/vendor/github.com/softlayer/softlayer-go/datatypes/network.go
new file mode 100644
index 0000000000..1015424126
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/network.go
@@ -0,0 +1,5556 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// no documentation yet
+type Network struct {
+ Entity
+
+ // The owning account identifier.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The size of the Network specified in CIDR notation. Specified in conjunction with the ``networkIdentifier`` to describe the bounding subnet size for the Network. Required for creation. See [[SoftLayer_Network/createObject]] documentation for creation details.
+ Cidr *int `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"`
+
+ // Unique identifier for the network.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A name for the Network. This is required during creation of a Network and is entirely user defined.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The starting IP address of the Network. Specified in conjunction with the ``cidr`` property to specify the bounding IP address space for the Network. Required for creation. See [[SoftLayer_Network/createObject]] documentation for creation details.
+ NetworkIdentifier *string `json:"networkIdentifier,omitempty" xmlrpc:"networkIdentifier,omitempty"`
+
+ // Notes, or a description of the Network. This is entirely user defined.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // A count of the Subnets within the Network. These represent the realized segments of the Network and reside within a [[SoftLayer_Network_Pod|Pod]]. A Subnet must be specified when provisioning a compute resource within a Network.
+ SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"`
+
+ // The Subnets within the Network. These represent the realized segments of the Network and reside within a [[SoftLayer_Network_Pod|Pod]]. A Subnet must be specified when provisioning a compute resource within a Network.
+ Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"`
+}
+
+// The SoftLayer_Network_Application_Delivery_Controller data type models a single instance of an application delivery controller. Local properties are read only, except for a ''notes'' property, which can be used to describe your application delivery controller service. The type's relational properties provide more information about the service's function and login information for the controller's backend management if advanced view is enabled.
+type Network_Application_Delivery_Controller struct {
+ Entity
+
+ // The SoftLayer customer account that owns an application delivery controller record.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The unique identifier of the SoftLayer customer account that owns an application delivery controller record
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The average daily public bandwidth usage for the current billing cycle.
+ AverageDailyPublicBandwidthUsage *Float64 `json:"averageDailyPublicBandwidthUsage,omitempty" xmlrpc:"averageDailyPublicBandwidthUsage,omitempty"`
+
+ // The billing item for an Application Delivery Controller.
+ BillingItem *Billing_Item_Network_Application_Delivery_Controller `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // Previous configurations for an Application Delivery Controller.
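+ // As a rough usage sketch (assumed from this library's generated service
+ // conventions, with a hypothetical controller id), the history can be read
+ // through the SoftLayer_Network_Application_Delivery_Controller service:
+ //
+ //   svc := services.GetNetworkApplicationDeliveryControllerService(sess)
+ //   history, err := svc.Id(controllerId).GetConfigurationHistory()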
+ ConfigurationHistory []Network_Application_Delivery_Controller_Configuration_History `json:"configurationHistory,omitempty" xmlrpc:"configurationHistory,omitempty"` + + // A count of previous configurations for an Application Delivery Controller. + ConfigurationHistoryCount *uint `json:"configurationHistoryCount,omitempty" xmlrpc:"configurationHistoryCount,omitempty"` + + // The date that an application delivery controller record was created + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The datacenter that the application delivery controller resides in. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // A brief description of an application delivery controller record. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // An application delivery controller's unique identifier + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date in which the license for this application delivery controller will expire. + LicenseExpirationDate *Time `json:"licenseExpirationDate,omitempty" xmlrpc:"licenseExpirationDate,omitempty"` + + // A count of the virtual IP address records that belong to an application delivery controller based load balancer. + LoadBalancerCount *uint `json:"loadBalancerCount,omitempty" xmlrpc:"loadBalancerCount,omitempty"` + + // The virtual IP address records that belong to an application delivery controller based load balancer. + LoadBalancers []Network_LoadBalancer_VirtualIpAddress `json:"loadBalancers,omitempty" xmlrpc:"loadBalancers,omitempty"` + + // A flag indicating that this Application Delivery Controller is a managed resource. + ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"` + + // An application delivery controller's management ip address. + ManagementIpAddress *string `json:"managementIpAddress,omitempty" xmlrpc:"managementIpAddress,omitempty"` + + // The date that an application delivery controller record was last modified + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // An application delivery controller's name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The network VLAN that an application delivery controller resides on. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // A count of the network VLANs that an application delivery controller resides on. + NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"` + + // The network VLANs that an application delivery controller resides on. + NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"` + + // Editable notes used to describe an application delivery controller's function + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The total public outbound bandwidth for the current billing cycle. + OutboundPublicBandwidthUsage *Float64 `json:"outboundPublicBandwidthUsage,omitempty" xmlrpc:"outboundPublicBandwidthUsage,omitempty"` + + // The password used to connect to an application delivery controller's management interface when it is operating in advanced view mode. + Password *Software_Component_Password `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // An application delivery controller's primary public IP address. 
+ PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"` + + // The projected public outbound bandwidth for the current billing cycle. + ProjectedPublicBandwidthUsage *Float64 `json:"projectedPublicBandwidthUsage,omitempty" xmlrpc:"projectedPublicBandwidthUsage,omitempty"` + + // A count of a network application controller's subnets. A subnet is a group of IP addresses + SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"` + + // A network application controller's subnets. A subnet is a group of IP addresses + Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` + + // A count of + TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"` + + // no documentation yet + TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"` + + // no documentation yet + Type *Network_Application_Delivery_Controller_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // no documentation yet + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // A count of + VirtualIpAddressCount *uint `json:"virtualIpAddressCount,omitempty" xmlrpc:"virtualIpAddressCount,omitempty"` + + // no documentation yet + VirtualIpAddresses []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"virtualIpAddresses,omitempty" xmlrpc:"virtualIpAddresses,omitempty"` +} + +// The SoftLayer_Network_Application_Delivery_Controller_Configuration_History data type models a single instance of a configuration history entry for an application delivery controller. The configuration history entries are used to support creating backups of an application delivery controller's configuration state in order to restore them later if needed. +type Network_Application_Delivery_Controller_Configuration_History struct { + Entity + + // The application delivery controller that a configuration history record belongs to. + Controller *Network_Application_Delivery_Controller `json:"controller,omitempty" xmlrpc:"controller,omitempty"` + + // The date a configuration history record was created. 
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A configuration history record's unique identifier
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Editable notes used to describe a configuration history record
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+}
+
+// no documentation yet
+type Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute struct {
+ Entity
+
+ // no documentation yet
+ HealthAttributeTypeId *int `json:"healthAttributeTypeId,omitempty" xmlrpc:"healthAttributeTypeId,omitempty"`
+
+ // no documentation yet
+ HealthCheck *Network_Application_Delivery_Controller_LoadBalancer_Health_Check `json:"healthCheck,omitempty" xmlrpc:"healthCheck,omitempty"`
+
+ // no documentation yet
+ HealthCheckId *int `json:"healthCheckId,omitempty" xmlrpc:"healthCheckId,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ Type *Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // no documentation yet
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// no documentation yet
+type Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // no documentation yet
+ ValueExpression *string `json:"valueExpression,omitempty" xmlrpc:"valueExpression,omitempty"`
+}
+
+// no documentation yet
+type Network_Application_Delivery_Controller_LoadBalancer_Health_Check struct {
+ Entity
+
+ // A count of
+ AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+ // no documentation yet
+ Attributes []Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+ // no documentation yet
+ HealthCheckTypeId *int `json:"healthCheckTypeId,omitempty" xmlrpc:"healthCheckTypeId,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // no documentation yet
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // A count of the collection of scale load balancers that use this health check.
+ ScaleLoadBalancerCount *uint `json:"scaleLoadBalancerCount,omitempty" xmlrpc:"scaleLoadBalancerCount,omitempty"`
+
+ // Collection of scale load balancers that use this health check.
+ ScaleLoadBalancers []Scale_LoadBalancer `json:"scaleLoadBalancers,omitempty" xmlrpc:"scaleLoadBalancers,omitempty"` + + // A count of + ServiceCount *uint `json:"serviceCount,omitempty" xmlrpc:"serviceCount,omitempty"` + + // no documentation yet + Services []Network_Application_Delivery_Controller_LoadBalancer_Service `json:"services,omitempty" xmlrpc:"services,omitempty"` + + // no documentation yet + Type *Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Routing_Method struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Routing_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Service struct { + Entity + + // no documentation yet + Enabled *int `json:"enabled,omitempty" xmlrpc:"enabled,omitempty"` + + // A count of + GroupCount *uint `json:"groupCount,omitempty" xmlrpc:"groupCount,omitempty"` + + // A count of + GroupReferenceCount *uint `json:"groupReferenceCount,omitempty" xmlrpc:"groupReferenceCount,omitempty"` + + // no documentation yet + GroupReferences []Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference `json:"groupReferences,omitempty" xmlrpc:"groupReferences,omitempty"` + + // no documentation yet + Groups []Network_Application_Delivery_Controller_LoadBalancer_Service_Group `json:"groups,omitempty" xmlrpc:"groups,omitempty"` + + // no documentation yet + HealthCheck *Network_Application_Delivery_Controller_LoadBalancer_Health_Check `json:"healthCheck,omitempty" xmlrpc:"healthCheck,omitempty"` + + // A count of + HealthCheckCount *uint `json:"healthCheckCount,omitempty" xmlrpc:"healthCheckCount,omitempty"` + + // no documentation yet + HealthChecks []Network_Application_Delivery_Controller_LoadBalancer_Health_Check `json:"healthChecks,omitempty" xmlrpc:"healthChecks,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // no documentation yet + IpAddressId *int `json:"ipAddressId,omitempty" xmlrpc:"ipAddressId,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // no documentation yet + Port *int `json:"port,omitempty" 
xmlrpc:"port,omitempty"` + + // no documentation yet + ServiceGroup *Network_Application_Delivery_Controller_LoadBalancer_Service_Group `json:"serviceGroup,omitempty" xmlrpc:"serviceGroup,omitempty"` + + // no documentation yet + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Service_Group struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // no documentation yet + RoutingMethod *Network_Application_Delivery_Controller_LoadBalancer_Routing_Method `json:"routingMethod,omitempty" xmlrpc:"routingMethod,omitempty"` + + // no documentation yet + RoutingMethodId *int `json:"routingMethodId,omitempty" xmlrpc:"routingMethodId,omitempty"` + + // no documentation yet + RoutingType *Network_Application_Delivery_Controller_LoadBalancer_Routing_Type `json:"routingType,omitempty" xmlrpc:"routingType,omitempty"` + + // no documentation yet + RoutingTypeId *int `json:"routingTypeId,omitempty" xmlrpc:"routingTypeId,omitempty"` + + // A count of + ServiceCount *uint `json:"serviceCount,omitempty" xmlrpc:"serviceCount,omitempty"` + + // A count of + ServiceReferenceCount *uint `json:"serviceReferenceCount,omitempty" xmlrpc:"serviceReferenceCount,omitempty"` + + // no documentation yet + ServiceReferences []Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference `json:"serviceReferences,omitempty" xmlrpc:"serviceReferences,omitempty"` + + // no documentation yet + Services []Network_Application_Delivery_Controller_LoadBalancer_Service `json:"services,omitempty" xmlrpc:"services,omitempty"` + + // The timeout value for connections from remote clients to the load balancer. Timeout values are only valid for HTTP service groups. 
+ Timeout *int `json:"timeout,omitempty" xmlrpc:"timeout,omitempty"` + + // no documentation yet + VirtualServer *Network_Application_Delivery_Controller_LoadBalancer_VirtualServer `json:"virtualServer,omitempty" xmlrpc:"virtualServer,omitempty"` + + // A count of + VirtualServerCount *uint `json:"virtualServerCount,omitempty" xmlrpc:"virtualServerCount,omitempty"` + + // no documentation yet + VirtualServers []Network_Application_Delivery_Controller_LoadBalancer_VirtualServer `json:"virtualServers,omitempty" xmlrpc:"virtualServers,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference struct { + Entity + + // no documentation yet + Service *Network_Application_Delivery_Controller_LoadBalancer_Service `json:"service,omitempty" xmlrpc:"service,omitempty"` + + // no documentation yet + ServiceGroup *Network_Application_Delivery_Controller_LoadBalancer_Service_Group `json:"serviceGroup,omitempty" xmlrpc:"serviceGroup,omitempty"` + + // no documentation yet + ServiceGroupId *int `json:"serviceGroupId,omitempty" xmlrpc:"serviceGroupId,omitempty"` + + // no documentation yet + ServiceId *int `json:"serviceId,omitempty" xmlrpc:"serviceId,omitempty"` + + // no documentation yet + Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The unique identifier of the SoftLayer customer account that owns the virtual IP address + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A virtual IP address's associated application delivery controller. + ApplicationDeliveryController *Network_Application_Delivery_Controller `json:"applicationDeliveryController,omitempty" xmlrpc:"applicationDeliveryController,omitempty"` + + // A count of a virtual IP address's associated application delivery controllers. + ApplicationDeliveryControllerCount *uint `json:"applicationDeliveryControllerCount,omitempty" xmlrpc:"applicationDeliveryControllerCount,omitempty"` + + // A virtual IP address's associated application delivery controllers. + ApplicationDeliveryControllers []Network_Application_Delivery_Controller `json:"applicationDeliveryControllers,omitempty" xmlrpc:"applicationDeliveryControllers,omitempty"` + + // The current billing item for the load balancer virtual IP. This is only valid when dedicatedFlag is false. This is an independent virtual IP, and if canceled, will only affect the associated virtual IP. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The connection limit for this virtual IP address + ConnectionLimit *int `json:"connectionLimit,omitempty" xmlrpc:"connectionLimit,omitempty"` + + // The units for the connection limit + ConnectionLimitUnits *string `json:"connectionLimitUnits,omitempty" xmlrpc:"connectionLimitUnits,omitempty"` + + // The current billing item for the load balancing device housing the virtual IP. This billing item represents a device which could contain other virtual IPs. Caution should be taken when canceling. This is only valid when dedicatedFlag is true. + DedicatedBillingItem *Billing_Item_Network_LoadBalancer `json:"dedicatedBillingItem,omitempty" xmlrpc:"dedicatedBillingItem,omitempty"` + + // A flag that determines if a VIP is dedicated or not. 
This is used to override the connection limit and use an unlimited value. + DedicatedFlag *bool `json:"dedicatedFlag,omitempty" xmlrpc:"dedicatedFlag,omitempty"` + + // Denotes whether the virtual IP is configured within a high availability cluster. + HighAvailabilityFlag *bool `json:"highAvailabilityFlag,omitempty" xmlrpc:"highAvailabilityFlag,omitempty"` + + // The unique identifier of the virtual IP address record + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // ID of the IP address this virtual IP utilizes + IpAddressId *int `json:"ipAddressId,omitempty" xmlrpc:"ipAddressId,omitempty"` + + // no documentation yet + LoadBalancerHardware []Hardware `json:"loadBalancerHardware,omitempty" xmlrpc:"loadBalancerHardware,omitempty"` + + // A count of + LoadBalancerHardwareCount *uint `json:"loadBalancerHardwareCount,omitempty" xmlrpc:"loadBalancerHardwareCount,omitempty"` + + // A flag indicating that the load balancer is a managed resource. + ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"` + + // User-created notes for this load balancer virtual IP address + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // A count of the list of security ciphers enabled for this virtual IP address + SecureTransportCipherCount *uint `json:"secureTransportCipherCount,omitempty" xmlrpc:"secureTransportCipherCount,omitempty"` + + // The list of security ciphers enabled for this virtual IP address + SecureTransportCiphers []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportCipher `json:"secureTransportCiphers,omitempty" xmlrpc:"secureTransportCiphers,omitempty"` + + // A count of the list of secure transport protocols enabled for this virtual IP address + SecureTransportProtocolCount *uint `json:"secureTransportProtocolCount,omitempty" xmlrpc:"secureTransportProtocolCount,omitempty"` + + // The list of secure transport protocols enabled for this virtual IP address + SecureTransportProtocols []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportProtocol `json:"secureTransportProtocols,omitempty" xmlrpc:"secureTransportProtocols,omitempty"` + + // The SSL certificate currently associated with the VIP. + SecurityCertificate *Security_Certificate `json:"securityCertificate,omitempty" xmlrpc:"securityCertificate,omitempty"` + + // The SSL certificate currently associated with the VIP. Provides chosen certificate visibility to unprivileged users. + SecurityCertificateEntry *Security_Certificate_Entry `json:"securityCertificateEntry,omitempty" xmlrpc:"securityCertificateEntry,omitempty"` + + // The unique identifier of the Security Certificate to be utilized when SSL support is enabled. 
+ SecurityCertificateId *int `json:"securityCertificateId,omitempty" xmlrpc:"securityCertificateId,omitempty"`
+
+ // Determines if the VIP currently has SSL acceleration enabled
+ SslActiveFlag *bool `json:"sslActiveFlag,omitempty" xmlrpc:"sslActiveFlag,omitempty"`
+
+ // Determines if the VIP is _allowed_ to utilize SSL acceleration
+ SslEnabledFlag *bool `json:"sslEnabledFlag,omitempty" xmlrpc:"sslEnabledFlag,omitempty"`
+
+ // A count of
+ VirtualServerCount *uint `json:"virtualServerCount,omitempty" xmlrpc:"virtualServerCount,omitempty"`
+
+ // no documentation yet
+ VirtualServers []Network_Application_Delivery_Controller_LoadBalancer_VirtualServer `json:"virtualServers,omitempty" xmlrpc:"virtualServers,omitempty"`
+}
+
+// A single cipher configured for a load balancer virtual IP address instance. Instances of this class are immutable and should reflect a cipher that is configurable on a load balancer device.
+type Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportCipher struct {
+ Entity
+
+ // Unique identifier for the cipher instance
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Identifier for the associated encryption algorithm
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ VirtualIpAddress *Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"virtualIpAddress,omitempty" xmlrpc:"virtualIpAddress,omitempty"`
+
+ // Identifier for the associated [[SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress (type)|virtual IP address]] instance
+ VirtualIpAddressId *int `json:"virtualIpAddressId,omitempty" xmlrpc:"virtualIpAddressId,omitempty"`
+}
+
+// Links an SSL transport protocol with a virtual IP address instance. Instances of this class are immutable and should reflect a protocol that is configurable on a load balancer device.
+type Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportProtocol struct { + Entity + + // Unique identifier for the protocol instance + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Identifier for the associated communication protocol + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + VirtualIpAddress *Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"virtualIpAddress,omitempty" xmlrpc:"virtualIpAddress,omitempty"` + + // Identifier for the associated [[SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress (type)|virtual IP address]] instance + VirtualIpAddressId *int `json:"virtualIpAddressId,omitempty" xmlrpc:"virtualIpAddressId,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_VirtualServer struct { + Entity + + // no documentation yet + Allocation *int `json:"allocation,omitempty" xmlrpc:"allocation,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // no documentation yet + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // no documentation yet + RoutingMethod *Network_Application_Delivery_Controller_LoadBalancer_Routing_Method `json:"routingMethod,omitempty" xmlrpc:"routingMethod,omitempty"` + + // no documentation yet + RoutingMethodId *int `json:"routingMethodId,omitempty" xmlrpc:"routingMethodId,omitempty"` + + // A count of collection of scale load balancers this virtual server applies to. + ScaleLoadBalancerCount *uint `json:"scaleLoadBalancerCount,omitempty" xmlrpc:"scaleLoadBalancerCount,omitempty"` + + // Collection of scale load balancers this virtual server applies to. + ScaleLoadBalancers []Scale_LoadBalancer `json:"scaleLoadBalancers,omitempty" xmlrpc:"scaleLoadBalancers,omitempty"` + + // A count of + ServiceGroupCount *uint `json:"serviceGroupCount,omitempty" xmlrpc:"serviceGroupCount,omitempty"` + + // no documentation yet + ServiceGroups []Network_Application_Delivery_Controller_LoadBalancer_Service_Group `json:"serviceGroups,omitempty" xmlrpc:"serviceGroups,omitempty"` + + // no documentation yet + VirtualIpAddress *Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"virtualIpAddress,omitempty" xmlrpc:"virtualIpAddress,omitempty"` + + // no documentation yet + VirtualIpAddressId *int `json:"virtualIpAddressId,omitempty" xmlrpc:"virtualIpAddressId,omitempty"` +} + +// no documentation yet +type Network_Application_Delivery_Controller_Type struct { + Entity + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// A SoftLayer_Network_Backbone represents a single backbone connection from SoftLayer to the public Internet, from the Internet to the SoftLayer private network, or a link that connects the private networks between SoftLayer's datacenters. The SoftLayer_Network_Backbone data type is a collection of data associated with one of those connections. +type Network_Backbone struct { + Entity + + // The numeric portion of the bandwidth capacity of a SoftLayer backbone. 
For instance, if a backbone is rated at "1 GigE" capacity then the capacity property of the backbone is 1.
+ Capacity *int `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"`
+
+ // The unit portion of the bandwidth capacity of a SoftLayer backbone. For instance, if a backbone is rated at "10 G" capacity then the capacityUnits property of the backbone is "G".
+ CapacityUnits *string `json:"capacityUnits,omitempty" xmlrpc:"capacityUnits,omitempty"`
+
+ // A backbone's status.
+ Health *string `json:"health,omitempty" xmlrpc:"health,omitempty"`
+
+ // A backbone's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Which of the SoftLayer datacenters a backbone is connected to.
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // A backbone's name. This is usually the name of the backbone's network provider followed by a number in case SoftLayer uses more than one backbone from a provider. Backbone provider numbers start with the number one and increment from there.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A backbone's primary network component.
+ NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"`
+
+ // The internal identifier of the network component that this backbone is connected to.
+ NetworkComponentId *int `json:"networkComponentId,omitempty" xmlrpc:"networkComponentId,omitempty"`
+
+ // Whether a SoftLayer backbone connects to the public Internet, to the private network, or connects the private networks of SoftLayer's datacenters. Type is either the string "public", "private", or "private-interconnect".
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// no documentation yet
+type Network_Backbone_Location_Dependent struct {
+ Entity
+
+ // no documentation yet
+ DependentLocation *Location `json:"dependentLocation,omitempty" xmlrpc:"dependentLocation,omitempty"`
+
+ // no documentation yet
+ DependentLocationId *int `json:"dependentLocationId,omitempty" xmlrpc:"dependentLocationId,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ SourceLocation *Location `json:"sourceLocation,omitempty" xmlrpc:"sourceLocation,omitempty"`
+
+ // no documentation yet
+ SourceLocationId *int `json:"sourceLocationId,omitempty" xmlrpc:"sourceLocationId,omitempty"`
+}
+
+// The SoftLayer_Network_Bandwidth_Usage data type contains specific information relating to bandwidth utilization at a specific point in time on a given network interface.
+type Network_Bandwidth_Usage struct {
+ Entity
+
+ // Incoming bandwidth utilization.
+ AmountIn *Float64 `json:"amountIn,omitempty" xmlrpc:"amountIn,omitempty"`
+
+ // Outgoing bandwidth utilization.
+ AmountOut *Float64 `json:"amountOut,omitempty" xmlrpc:"amountOut,omitempty"`
+
+ // ID of the bandwidth usage detail type for this record.
+ BandwidthUsageDetailTypeId *Float64 `json:"bandwidthUsageDetailTypeId,omitempty" xmlrpc:"bandwidthUsageDetailTypeId,omitempty"`
+
+ // The tracking object this bandwidth usage record describes.
+ TrackingObject *Metric_Tracking_Object `json:"trackingObject,omitempty" xmlrpc:"trackingObject,omitempty"`
+
+ // In and out bandwidth utilization for a specified time stamp.
+ Type *Network_Bandwidth_Version1_Usage_Detail_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Usage_Detail data type contains specific information relating to bandwidth utilization at a specific point in time on a given network interface. +type Network_Bandwidth_Usage_Detail struct { + Entity + + // The account tied to this tracking object + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Incoming bandwidth utilization. + AmountIn *Float64 `json:"amountIn,omitempty" xmlrpc:"amountIn,omitempty"` + + // Outgoing bandwidth utilization. + AmountOut *Float64 `json:"amountOut,omitempty" xmlrpc:"amountOut,omitempty"` + + // ID of the bandwidth usage detail type for this record. + BandwidthUsageDetailTypeId *Float64 `json:"bandwidthUsageDetailTypeId,omitempty" xmlrpc:"bandwidthUsageDetailTypeId,omitempty"` + + // The tracking object this bandwidth usage record describes. + TrackingObject *Metric_Tracking_Object `json:"trackingObject,omitempty" xmlrpc:"trackingObject,omitempty"` + + // In and out bandwidth utilization for a specified time stamp. + Type *Network_Bandwidth_Version1_Usage_Detail_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Allocation data type contains general information relating to a single bandwidth allocation record. +type Network_Bandwidth_Version1_Allocation struct { + Entity + + // A bandwidth allotment detail. + AllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"allotmentDetail,omitempty" xmlrpc:"allotmentDetail,omitempty"` + + // The amount of bandwidth allocated. + Amount *Float64 `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // Billing item associated with this hardware allocation. + BillingItem *Billing_Item_Hardware `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // Internal ID associated with this allocation. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Allotment class provides methods and data structures necessary to work with an array of hardware objects associated with a single Bandwidth Pooling. +type Network_Bandwidth_Version1_Allotment struct { + Entity + + // The account associated with this virtual rack. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The user account identifier associated with this allotment. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of the bandwidth allotment detail records associated with this virtual rack. + ActiveDetailCount *uint `json:"activeDetailCount,omitempty" xmlrpc:"activeDetailCount,omitempty"` + + // The bandwidth allotment detail records associated with this virtual rack. + ActiveDetails []Network_Bandwidth_Version1_Allotment_Detail `json:"activeDetails,omitempty" xmlrpc:"activeDetails,omitempty"` + + // A count of the Application Delivery Controller contained within a virtual rack. + ApplicationDeliveryControllerCount *uint `json:"applicationDeliveryControllerCount,omitempty" xmlrpc:"applicationDeliveryControllerCount,omitempty"` + + // The Application Delivery Controller contained within a virtual rack. + ApplicationDeliveryControllers []Network_Application_Delivery_Controller `json:"applicationDeliveryControllers,omitempty" xmlrpc:"applicationDeliveryControllers,omitempty"` + + // The average daily public bandwidth usage for the current billing cycle. 
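+ // A sketch of how allotments like this are commonly listed, assuming the
+ // generated SoftLayer_Account service wrapper in this library (names are
+ // illustrative, not part of the generated documentation):
+ //
+ //   accountSvc := services.GetAccountService(sess)
+ //   allotments, err := accountSvc.GetBandwidthAllotments()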
+ AverageDailyPublicBandwidthUsage *Float64 `json:"averageDailyPublicBandwidthUsage,omitempty" xmlrpc:"averageDailyPublicBandwidthUsage,omitempty"` + + // The bandwidth allotment type of this virtual rack. + BandwidthAllotmentType *Network_Bandwidth_Version1_Allotment_Type `json:"bandwidthAllotmentType,omitempty" xmlrpc:"bandwidthAllotmentType,omitempty"` + + // An identifier marking this allotment as a virtual private rack (1) or a bandwidth pooling (2). + BandwidthAllotmentTypeId *int `json:"bandwidthAllotmentTypeId,omitempty" xmlrpc:"bandwidthAllotmentTypeId,omitempty"` + + // A count of the bare metal server instances contained within a virtual rack. + BareMetalInstanceCount *uint `json:"bareMetalInstanceCount,omitempty" xmlrpc:"bareMetalInstanceCount,omitempty"` + + // The bare metal server instances contained within a virtual rack. + BareMetalInstances []Hardware `json:"bareMetalInstances,omitempty" xmlrpc:"bareMetalInstances,omitempty"` + + // A virtual rack's raw bandwidth usage data for an account's current billing cycle. One object is returned for each network this server is attached to. + BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"` + + // A count of a virtual rack's raw bandwidth usage data for an account's current billing cycle. One object is returned for each network this server is attached to. + BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"` + + // A virtual rack's raw private network bandwidth usage data for an account's current billing cycle. + BillingCyclePrivateBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"` + + // A virtual rack's raw public network bandwidth usage data for an account's current billing cycle. + BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"` + + // The total public bandwidth used in this virtual rack for an account's current billing cycle. + BillingCyclePublicUsageTotal *uint `json:"billingCyclePublicUsageTotal,omitempty" xmlrpc:"billingCyclePublicUsageTotal,omitempty"` + + // A virtual rack's billing item. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // Creation date for an allotment. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // An object that provides commonly used bandwidth summary components for the current billing cycle. + CurrentBandwidthSummary *Metric_Tracking_Object_Bandwidth_Summary `json:"currentBandwidthSummary,omitempty" xmlrpc:"currentBandwidthSummary,omitempty"` + + // A count of the bandwidth allotment detail records associated with this virtual rack. + DetailCount *uint `json:"detailCount,omitempty" xmlrpc:"detailCount,omitempty"` + + // The bandwidth allotment detail records associated with this virtual rack. + Details []Network_Bandwidth_Version1_Allotment_Detail `json:"details,omitempty" xmlrpc:"details,omitempty"` + + // End date for an allotment. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // The hardware contained within a virtual rack. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of the hardware contained within a virtual rack.
+ HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // A virtual rack's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The total public inbound bandwidth used in this virtual rack for an account's current billing cycle. + InboundPublicBandwidthUsage *Float64 `json:"inboundPublicBandwidthUsage,omitempty" xmlrpc:"inboundPublicBandwidthUsage,omitempty"` + + // The location group associated with this virtual rack. + LocationGroup *Location_Group `json:"locationGroup,omitempty" xmlrpc:"locationGroup,omitempty"` + + // Location Group Id for an allotment + LocationGroupId *int `json:"locationGroupId,omitempty" xmlrpc:"locationGroupId,omitempty"` + + // A count of the managed bare metal server instances contained within a virtual rack. + ManagedBareMetalInstanceCount *uint `json:"managedBareMetalInstanceCount,omitempty" xmlrpc:"managedBareMetalInstanceCount,omitempty"` + + // The managed bare metal server instances contained within a virtual rack. + ManagedBareMetalInstances []Hardware `json:"managedBareMetalInstances,omitempty" xmlrpc:"managedBareMetalInstances,omitempty"` + + // The managed hardware contained within a virtual rack. + ManagedHardware []Hardware `json:"managedHardware,omitempty" xmlrpc:"managedHardware,omitempty"` + + // A count of the managed hardware contained within a virtual rack. + ManagedHardwareCount *uint `json:"managedHardwareCount,omitempty" xmlrpc:"managedHardwareCount,omitempty"` + + // A count of the managed Virtual Servers contained within a virtual rack. + ManagedVirtualGuestCount *uint `json:"managedVirtualGuestCount,omitempty" xmlrpc:"managedVirtualGuestCount,omitempty"` + + // The managed Virtual Servers contained within a virtual rack. + ManagedVirtualGuests []Virtual_Guest `json:"managedVirtualGuests,omitempty" xmlrpc:"managedVirtualGuests,omitempty"` + + // A virtual rack's metric tracking object. This object records all periodic polled data available to this rack. + MetricTrackingObject *Metric_Tracking_Object_VirtualDedicatedRack `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // The metric tracking object id for this allotment. + MetricTrackingObjectId *int `json:"metricTrackingObjectId,omitempty" xmlrpc:"metricTrackingObjectId,omitempty"` + + // A virtual rack's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The total public outbound bandwidth used in this virtual rack for an account's current billing cycle. + OutboundPublicBandwidthUsage *Float64 `json:"outboundPublicBandwidthUsage,omitempty" xmlrpc:"outboundPublicBandwidthUsage,omitempty"` + + // Whether the bandwidth usage for this bandwidth pool for the current billing cycle exceeds the allocation. + OverBandwidthAllocationFlag *int `json:"overBandwidthAllocationFlag,omitempty" xmlrpc:"overBandwidthAllocationFlag,omitempty"` + + // The private network only hardware contained within a virtual rack. + PrivateNetworkOnlyHardware []Hardware `json:"privateNetworkOnlyHardware,omitempty" xmlrpc:"privateNetworkOnlyHardware,omitempty"` + + // A count of the private network only hardware contained within a virtual rack. + PrivateNetworkOnlyHardwareCount *uint `json:"privateNetworkOnlyHardwareCount,omitempty" xmlrpc:"privateNetworkOnlyHardwareCount,omitempty"` + + // Whether the bandwidth usage for this bandwidth pool for the current billing cycle is projected to exceed the allocation.
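+ //
+ // Minimal interpretation sketch (assumption, not confirmed by these docs: a
+ // non-zero value means the pool is projected to go over; "allotment" is a
+ // hypothetical value of this type):
+ //
+ //	over := allotment.ProjectedOverBandwidthAllocationFlag != nil &&
+ //		*allotment.ProjectedOverBandwidthAllocationFlag != 0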
+ ProjectedOverBandwidthAllocationFlag *int `json:"projectedOverBandwidthAllocationFlag,omitempty" xmlrpc:"projectedOverBandwidthAllocationFlag,omitempty"` + + // The projected public outbound bandwidth for this virtual rack for the current billing cycle. + ProjectedPublicBandwidthUsage *Float64 `json:"projectedPublicBandwidthUsage,omitempty" xmlrpc:"projectedPublicBandwidthUsage,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // Service Provider Id for an allotment + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // The combined allocated bandwidth for all servers in a virtual rack. + TotalBandwidthAllocated *uint `json:"totalBandwidthAllocated,omitempty" xmlrpc:"totalBandwidthAllocated,omitempty"` + + // A count of the Virtual Servers contained within a virtual rack. + VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"` + + // The Virtual Servers contained within a virtual rack. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Allotment_Detail data type contains specific information relating to a single bandwidth allotment record. +type Network_Bandwidth_Version1_Allotment_Detail struct { + Entity + + // Allocated bandwidth. + Allocation *Network_Bandwidth_Version1_Allocation `json:"allocation,omitempty" xmlrpc:"allocation,omitempty"` + + // Allocated bandwidth. + AllocationId *int `json:"allocationId,omitempty" xmlrpc:"allocationId,omitempty"` + + // The parent Bandwidth Pool. + BandwidthAllotment *Network_Bandwidth_Version1_Allotment `json:"bandwidthAllotment,omitempty" xmlrpc:"bandwidthAllotment,omitempty"` + + // Bandwidth Pool associated with this detail. + BandwidthAllotmentId *int `json:"bandwidthAllotmentId,omitempty" xmlrpc:"bandwidthAllotmentId,omitempty"` + + // Bandwidth used. + BandwidthUsage []Network_Bandwidth_Version1_Usage `json:"bandwidthUsage,omitempty" xmlrpc:"bandwidthUsage,omitempty"` + + // A count of bandwidth used. + BandwidthUsageCount *uint `json:"bandwidthUsageCount,omitempty" xmlrpc:"bandwidthUsageCount,omitempty"` + + // Beginning this date the bandwidth allotment is active. + EffectiveDate *Time `json:"effectiveDate,omitempty" xmlrpc:"effectiveDate,omitempty"` + + // From this date the bandwidth allotment is no longer active. + EndEffectiveDate *Time `json:"endEffectiveDate,omitempty" xmlrpc:"endEffectiveDate,omitempty"` + + // Internal ID associated with this allotment detail. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Service Provider Id for an allotment + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Allotment_Type contains a description of the associated SoftLayer_Network_Bandwidth_Version1_Allotment object.
+type Network_Bandwidth_Version1_Allotment_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ShortDescription *string `json:"shortDescription,omitempty" xmlrpc:"shortDescription,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Host type contains general information used to route a server to its pod. +type Network_Bandwidth_Version1_Host struct { + Entity + + // Pod ID for this host device. + PodId *int `json:"podId,omitempty" xmlrpc:"podId,omitempty"` +} + +// All bandwidth tracking is maintained through the switch that the bandwidth is used through. All bandwidth is stored in a "pod" repository. An interface links the hardware switch with the pod repository identification number. This is only relevant to bandwidth data. It is not common to use this. +type Network_Bandwidth_Version1_Interface struct { + Entity + + // The host for an interface. This is not to be confused with SoftLayer hardware. + Host *Network_Bandwidth_Version1_Host `json:"host,omitempty" xmlrpc:"host,omitempty"` + + // An interface's host. The host stores the pod number for the bandwidth data. + HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"` + + // The switch for an interface. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // The network component for this interface. + NetworkComponentId *int `json:"networkComponentId,omitempty" xmlrpc:"networkComponentId,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Usage data type contains general information relating to a single bandwidth usage record. +type Network_Bandwidth_Version1_Usage struct { + Entity + + // Bandwidth allotment detail for this hardware. + BandwidthAllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"bandwidthAllotmentDetail,omitempty" xmlrpc:"bandwidthAllotmentDetail,omitempty"` + + // Bandwidth usage details for this hardware. + BandwidthUsageDetail []Network_Bandwidth_Version1_Usage_Detail `json:"bandwidthUsageDetail,omitempty" xmlrpc:"bandwidthUsageDetail,omitempty"` + + // A count of bandwidth usage details for this hardware. + BandwidthUsageDetailCount *uint `json:"bandwidthUsageDetailCount,omitempty" xmlrpc:"bandwidthUsageDetailCount,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Usage_Detail data type contains specific information relating to bandwidth utilization at a specific point in time on a given network interface. +type Network_Bandwidth_Version1_Usage_Detail struct { + Entity + + // Incoming bandwidth utilization. + AmountIn *Float64 `json:"amountIn,omitempty" xmlrpc:"amountIn,omitempty"` + + // Outgoing bandwidth utilization. + AmountOut *Float64 `json:"amountOut,omitempty" xmlrpc:"amountOut,omitempty"` + + // In and out bandwidth utilization for a specified time stamp. + BandwidthUsage *Network_Bandwidth_Version1_Usage `json:"bandwidthUsage,omitempty" xmlrpc:"bandwidthUsage,omitempty"` + + // Describes this bandwidth utilization record as on the public or private network interface. + BandwidthUsageDetailType *Network_Bandwidth_Version1_Usage_Detail_Type `json:"bandwidthUsageDetailType,omitempty" xmlrpc:"bandwidthUsageDetailType,omitempty"` + + // Day and time this bandwidth utilization event was recorded.
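+ //
+ // Sketch: assuming the package's Time type embeds time.Time, detail records
+ // can be bucketed by calendar day ("detail" is a hypothetical, nil-checked
+ // value of this type):
+ //
+ //	key := detail.Day.Format("2006-01-02") // e.g. "2017-05-01"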
+ Day *Time `json:"day,omitempty" xmlrpc:"day,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Usage_Detail_Total data type contains specific information relating to bandwidth utilization at a specific point in time on a given network interface. +type Network_Bandwidth_Version1_Usage_Detail_Total struct { + Entity + + // The account tied to this tracking object + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Incoming bandwidth utilization. + AmountIn *Float64 `json:"amountIn,omitempty" xmlrpc:"amountIn,omitempty"` + + // Outgoing bandwidth utilization. + AmountOut *Float64 `json:"amountOut,omitempty" xmlrpc:"amountOut,omitempty"` + + // ID of the bandwidth usage detail type for this record. + BandwidthUsageDetailTypeId *Float64 `json:"bandwidthUsageDetailTypeId,omitempty" xmlrpc:"bandwidthUsageDetailTypeId,omitempty"` + + // The tracking object this bandwidth usage record describes. + TrackingObject *Metric_Tracking_Object `json:"trackingObject,omitempty" xmlrpc:"trackingObject,omitempty"` + + // In and out bandwidth utilization for a specified time stamp. + Type *Network_Bandwidth_Version1_Usage_Detail_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Network_Bandwidth_Version1_Usage_Detail_Type data type contains generic information relating to the types of bandwidth records available, currently just public and private. +type Network_Bandwidth_Version1_Usage_Detail_Type struct { + Entity + + // Database key associated with this bandwidth detail type. + Alias *string `json:"alias,omitempty" xmlrpc:"alias,omitempty"` +} + +// Every piece of hardware running in SoftLayer's datacenters connected to the public, private, or management networks (where applicable) has a corresponding network component. These network components are modeled by the SoftLayer_Network_Component data type. These data types reflect the servers' local ethernet and remote management interfaces. +type Network_Component struct { + Entity + + // Reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) command currently being executed by the server's remote management card. + ActiveCommand *Hardware_Component_RemoteManagement_Command_Request `json:"activeCommand,omitempty" xmlrpc:"activeCommand,omitempty"` + + // The network component linking this object to a child device + DownlinkComponent *Network_Component `json:"downlinkComponent,omitempty" xmlrpc:"downlinkComponent,omitempty"` + + // The duplex mode of a network component. + DuplexMode *Network_Component_Duplex_Mode `json:"duplexMode,omitempty" xmlrpc:"duplexMode,omitempty"` + + // A network component's Duplex mode. + DuplexModeId *string `json:"duplexModeId,omitempty" xmlrpc:"duplexModeId,omitempty"` + + // The hardware that a network component resides in. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The internal identifier of the hardware that a network component belongs to. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + HighAvailabilityFirewallFlag *bool `json:"highAvailabilityFirewallFlag,omitempty" xmlrpc:"highAvailabilityFirewallFlag,omitempty"` + + // A network component's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A hardware switch's interface to the bandwidth pod.
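+ //
+ // Sketch: relational properties such as this are only present when requested
+ // (e.g. via an object mask), so a traversal to the pod id guards each hop
+ // ("comp" is a hypothetical Network_Component value):
+ //
+ //	if comp.Interface != nil && comp.Interface.Host != nil && comp.Interface.Host.PodId != nil {
+ //		podID := *comp.Interface.Host.PodId // the bandwidth pod this port reports into
+ //	}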
+ Interface *Network_Bandwidth_Version1_Interface `json:"interface,omitempty" xmlrpc:"interface,omitempty"` + + // A count of the records of all IP addresses bound to a network component. + IpAddressBindingCount *uint `json:"ipAddressBindingCount,omitempty" xmlrpc:"ipAddressBindingCount,omitempty"` + + // The records of all IP addresses bound to a network component. + IpAddressBindings []Network_Component_IpAddress `json:"ipAddressBindings,omitempty" xmlrpc:"ipAddressBindings,omitempty"` + + // A count of + IpAddressCount *uint `json:"ipAddressCount,omitempty" xmlrpc:"ipAddressCount,omitempty"` + + // no documentation yet + IpAddresses []Network_Subnet_IpAddress `json:"ipAddresses,omitempty" xmlrpc:"ipAddresses,omitempty"` + + // The IP address of an IPMI-based management network component. + IpmiIpAddress *string `json:"ipmiIpAddress,omitempty" xmlrpc:"ipmiIpAddress,omitempty"` + + // The MAC address of an IPMI-based management network component. + IpmiMacAddress *string `json:"ipmiMacAddress,omitempty" xmlrpc:"ipmiMacAddress,omitempty"` + + // Last reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) command issued to the server's remote management card. + LastCommand *Hardware_Component_RemoteManagement_Command_Request `json:"lastCommand,omitempty" xmlrpc:"lastCommand,omitempty"` + + // A network component's unique MAC address. IPMI-based management network interfaces may not have a MAC address. + MacAddress *string `json:"macAddress,omitempty" xmlrpc:"macAddress,omitempty"` + + // A network component's maximum allowed speed, measured in Mbit per second. ''maxSpeed'' is determined by the capabilities of the network interface and the port speed purchased on your SoftLayer server. + MaxSpeed *int `json:"maxSpeed,omitempty" xmlrpc:"maxSpeed,omitempty"` + + // The metric tracking object for this network component. + MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // The date a network component was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A network component's short name. For most servers this is the string "eth" for ethernet ports or "mgmt" for remote management ports. Use this in conjunction with the ''port'' property to identify a network component. For instance, the "eth0" interface on a server has the network component name "eth" and port 0. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The upstream network component firewall. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // A network component's associated group. + NetworkComponentGroup *Network_Component_Group `json:"networkComponentGroup,omitempty" xmlrpc:"networkComponentGroup,omitempty"` + + // All network devices in SoftLayer's network hierarchy that this device is connected to. + NetworkHardware []Hardware `json:"networkHardware,omitempty" xmlrpc:"networkHardware,omitempty"` + + // A count of all network devices in SoftLayer's network hierarchy that this device is connected to. + NetworkHardwareCount *uint `json:"networkHardwareCount,omitempty" xmlrpc:"networkHardwareCount,omitempty"` + + // The VLAN that a network component's subnet is associated with. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // The unique internal id of the network VLAN that the port belongs to. 
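+ //
+ // Sketch: the Name and Port properties described on this type combine into the
+ // conventional interface label (name "eth" + port 0 -> "eth0"); assuming both
+ // pointers are set on a hypothetical value "comp":
+ //
+ //	label := fmt.Sprintf("%s%d", *comp.Name, *comp.Port)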
+ NetworkVlanId *int `json:"networkVlanId,omitempty" xmlrpc:"networkVlanId,omitempty"` + + // A count of the VLANs that are trunked to this network component. + NetworkVlanTrunkCount *uint `json:"networkVlanTrunkCount,omitempty" xmlrpc:"networkVlanTrunkCount,omitempty"` + + // The VLANs that are trunked to this network component. + NetworkVlanTrunks []Network_Component_Network_Vlan_Trunk `json:"networkVlanTrunks,omitempty" xmlrpc:"networkVlanTrunks,omitempty"` + + // A network component's port number. Most hardware has more than one network interface. The port property separates these interfaces. Use this in conjunction with the ''name'' property to identify a network component. For instance, the "eth0" interface on a server has the network component name "eth" and port 0. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // A network component's primary IP address. IPMI-based management network interfaces may not have an IP address. + PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"` + + // The primary IPv4 Address record for a network component. + PrimaryIpAddressRecord *Network_Subnet_IpAddress `json:"primaryIpAddressRecord,omitempty" xmlrpc:"primaryIpAddressRecord,omitempty"` + + // The subnet of the primary IP address assigned to this network component. + PrimarySubnet *Network_Subnet `json:"primarySubnet,omitempty" xmlrpc:"primarySubnet,omitempty"` + + // The primary IPv6 Address record for a network component. + PrimaryVersion6IpAddressRecord *Network_Subnet_IpAddress `json:"primaryVersion6IpAddressRecord,omitempty" xmlrpc:"primaryVersion6IpAddressRecord,omitempty"` + + // A count of the last five reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) commands issued to the server's remote management card. + RecentCommandCount *uint `json:"recentCommandCount,omitempty" xmlrpc:"recentCommandCount,omitempty"` + + // The last five reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) commands issued to the server's remote management card. + RecentCommands []Hardware_Component_RemoteManagement_Command_Request `json:"recentCommands,omitempty" xmlrpc:"recentCommands,omitempty"` + + // Indicates whether the network component is participating in a group of two or more components capable of being operationally redundant, if enabled. + RedundancyCapableFlag *bool `json:"redundancyCapableFlag,omitempty" xmlrpc:"redundancyCapableFlag,omitempty"` + + // Indicates whether the network component is participating in a group of two or more components which is actively providing link redundancy. + RedundancyEnabledFlag *bool `json:"redundancyEnabledFlag,omitempty" xmlrpc:"redundancyEnabledFlag,omitempty"` + + // A count of user(s) credentials to issue commands and/or interact with the server's remote management card. + RemoteManagementUserCount *uint `json:"remoteManagementUserCount,omitempty" xmlrpc:"remoteManagementUserCount,omitempty"` + + // User(s) credentials to issue commands and/or interact with the server's remote management card. + RemoteManagementUsers []Hardware_Component_RemoteManagement_User `json:"remoteManagementUsers,omitempty" xmlrpc:"remoteManagementUsers,omitempty"` + + // A network component's router. + Router *Hardware `json:"router,omitempty" xmlrpc:"router,omitempty"` + + // A network component's speed, measured in Mbit per second. + Speed *int `json:"speed,omitempty" xmlrpc:"speed,omitempty"` + + // A network component's status.
This can take one of four possible values: "ACTIVE", "DISABLE", "USER_OFF", or "MACWAIT". "ACTIVE" network components are enabled and in use on a server. "DISABLE" status components have been administratively disabled by SoftLayer accounting or abuse. "USER_OFF" components have been administratively disabled by you, the user. "MACWAIT" components only exist on network components that have not been provisioned. You should never see a network interface in MACWAIT state. If you happen to see one, please contact SoftLayer support. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // Whether a network component's primary ip address is from a storage network subnet or not. + StorageNetworkFlag *bool `json:"storageNetworkFlag,omitempty" xmlrpc:"storageNetworkFlag,omitempty"` + + // A count of a network component's subnets. A subnet is a group of IP addresses + SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"` + + // A network component's subnets. A subnet is a group of IP addresses + Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` + + // The network component linking this object to a parent device + UplinkComponent *Network_Component `json:"uplinkComponent,omitempty" xmlrpc:"uplinkComponent,omitempty"` + + // The duplex mode of the uplink network component linking to this object + UplinkDuplexMode *Network_Component_Duplex_Mode `json:"uplinkDuplexMode,omitempty" xmlrpc:"uplinkDuplexMode,omitempty"` +} + +// Duplex Mode allows finer grained control over networking options and settings. +type Network_Component_Duplex_Mode struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_Component_Firewall data type contains general information relating to a single SoftLayer network component firewall. This is the object which ties the running rules to a specific downstream server. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Component_Firewall struct { + Entity + + // A count of the additional subnets linked to this network component firewall, that inherit rules from the host that the context slot is attached to. + ApplyServerRuleSubnetCount *uint `json:"applyServerRuleSubnetCount,omitempty" xmlrpc:"applyServerRuleSubnetCount,omitempty"` + + // The additional subnets linked to this network component firewall, that inherit rules from the host that the context slot is attached to. + ApplyServerRuleSubnets []Network_Subnet `json:"applyServerRuleSubnets,omitempty" xmlrpc:"applyServerRuleSubnets,omitempty"` + + // The billing item for a Hardware Firewall (Dedicated). + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The network component of the guest virtual server that this network component firewall belongs to. + GuestNetworkComponent *Virtual_Guest_Network_Component `json:"guestNetworkComponent,omitempty" xmlrpc:"guestNetworkComponent,omitempty"` + + // Unique ID for the network component of the guest virtual server that this network component firewall is attached to.
+ GuestNetworkComponentId *int `json:"guestNetworkComponentId,omitempty" xmlrpc:"guestNetworkComponentId,omitempty"` + + // Unique ID for the network component firewall. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The network component of the switch interface that this network component firewall belongs to. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // Unique ID for the network component of the switch interface that this network component firewall is attached to. + NetworkComponentId *int `json:"networkComponentId,omitempty" xmlrpc:"networkComponentId,omitempty"` + + // The update requests made for this firewall. + NetworkFirewallUpdateRequest []Network_Firewall_Update_Request `json:"networkFirewallUpdateRequest,omitempty" xmlrpc:"networkFirewallUpdateRequest,omitempty"` + + // A count of the update requests made for this firewall. + NetworkFirewallUpdateRequestCount *uint `json:"networkFirewallUpdateRequestCount,omitempty" xmlrpc:"networkFirewallUpdateRequestCount,omitempty"` + + // A count of the currently running rule set of this network component firewall. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // The currently running rule set of this network component firewall. + Rules []Network_Component_Firewall_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` + + // Current status of the network component firewall. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A count of the additional subnets linked to this network component firewall. + SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"` + + // The additional subnets linked to this network component firewall. + Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"` +} + +// A SoftLayer_Network_Component_Firewall_Rule object type represents a currently running firewall rule and contains relevant information. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Component_Firewall_Rule struct { + Entity + + // The action that the rule is to take [permit or deny]. + Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"` + + // The destination IP address considered for determining rule application. + DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // The CIDR is used for determining rule application. This value will + DestinationIpCidr *int `json:"destinationIpCidr,omitempty" xmlrpc:"destinationIpCidr,omitempty"` + + // The destination IP subnet mask considered for determining rule application. + DestinationIpSubnetMask *string `json:"destinationIpSubnetMask,omitempty" xmlrpc:"destinationIpSubnetMask,omitempty"` + + // The ending (upper end of range) destination port considered for determining rule application. + DestinationPortRangeEnd *int `json:"destinationPortRangeEnd,omitempty" xmlrpc:"destinationPortRangeEnd,omitempty"` + + // The starting (lower end of range) destination port considered for determining rule application. + DestinationPortRangeStart *int `json:"destinationPortRangeStart,omitempty" xmlrpc:"destinationPortRangeStart,omitempty"` + + // The rule's internal identifier.
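+ //
+ // Sketch: rules carry an OrderValue (below) describing their application
+ // order, so a client might sort a fetched slice before display; assumes every
+ // rule in "rules" has OrderValue set:
+ //
+ //	sort.Slice(rules, func(i, j int) bool {
+ //		return *rules[i].OrderValue < *rules[j].OrderValue
+ //	})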
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The network component firewall that this rule belongs to. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // The notes field for the rule. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The numeric value describing the order in which the rule should be applied. + OrderValue *int `json:"orderValue,omitempty" xmlrpc:"orderValue,omitempty"` + + // The protocol considered for determining rule application. + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The source IP address considered for determining rule application. + SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"` + + // The CIDR is used for determining rule application. This value will + SourceIpCidr *int `json:"sourceIpCidr,omitempty" xmlrpc:"sourceIpCidr,omitempty"` + + // The source IP subnet mask considered for determining rule application. + SourceIpSubnetMask *string `json:"sourceIpSubnetMask,omitempty" xmlrpc:"sourceIpSubnetMask,omitempty"` + + // Current status of the network component firewall. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // Whether this rule is an IPv4 rule or an IPv6 rule. If + Version *int `json:"version,omitempty" xmlrpc:"version,omitempty"` +} + +// A SoftLayer_Network_Component_Firewall_Subnets object type represents the current linked subnets and contains relevant information. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Component_Firewall_Subnets struct { + Entity + + // A boolean flag that indicates whether the subnet should receive all the rules intended for the host on this context slot. + ApplyServerRulesFlag *bool `json:"applyServerRulesFlag,omitempty" xmlrpc:"applyServerRulesFlag,omitempty"` + + // The network component firewall that writes rules for this subnet. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // The subnet that this link binds to the network component firewall. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // The unique identifier of the subnet being linked to the network component firewall. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` +} + +// no documentation yet +type Network_Component_Group struct { + Entity + + // no documentation yet + GroupTypeId *int `json:"groupTypeId,omitempty" xmlrpc:"groupTypeId,omitempty"` + + // A count of a network component group's associated network components. + NetworkComponentCount *uint `json:"networkComponentCount,omitempty" xmlrpc:"networkComponentCount,omitempty"` + + // A network component group's associated network components. + NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` +} + +// The SoftLayer_Network_Component_IpAddress data type contains general information relating to the binding of a single network component to a single SoftLayer IP address. +type Network_Component_IpAddress struct { + Entity + + // The IP address associated with this object's network component.
+ IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // The network component associated with this object's IP address. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` +} + +// Represents the association between a Network_Component and Network_Vlan in the manner of a 'trunk'. Trunking a VLAN to a port allows that port to receive and send packets tagged with the corresponding VLAN number. +type Network_Component_Network_Vlan_Trunk struct { + Entity + + // The network component that the VLAN is being trunked to. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // The network component's identifier. + NetworkComponentId *int `json:"networkComponentId,omitempty" xmlrpc:"networkComponentId,omitempty"` + + // The VLAN that is being trunked to the network component. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // The identifier of the network VLAN that is a trunk on the network component. + NetworkVlanId *int `json:"networkVlanId,omitempty" xmlrpc:"networkVlanId,omitempty"` +} + +// The SoftLayer_Network_Component_RemoteManagement data type contains general information relating to a single SoftLayer remote management network component. +type Network_Component_RemoteManagement struct { + Network_Component +} + +// The SoftLayer_Network_Component_Uplink_Hardware data type abstracts information related to network connections between SoftLayer hardware and SoftLayer network components. +// +// It is populated via triggers on the network_connection table (SoftLayer_Network_Connection), so you shouldn't have to delete or insert records into this table, ever. +// +// +type Network_Component_Uplink_Hardware struct { + Entity + + // A network component uplink's connected [[SoftLayer_Hardware|Hardware]]. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The [[SoftLayer_Network_Component|Network Component]] that an uplink connection belongs to. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` +} + +// The SoftLayer_Network_ContentDelivery_Account data type models an individual CDN account. CDN accounts contain references to the SoftLayer customer account they belong to, login credentials for upload services, and a CDN account's status. Please contact SoftLayer sales to purchase or cancel a CDN account. +type Network_ContentDelivery_Account struct { + Entity + + // The customer account that a CDN account belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The internal identifier of the customer account that a CDN account belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The CDN account id that this CDN account is associated with. + AssociatedCdnAccountId *string `json:"associatedCdnAccountId,omitempty" xmlrpc:"associatedCdnAccountId,omitempty"` + + // A count of the IP addresses that are used for the content authentication service. + AuthenticationIpAddressCount *uint `json:"authenticationIpAddressCount,omitempty" xmlrpc:"authenticationIpAddressCount,omitempty"` + + // The IP addresses that are used for the content authentication service.
+ AuthenticationIpAddresses []Network_ContentDelivery_Authentication_Address `json:"authenticationIpAddresses,omitempty" xmlrpc:"authenticationIpAddresses,omitempty"` + + // The current billing item for a CDN account. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The name of a CDN account. + CdnAccountName *string `json:"cdnAccountName,omitempty" xmlrpc:"cdnAccountName,omitempty"` + + // A brief note on a CDN account. + CdnAccountNote *string `json:"cdnAccountNote,omitempty" xmlrpc:"cdnAccountNote,omitempty"` + + // The solution type of a CDN account. + CdnSolutionName *string `json:"cdnSolutionName,omitempty" xmlrpc:"cdnSolutionName,omitempty"` + + // The date that a CDN account was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Indicates if a CDN account is dependent on another service. If set, this CDN account is limited to these services: createOriginPullMapping, deleteOriginPullRule, getOriginPullMappingInformation, getCdnUrls, purgeCache, loadContent, manageHttpCompression + DependantServiceFlag *bool `json:"dependantServiceFlag,omitempty" xmlrpc:"dependantServiceFlag,omitempty"` + + // A CDN account's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Indicates if it is a legacy CDN or not. + LegacyCdnFlag *bool `json:"legacyCdnFlag,omitempty" xmlrpc:"legacyCdnFlag,omitempty"` + + // Indicates if CDN logging is enabled. + LogEnabledFlag *string `json:"logEnabledFlag,omitempty" xmlrpc:"logEnabledFlag,omitempty"` + + // Indicates if the customer is allowed to access the CDN provider's management portal. + ProviderPortalAccessFlag *bool `json:"providerPortalAccessFlag,omitempty" xmlrpc:"providerPortalAccessFlag,omitempty"` + + // A CDN account's status presented in a more detailed data type. + Status *Network_ContentDelivery_Account_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The internal identifier of a CDN status. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // Indicates if the token authentication service is enabled or not. + TokenAuthenticationEnabledFlag *bool `json:"tokenAuthenticationEnabledFlag,omitempty" xmlrpc:"tokenAuthenticationEnabledFlag,omitempty"` +} + +// The SoftLayer_Network_ContentDelivery_Account_Status contains information on a CDN account. +type Network_ContentDelivery_Account_Status struct { + Entity + + // A longer description of a CDN account's status. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A CDN account status' internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A CDN account status' name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_ContentDelivery_Authentication_Address data type models an individual IP address that CDN allows or denies access from. +type Network_ContentDelivery_Authentication_Address struct { + Entity + + // The type of access on an IP address.
It can be "ALLOW" or "DENY". + AccessType *string `json:"accessType,omitempty" xmlrpc:"accessType,omitempty"` + + // The internal identifier of the CDN account + CdnAccountId *int `json:"cdnAccountId,omitempty" xmlrpc:"cdnAccountId,omitempty"` + + // The created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The internal identifier of an authentication IP address + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The IP address that you want to block or allow access to + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // The last modified date + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of an authentication IP. This helps you to keep track of IP addresses. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The priority of an authentication IP address. The smaller the number, the higher the priority. Higher-priority IP addresses are matched first. + Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"` + + // The internal identifier of the user who created an authentication IP record + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// The SoftLayer_Network_ContentDelivery_Authentication_Token data type models an individual token used by the CDN token authentication service. +type Network_ContentDelivery_Authentication_Token struct { + Entity + + // The internal identifier of a CDN account + CdnAccountId *int `json:"cdnAccountId,omitempty" xmlrpc:"cdnAccountId,omitempty"` + + // The client IP address. This is optional. + ClientIp *string `json:"clientIp,omitempty" xmlrpc:"clientIp,omitempty"` + + // The created date + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The customer id. You can use this optional value to tie a user id to an authentication token. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The referrer information. This is optional. + Referrer *string `json:"referrer,omitempty" xmlrpc:"referrer,omitempty"` + + // The managed token string + Token *string `json:"token,omitempty" xmlrpc:"token,omitempty"` +} + +// The SoftLayer_Network_Customer_Subnet data type contains general information relating to a single customer subnet (remote). +type Network_Customer_Subnet struct { + Entity + + // The account id a customer subnet belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A subnet's Classless Inter-Domain Routing prefix. This is a number between 0 and 32 signifying the number of bits in a subnet's netmask. These bits separate a subnet's network address from its host addresses. It performs the same function as the ''netmask'' property, but is represented as an integer. + Cidr *int `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // A customer subnet's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of all ip addresses associated with a subnet. + IpAddressCount *uint `json:"ipAddressCount,omitempty" xmlrpc:"ipAddressCount,omitempty"` + + // All ip addresses associated with a subnet. + IpAddresses []Network_Customer_Subnet_IpAddress `json:"ipAddresses,omitempty" xmlrpc:"ipAddresses,omitempty"` + + // A bitmask in dotted-quad format that is used to separate a subnet's network address from its host addresses. This performs the same function as the ''cidr'' property, but is expressed in a string format.
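+ //
+ // Sketch: the two representations are interchangeable, and Go's standard net
+ // package converts between them; for an IPv4 subnet whose Cidr is 24:
+ //
+ //	mask := net.CIDRMask(24, 32)    // prefix length -> net.IPMask
+ //	dotted := net.IP(mask).String() // "255.255.255.0"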
+ Netmask *string `json:"netmask,omitempty" xmlrpc:"netmask,omitempty"` + + // A subnet's network identifier. This is the first IP address of a subnet. + NetworkIdentifier *string `json:"networkIdentifier,omitempty" xmlrpc:"networkIdentifier,omitempty"` + + // The total number of ip addresses in a subnet. + TotalIpAddresses *int `json:"totalIpAddresses,omitempty" xmlrpc:"totalIpAddresses,omitempty"` +} + +// The SoftLayer_Network_Customer_Subnet_IpAddress data type contains general information relating to a single Customer Subnet (Remote) IPv4 address. +type Network_Customer_Subnet_IpAddress struct { + Entity + + // Unique identifier for an ip address. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // An IP address expressed in dotted quad format. + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // An IP address' user defined note. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The customer subnet (remote) that the ip address belongs to. + Subnet *Network_Customer_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // The unique identifier for the customer subnet (remote) the ip address belongs to. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` + + // A count of all the address translations that are tied to an IP address. + TranslationCount *uint `json:"translationCount,omitempty" xmlrpc:"translationCount,omitempty"` + + // All the address translations that are tied to an IP address. + Translations []Network_Tunnel_Module_Context_Address_Translation `json:"translations,omitempty" xmlrpc:"translations,omitempty"` +} + +// The SoftLayer_Network_Firewall_AccessControlList data type contains general information relating to a single SoftLayer firewall access control list. This is the object which ties the running rules to a specific context. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Firewall_AccessControlList struct { + Entity + + // no documentation yet + Direction *string `json:"direction,omitempty" xmlrpc:"direction,omitempty"` + + // no documentation yet + FirewallContextInterfaceId *int `json:"firewallContextInterfaceId,omitempty" xmlrpc:"firewallContextInterfaceId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the update requests made for this firewall. + NetworkFirewallUpdateRequestCount *uint `json:"networkFirewallUpdateRequestCount,omitempty" xmlrpc:"networkFirewallUpdateRequestCount,omitempty"` + + // The update requests made for this firewall. + NetworkFirewallUpdateRequests []Network_Firewall_Update_Request `json:"networkFirewallUpdateRequests,omitempty" xmlrpc:"networkFirewallUpdateRequests,omitempty"` + + // no documentation yet + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // A count of the currently running rule set of this context access control list firewall. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // The currently running rule set of this context access control list firewall. + Rules []Network_Vlan_Firewall_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` +} + +// The SoftLayer_Network_Firewall_Interface data type contains general information relating to a single SoftLayer firewall interface.
This is the object which ties the firewall context access control list to a firewall. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Firewall_Interface struct { + Network_Firewall_Module_Context_Interface +} + +// no documentation yet +type Network_Firewall_Module_Context_Interface struct { + Entity + + // A count of + FirewallContextAccessControlListCount *uint `json:"firewallContextAccessControlListCount,omitempty" xmlrpc:"firewallContextAccessControlListCount,omitempty"` + + // no documentation yet + FirewallContextAccessControlLists []Network_Firewall_AccessControlList `json:"firewallContextAccessControlLists,omitempty" xmlrpc:"firewallContextAccessControlLists,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` +} + +// The SoftLayer_Network_Firewall_Template type contains general information for a SoftLayer network firewall template. +// +// Firewall templates are recommended rule sets for use with SoftLayer Hardware Firewall (Dedicated). These optimized templates are designed to balance security restriction with application availability. The templates given may be altered to provide custom network security, or may be used as-is for basic security. At least one rule set MUST be applied for the firewall to block traffic. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Firewall_Template struct { + Entity + + // A Firewall template's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name of the firewall rules template. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the rule set that belongs to this firewall rules template. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // The rule set that belongs to this firewall rules template. + Rules []Network_Firewall_Template_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` +} + +// The SoftLayer_Network_Firewall_Template_Rule type contains general information relating to a single SoftLayer firewall template rule. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. +type Network_Firewall_Template_Rule struct { + Entity + + // The action that this template rule is to take [permit or deny]. + Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"` + + // The destination IP address considered for determining rule application. + DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // The destination IP subnet mask considered for determining rule application. + DestinationIpSubnetMask *string `json:"destinationIpSubnetMask,omitempty" xmlrpc:"destinationIpSubnetMask,omitempty"` + + // The ending (upper end of range) destination port considered for determining rule application.
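+ //
+ // Sketch (hypothetical helper): testing whether a destination port falls
+ // inside a template rule's range, nil-checking both bounds:
+ //
+ //	func inDestRange(r Network_Firewall_Template_Rule, port int) bool {
+ //		return r.DestinationPortRangeStart != nil && r.DestinationPortRangeEnd != nil &&
+ //			port >= *r.DestinationPortRangeStart && port <= *r.DestinationPortRangeEnd
+ //	}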
+ DestinationPortRangeEnd *int `json:"destinationPortRangeEnd,omitempty" xmlrpc:"destinationPortRangeEnd,omitempty"` + + // The starting (lower end of range) destination port considered for determining rule application. + DestinationPortRangeStart *int `json:"destinationPortRangeStart,omitempty" xmlrpc:"destinationPortRangeStart,omitempty"` + + // The firewall template that this rule is attached to. + FirewallTemplate *Network_Firewall_Template `json:"firewallTemplate,omitempty" xmlrpc:"firewallTemplate,omitempty"` + + // The unique identifier of the firewall template that a firewall template rule is associated with. + FirewallTemplateId *int `json:"firewallTemplateId,omitempty" xmlrpc:"firewallTemplateId,omitempty"` + + // A Firewall template rule's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The notes field for the firewall template rule. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The numeric value describing the order in which the rule set should be applied. + OrderValue *int `json:"orderValue,omitempty" xmlrpc:"orderValue,omitempty"` + + // The protocol considered for determining rule application. + Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"` + + // The source IP address considered for determining rule application. + SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"` + + // The source IP subnet mask considered for determining rule application. + SourceIpSubnetMask *string `json:"sourceIpSubnetMask,omitempty" xmlrpc:"sourceIpSubnetMask,omitempty"` +} + +// The SoftLayer_Network_Firewall_Update_Request data type contains information relating to a SoftLayer network firewall update request. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Firewall_Update_Request struct { + Entity + + // Timestamp of when the rules from the update request were applied to the firewall. + ApplyDate *Time `json:"applyDate,omitempty" xmlrpc:"applyDate,omitempty"` + + // The user that authorized this firewall update request. + AuthorizingUser *User_Interface `json:"authorizingUser,omitempty" xmlrpc:"authorizingUser,omitempty"` + + // The unique identifier of the user that authorized the update request. + AuthorizingUserId *int `json:"authorizingUserId,omitempty" xmlrpc:"authorizingUserId,omitempty"` + + // The type of user that authorized the update request [EMP or USR]. + AuthorizingUserType *string `json:"authorizingUserType,omitempty" xmlrpc:"authorizingUserType,omitempty"` + + // Flag indicating whether the request is for a rule bypass configuration [0 or 1]. + BypassFlag *bool `json:"bypassFlag,omitempty" xmlrpc:"bypassFlag,omitempty"` + + // Timestamp of the creation of the record. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The unique identifier of the firewall access control list that the rule set is destined for. + FirewallContextAccessControlListId *int `json:"firewallContextAccessControlListId,omitempty" xmlrpc:"firewallContextAccessControlListId,omitempty"` + + // The downstream virtual server that the rule set will be applied to. + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // The downstream server that the rule set will be applied to. 
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The unique identifier of the server that the rule set is destined to protect. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // The unique identifier of the firewall update request. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The network component firewall that the rule set will be applied to. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // The unique identifier of the network component firewall that the rule set is destined for. + NetworkComponentFirewallId *int `json:"networkComponentFirewallId,omitempty" xmlrpc:"networkComponentFirewallId,omitempty"` + + // A count of the group of rules contained within the update request. + RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"` + + // The group of rules contained within the update request. + Rules []Network_Firewall_Update_Request_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"` +} + +// The SoftLayer_Network_Firewall_Update_Request_Customer data type represents a single firewall update request made by a customer. +type Network_Firewall_Update_Request_Customer struct { + Network_Firewall_Update_Request +} + +// The SoftLayer_Network_Firewall_Update_Request_Employee data type returns a user object for the SoftLayer employee that created the request. +type Network_Firewall_Update_Request_Employee struct { + Network_Firewall_Update_Request +} + +// The SoftLayer_Network_Firewall_Update_Request_Rule type contains information relating to a SoftLayer network firewall update request rule. This rule is a member of a [[SoftLayer Network Firewall Update Request]]. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. +type Network_Firewall_Update_Request_Rule struct { + Entity + + // The action that this update request rule is to take [permit or deny]. + Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"` + + // The destination IP address considered for determining rule application. + DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // The CIDR is used for determining rule application. This value will + DestinationIpCidr *int `json:"destinationIpCidr,omitempty" xmlrpc:"destinationIpCidr,omitempty"` + + // The destination IP subnet mask considered for determining rule application. + DestinationIpSubnetMask *string `json:"destinationIpSubnetMask,omitempty" xmlrpc:"destinationIpSubnetMask,omitempty"` + + // The ending (upper end of range) destination port considered for determining rule application. + DestinationPortRangeEnd *int `json:"destinationPortRangeEnd,omitempty" xmlrpc:"destinationPortRangeEnd,omitempty"` + + // The starting (lower end of range) destination port considered for determining rule application. + DestinationPortRangeStart *int `json:"destinationPortRangeStart,omitempty" xmlrpc:"destinationPortRangeStart,omitempty"` + + // The update request that this rule belongs to. + FirewallUpdateRequest *Network_Firewall_Update_Request `json:"firewallUpdateRequest,omitempty" xmlrpc:"firewallUpdateRequest,omitempty"` + + // The unique identifier of the firewall update request that a firewall update request rule is associated with.
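+ //
+ // Sketch: composing a single IPv4 permit rule for an update request; the
+ // sl.String and sl.Int pointer helpers are assumed from a companion package,
+ // and all values are illustrative:
+ //
+ //	rule := Network_Firewall_Update_Request_Rule{
+ //		Action:                    sl.String("permit"),
+ //		Protocol:                  sl.String("tcp"),
+ //		DestinationIpAddress:      sl.String("10.0.0.5"),
+ //		DestinationPortRangeStart: sl.Int(443),
+ //		DestinationPortRangeEnd:   sl.Int(443),
+ //		OrderValue:                sl.Int(1),
+ //		Version:                   sl.Int(4),
+ //	}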
+
+// The SoftLayer_Network_Firewall_Update_Request_Customer data type represents a firewall update request created by a customer.
+type Network_Firewall_Update_Request_Customer struct {
+ Network_Firewall_Update_Request
+}
+
+// The SoftLayer_Network_Firewall_Update_Request_Employee data type returns a user object for the SoftLayer employee that created the request.
+type Network_Firewall_Update_Request_Employee struct {
+ Network_Firewall_Update_Request
+}
+
+// The SoftLayer_Network_Firewall_Update_Request_Rule type contains information relating to a SoftLayer network firewall update request rule. This rule is a member of a [[SoftLayer Network Firewall Update Request]]. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates.
+type Network_Firewall_Update_Request_Rule struct {
+ Entity
+
+ // The action that this update request rule is to take [permit or deny].
+ Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"`
+
+ // The destination IP address considered for determining rule application.
+ DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"`
+
+ // The destination IP CIDR considered for determining rule application.
+ DestinationIpCidr *int `json:"destinationIpCidr,omitempty" xmlrpc:"destinationIpCidr,omitempty"`
+
+ // The destination IP subnet mask considered for determining rule application.
+ DestinationIpSubnetMask *string `json:"destinationIpSubnetMask,omitempty" xmlrpc:"destinationIpSubnetMask,omitempty"`
+
+ // The ending (upper end of range) destination port considered for determining rule application.
+ DestinationPortRangeEnd *int `json:"destinationPortRangeEnd,omitempty" xmlrpc:"destinationPortRangeEnd,omitempty"`
+
+ // The starting (lower end of range) destination port considered for determining rule application.
+ DestinationPortRangeStart *int `json:"destinationPortRangeStart,omitempty" xmlrpc:"destinationPortRangeStart,omitempty"`
+
+ // The update request that this rule belongs to.
+ FirewallUpdateRequest *Network_Firewall_Update_Request `json:"firewallUpdateRequest,omitempty" xmlrpc:"firewallUpdateRequest,omitempty"`
+
+ // The unique identifier of the firewall update request that a firewall update request rule is associated with.
+ FirewallUpdateRequestId *int `json:"firewallUpdateRequestId,omitempty" xmlrpc:"firewallUpdateRequestId,omitempty"`
+
+ // A firewall update request rule's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The notes field for the firewall update request rule.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // The numeric value describing the order in which the rule should be applied.
+ OrderValue *int `json:"orderValue,omitempty" xmlrpc:"orderValue,omitempty"`
+
+ // The protocol considered for determining rule application.
+ Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+
+ // The source IP address considered for determining rule application.
+ SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"`
+
+ // The source IP CIDR considered for determining rule application.
+ SourceIpCidr *int `json:"sourceIpCidr,omitempty" xmlrpc:"sourceIpCidr,omitempty"`
+
+ // The source IP subnet mask considered for determining rule application.
+ SourceIpSubnetMask *string `json:"sourceIpSubnetMask,omitempty" xmlrpc:"sourceIpSubnetMask,omitempty"`
+
+ // Whether this rule is an IPv4 rule or an IPv6 rule.
+ Version *int `json:"version,omitempty" xmlrpc:"version,omitempty"`
+}
+
+// The SoftLayer_Network_Firewall_Update_Request_Rule_Version6 type contains information relating to a SoftLayer network firewall update request rule for IPv6. This rule is a member of a [[SoftLayer Network Firewall Update Request]]. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates.
+type Network_Firewall_Update_Request_Rule_Version6 struct {
+ Network_Firewall_Update_Request_Rule
+}
+
+// no documentation yet
+type Network_Gateway struct {
+ Entity
+
+ // The account for this gateway.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The internal identifier of the account assigned to this gateway.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The VRRP group number for this gateway. This is set internally and cannot be provided on create.
+ GroupNumber *int `json:"groupNumber,omitempty" xmlrpc:"groupNumber,omitempty"`
+
+ // A gateway's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of all VLANs trunked to this gateway.
+ InsideVlanCount *uint `json:"insideVlanCount,omitempty" xmlrpc:"insideVlanCount,omitempty"`
+
+ // All VLANs trunked to this gateway.
+ InsideVlans []Network_Gateway_Vlan `json:"insideVlans,omitempty" xmlrpc:"insideVlans,omitempty"`
+
+ // A count of the members for this gateway.
+ MemberCount *uint `json:"memberCount,omitempty" xmlrpc:"memberCount,omitempty"`
+
+ // The members for this gateway.
+ Members []Network_Gateway_Member `json:"members,omitempty" xmlrpc:"members,omitempty"`
+
+ // A gateway's name. This is required on create and can be no more than 255 characters.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A gateway's network space. Currently, only 'private' or 'both' is allowed. When this value is 'private', it is a backend gateway only. Otherwise, it is a gateway for both frontend and backend traffic.
+ NetworkSpace *string `json:"networkSpace,omitempty" xmlrpc:"networkSpace,omitempty"`
+
+ // The private gateway IP address.
+ PrivateIpAddress *Network_Subnet_IpAddress `json:"privateIpAddress,omitempty" xmlrpc:"privateIpAddress,omitempty"`
+
+ // The internal identifier of the private IP address for this gateway.
+ PrivateIpAddressId *int `json:"privateIpAddressId,omitempty" xmlrpc:"privateIpAddressId,omitempty"`
+
+ // The private VLAN for accessing this gateway.
+ PrivateVlan *Network_Vlan `json:"privateVlan,omitempty" xmlrpc:"privateVlan,omitempty"`
+
+ // The internal identifier of the private VLAN for this gateway.
+ PrivateVlanId *int `json:"privateVlanId,omitempty" xmlrpc:"privateVlanId,omitempty"`
+
+ // The public gateway IP address.
+ PublicIpAddress *Network_Subnet_IpAddress `json:"publicIpAddress,omitempty" xmlrpc:"publicIpAddress,omitempty"`
+
+ // The internal identifier of the public IP address for this gateway.
+ PublicIpAddressId *int `json:"publicIpAddressId,omitempty" xmlrpc:"publicIpAddressId,omitempty"`
+
+ // The public gateway IPv6 address.
+ PublicIpv6Address *Network_Subnet_IpAddress `json:"publicIpv6Address,omitempty" xmlrpc:"publicIpv6Address,omitempty"`
+
+ // The internal identifier of the public IPv6 address for this gateway.
+ PublicIpv6AddressId *int `json:"publicIpv6AddressId,omitempty" xmlrpc:"publicIpv6AddressId,omitempty"`
+
+ // The public VLAN for accessing this gateway.
+ PublicVlan *Network_Vlan `json:"publicVlan,omitempty" xmlrpc:"publicVlan,omitempty"`
+
+ // The internal identifier of the public VLAN for this gateway. This is set internally and cannot be provided on create.
+ PublicVlanId *int `json:"publicVlanId,omitempty" xmlrpc:"publicVlanId,omitempty"`
+
+ // The current status of the gateway.
+ Status *Network_Gateway_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // The internal identifier of the current status of this gateway. The status is always active unless there is a process running to change the gateway. This cannot be set on creation.
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+}
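+
+// Example (editor's sketch; the name is made up): the caller-settable fields
+// noted above. Name is required, networkSpace accepts 'private' or 'both',
+// and groupNumber, publicVlanId and statusId are set internally.
+//
+//	gw := datatypes.Network_Gateway{
+//		Name:         sl.String("edge-gateway-01"),
+//		NetworkSpace: sl.String("both"),
+//	}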
+
+// no documentation yet
+type Network_Gateway_Member struct {
+ Entity
+
+ // The device for this member.
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // The internal identifier of the hardware for this member.
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+ // A gateway member's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The gateway this member belongs to.
+ NetworkGateway *Network_Gateway `json:"networkGateway,omitempty" xmlrpc:"networkGateway,omitempty"`
+
+ // The internal identifier of the gateway this member belongs to.
+ NetworkGatewayId *int `json:"networkGatewayId,omitempty" xmlrpc:"networkGatewayId,omitempty"`
+
+ // The priority for this gateway member. This is set internally and cannot be provided on create.
+ Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"`
+}
+
+// no documentation yet
+type Network_Gateway_Status struct {
+ Entity
+
+ // A gateway status's description.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // A gateway status's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A gateway status's programmatic name.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // A gateway status's human-friendly name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Network_Gateway_Vlan struct {
+ Entity
+
+ // If true, this VLAN is bypassed. If false, it is routed through the gateway.
+ BypassFlag *bool `json:"bypassFlag,omitempty" xmlrpc:"bypassFlag,omitempty"`
+
+ // A gateway VLAN's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The gateway this VLAN is attached to.
+ NetworkGateway *Network_Gateway `json:"networkGateway,omitempty" xmlrpc:"networkGateway,omitempty"`
+
+ // The internal identifier of the gateway this VLAN is attached to.
+ NetworkGatewayId *int `json:"networkGatewayId,omitempty" xmlrpc:"networkGatewayId,omitempty"`
+
+ // The network VLAN record.
+ NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"`
+
+ // The internal identifier of the network VLAN.
+ NetworkVlanId *int `json:"networkVlanId,omitempty" xmlrpc:"networkVlanId,omitempty"`
+}
+
+// The SoftLayer_Network_LBaaS_Listener type presents a data structure for a load balancer's listener, also called a frontend.
+type Network_LBaaS_Listener struct {
+ Entity
+
+ // The limit of connections a listener can accept.
+ ConnectionLimit *int `json:"connectionLimit,omitempty" xmlrpc:"connectionLimit,omitempty"`
+
+ // Specifies when the listener was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ DefaultPool *Network_LBaaS_Pool `json:"defaultPool,omitempty" xmlrpc:"defaultPool,omitempty"`
+
+ // no documentation yet
+ LoadBalancer *Network_LBaaS_LoadBalancer `json:"loadBalancer,omitempty" xmlrpc:"loadBalancer,omitempty"`
+
+ // Specifies when the listener was last updated.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The listener's protocol, one of "TCP", "HTTP", or "HTTPS".
+ Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+
+ // The listener's protocol port number.
+ ProtocolPort *int `json:"protocolPort,omitempty" xmlrpc:"protocolPort,omitempty"`
+
+ // The provisioning status of the listener.
+ ProvisioningStatus *string `json:"provisioningStatus,omitempty" xmlrpc:"provisioningStatus,omitempty"`
+
+ // An optional reference to the SSL/TLS certificate for the listener.
+ TlsCertificateId *int `json:"tlsCertificateId,omitempty" xmlrpc:"tlsCertificateId,omitempty"`
+
+ // The UUID of a listener.
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+}
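+
+// Example (editor's sketch; values are made up): an HTTPS listener with the
+// optional TLS certificate reference described above.
+//
+//	listener := datatypes.Network_LBaaS_Listener{
+//		Protocol:         sl.String("HTTPS"),
+//		ProtocolPort:     sl.Int(443),
+//		ConnectionLimit:  sl.Int(1000),
+//		TlsCertificateId: sl.Int(5678),
+//	}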
+
+// The SoftLayer_Network_LBaaS_LoadBalancer type presents a structure containing attributes of a load balancer, and its related objects including listeners, pools and members.
+type Network_LBaaS_LoadBalancer struct {
+ Entity
+
+ // The account this load balancer belongs to.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // Specifies when a load balancer was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The datacenter where the load balancer is located.
+ Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"`
+
+ // Description of a load balancer.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"`
+
+ // Specifies whether a load balancer is public (1) or private (0).
+ IsPublic *int `json:"isPublic,omitempty" xmlrpc:"isPublic,omitempty"`
+
+ // A count of the listeners assigned to the load balancer.
+ ListenerCount *uint `json:"listenerCount,omitempty" xmlrpc:"listenerCount,omitempty"`
+
+ // The listeners assigned to the load balancer.
+ Listeners []Network_LBaaS_Listener `json:"listeners,omitempty" xmlrpc:"listeners,omitempty"`
+
+ // A reference to the datacenter location of the load balancer.
+ LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"`
+
+ // A count of the members assigned to the load balancer.
+ MemberCount *uint `json:"memberCount,omitempty" xmlrpc:"memberCount,omitempty"`
+
+ // The members assigned to the load balancer.
+ Members []Network_LBaaS_Member `json:"members,omitempty" xmlrpc:"members,omitempty"`
+
+ // Specifies when a load balancer was last updated.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The load balancer's name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The operating status, "ONLINE" or "OFFLINE", of a load balancer.
+ OperatingStatus *string `json:"operatingStatus,omitempty" xmlrpc:"operatingStatus,omitempty"`
+
+ // The error message from the previous API call, in case of failure.
+ PreviousErrorText *string `json:"previousErrorText,omitempty" xmlrpc:"previousErrorText,omitempty"`
+
+ // The provisioning status of a load balancer.
+ ProvisioningStatus *string `json:"provisioningStatus,omitempty" xmlrpc:"provisioningStatus,omitempty"`
+
+ // The UUID of a load balancer.
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+}
+
+// SoftLayer_Network_LBaaS_LoadBalancerProtocolConfiguration specifies the protocol, port, maximum number of allowed connections and session stickiness for a load balancer's front- and backend.
+type Network_LBaaS_LoadBalancerProtocolConfiguration struct {
+ Entity
+
+ // The backend port.
+ BackendPort *int `json:"backendPort,omitempty" xmlrpc:"backendPort,omitempty"`
+}
+
+// SoftLayer_Network_LBaaS_LoadBalancerStatistics is a collection of metrics retrieved from a LBaaS VSI instance. The available metrics are:
+// * Total number of current sessions
+// * Total number of error requests
+// * Total number of received packets
+// * Total number of transmitted packets
+// * Total number of accepted/alive connections
+// * Request rate
+// * Number of members down
+// * Number of members up
+// * Throughput
+// * Connection rate
+type Network_LBaaS_LoadBalancerStatistics struct {
+ Entity
+
+ // Number of members in DOWN health state.
+ NumberOfMembersDown *int `json:"numberOfMembersDown,omitempty" xmlrpc:"numberOfMembersDown,omitempty"`
+
+ // Number of members in UP health state.
+ NumberOfMembersUp *int `json:"numberOfMembersUp,omitempty" xmlrpc:"numberOfMembersUp,omitempty"`
+
+ // Number of total established connections.
+ TotalConnections *int `json:"totalConnections,omitempty" xmlrpc:"totalConnections,omitempty"`
+
+ // Number of total current sessions.
+ TotalCurrentSessions *int `json:"totalCurrentSessions,omitempty" xmlrpc:"totalCurrentSessions,omitempty"`
+}
+
+// The SoftLayer_Network_LBaaS_Member represents the backend member for a load balancer. It can be either a virtual server or a bare metal machine.
+type Network_LBaaS_Member struct {
+ Entity
+
+ // The IP address of a load balancer member.
+ Address *string `json:"address,omitempty" xmlrpc:"address,omitempty"`
+
+ // Specifies when a load balancer member was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ LoadBalancer *Network_LBaaS_LoadBalancer `json:"loadBalancer,omitempty" xmlrpc:"loadBalancer,omitempty"`
+
+ // Specifies when a load balancer member was last updated.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The provisioning status of a load balancer member.
+ ProvisioningStatus *string `json:"provisioningStatus,omitempty" xmlrpc:"provisioningStatus,omitempty"`
+
+ // The UUID of a load balancer member.
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+
+ // The weight of a load balancer member.
+ Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"`
+}
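+
+// Example (editor's sketch): deriving a simple up/down ratio from the
+// statistics fields defined above, with nil-safe pointer handling.
+//
+//	func memberUpRatio(s datatypes.Network_LBaaS_LoadBalancerStatistics) float64 {
+//		up, down := 0, 0
+//		if s.NumberOfMembersUp != nil {
+//			up = *s.NumberOfMembersUp
+//		}
+//		if s.NumberOfMembersDown != nil {
+//			down = *s.NumberOfMembersDown
+//		}
+//		if up+down == 0 {
+//			return 0
+//		}
+//		return float64(up) / float64(up+down)
+//	}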
+
+// SoftLayer_Network_LBaaS_MemberHealth is a collection of member metrics retrieved from a LBaaS VSI instance. The available metrics are:
+// * Name of the member
+// * Status of the member, up or down
+// * Uuid of the member
+type Network_LBaaS_MemberHealth struct {
+ Entity
+
+ // The member's status (UP/DOWN).
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // The member's UUID.
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+}
+
+// The SoftLayer_Network_LBaaS_Pool type presents a structure containing attributes of a load balancer pool such as the protocol, protocol port and the load balancing algorithm used.
+type Network_LBaaS_Pool struct {
+ Entity
+
+ // Create date of the pool instance.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ LoadBalancer *Network_LBaaS_LoadBalancer `json:"loadBalancer,omitempty" xmlrpc:"loadBalancer,omitempty"`
+
+ // Load balancing algorithm: "ROUNDROBIN", "WEIGHTED_RR", "LEASTCONNECTION"
+ LoadBalancingAlgorithm *string `json:"loadBalancingAlgorithm,omitempty" xmlrpc:"loadBalancingAlgorithm,omitempty"`
+
+ // A count of the members of the pool.
+ MemberCount *uint `json:"memberCount,omitempty" xmlrpc:"memberCount,omitempty"`
+
+ // no documentation yet
+ Members []Network_LBaaS_Member `json:"members,omitempty" xmlrpc:"members,omitempty"`
+
+ // Last updated date of the pool.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The backend protocol; supported protocols are "TCP" and "HTTP".
+ Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+
+ // The backend protocol port.
+ ProtocolPort *int `json:"protocolPort,omitempty" xmlrpc:"protocolPort,omitempty"`
+
+ // Provisioning status of a load balancer pool.
+ ProvisioningStatus *string `json:"provisioningStatus,omitempty" xmlrpc:"provisioningStatus,omitempty"`
+
+ // no documentation yet
+ SessionAffinity *Network_LBaaS_SessionAffinity `json:"sessionAffinity,omitempty" xmlrpc:"sessionAffinity,omitempty"`
+
+ // Instance uuid of the pool.
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+}
+
+// SoftLayer_Network_LBaaS_PoolMembersHealth provides statistics of members belonging to a particular pool.
+type Network_LBaaS_PoolMembersHealth struct {
+ Entity
+
+ // Health statistics for the members of the pool.
+ MembersHealth []Network_LBaaS_MemberHealth `json:"membersHealth,omitempty" xmlrpc:"membersHealth,omitempty"`
+
+ // Instance uuid of the pool.
+ PoolUuid *string `json:"poolUuid,omitempty" xmlrpc:"poolUuid,omitempty"`
+}
+
+// SoftLayer_Network_LBaaS_SessionAffinity represents the session affinity, aka session persistence, configuration for a load balancer backend pool.
+type Network_LBaaS_SessionAffinity struct {
+ Entity
+
+ // no documentation yet
+ Pool *Network_LBaaS_Pool `json:"pool,omitempty" xmlrpc:"pool,omitempty"`
+
+ // Type of the session persistence.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
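+
+// Example (editor's sketch; the port is made up): a pool using the documented
+// algorithm and protocol values from the comments above.
+//
+//	pool := datatypes.Network_LBaaS_Pool{
+//		Protocol:               sl.String("HTTP"),
+//		ProtocolPort:           sl.Int(8080),
+//		LoadBalancingAlgorithm: sl.String("ROUNDROBIN"),
+//	}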
+
+// The SoftLayer_Network_LoadBalancer_Global_Account data type contains the properties for a single global load balancer account. The properties you are able to edit are fallbackIp, loadBalanceTypeId, and notes. The hosts relational property can be used for creating and editing hosts that belong to the global load balancer account. The [[SoftLayer_Network_LoadBalancer_Global_Account::editObject|editObject]] method contains details on creating and editing hosts through the hosts relational property.
+type Network_LoadBalancer_Global_Account struct {
+ Entity
+
+ // Your SoftLayer customer account.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The maximum number of hosts that a global load balancer account is allowed to have.
+ AllowedNumberOfHosts *int `json:"allowedNumberOfHosts,omitempty" xmlrpc:"allowedNumberOfHosts,omitempty"`
+
+ // The average number of connections per second used within the current billing cycle. This number is updated daily.
+ AverageConnectionsPerSecond *Float64 `json:"averageConnectionsPerSecond,omitempty" xmlrpc:"averageConnectionsPerSecond,omitempty"`
+
+ // The current billing item for a Global Load Balancer account.
+ BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // The number of connections per second a global load balancer account may use within a billing cycle without being billed for an overage.
+ ConnectionsPerSecond *int `json:"connectionsPerSecond,omitempty" xmlrpc:"connectionsPerSecond,omitempty"`
+
+ // The IP address that will be returned in response to a DNS request when none of the hosts for a global load balancer account could be returned.
+ FallbackIp *string `json:"fallbackIp,omitempty" xmlrpc:"fallbackIp,omitempty"`
+
+ // A count of the hosts in the load balancing pool for a global load balancer account.
+ HostCount *uint `json:"hostCount,omitempty" xmlrpc:"hostCount,omitempty"`
+
+ // The hostname of a global load balancer account that is being load balanced.
+ Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"`
+
+ // The hosts in the load balancing pool for a global load balancer account.
+ Hosts []Network_LoadBalancer_Global_Host `json:"hosts,omitempty" xmlrpc:"hosts,omitempty"`
+
+ // The unique identifier of a global load balancer account.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The load balance method of a global load balancer account.
+ LoadBalanceType *Network_LoadBalancer_Global_Type `json:"loadBalanceType,omitempty" xmlrpc:"loadBalanceType,omitempty"`
+
+ // The identifier of the load balance method for a global load balancer account.
+ LoadBalanceTypeId *int `json:"loadBalanceTypeId,omitempty" xmlrpc:"loadBalanceTypeId,omitempty"`
+
+ // A flag indicating that the global load balancer is a managed resource.
+ ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"`
+
+ // Additional customer defined information for a global load balancer account.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+}
+
+// The SoftLayer_Network_LoadBalancer_Global_Host data type represents a single host that belongs to a global load balancer account's load balancing pool.
+//
+// The destination IP address of a host must be one that belongs to your SoftLayer customer account, or to a datacenter load balancer virtual IP that belongs to your SoftLayer customer account. The destination IP address and port of a global load balancer host are required fields; they must exist during creation and cannot be removed. The acceptable values for the health check type are 'none', 'http', and 'tcp'. The status property is updated in 5-minute intervals and the hits property is updated in 10-minute intervals.
+//
+// The order of the host is only important if you are using the 'failover' load balance method, and the weight is only important if you are using the 'weighted round robin' load balance method.
+type Network_LoadBalancer_Global_Host struct {
+ Entity
+
+ // The IP address of the host that will be returned by the global load balancers in response to a DNS request.
+ DestinationIp *string `json:"destinationIp,omitempty" xmlrpc:"destinationIp,omitempty"`
+
+ // The port of the host that will be used for health checks.
+ DestinationPort *int `json:"destinationPort,omitempty" xmlrpc:"destinationPort,omitempty"`
+
+ // Whether the host is enabled or not. The value can be '0' for disabled, or '1' for enabled.
+ Enabled *int `json:"enabled,omitempty" xmlrpc:"enabled,omitempty"`
+
+ // The health check type of a host. Valid values include 'none', 'http', and 'tcp'.
+ HealthCheck *string `json:"healthCheck,omitempty" xmlrpc:"healthCheck,omitempty"`
+
+ // The number of times the host was selected by the load balance method.
+ Hits *Float64 `json:"hits,omitempty" xmlrpc:"hits,omitempty"`
+
+ // The unique identifier of a global load balancer host.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The order of this host within the load balance pool. This is only significant if the load balance method is set to failover.
+ LoadBalanceOrder *int `json:"loadBalanceOrder,omitempty" xmlrpc:"loadBalanceOrder,omitempty"`
+
+ // The global load balancer account a host belongs to.
+ LoadBalancerAccount *Network_LoadBalancer_Global_Account `json:"loadBalancerAccount,omitempty" xmlrpc:"loadBalancerAccount,omitempty"`
+
+ // The location of a host in a datacenter.serverRoom format.
+ Location *string `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // The health status of a host. The status can be 'UP', 'DOWN', or null; null could mean that the health check type is set to 'none', or that an update to the IP, port, or health check type was recently made and the host is waiting for the new status.
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // The load balance weight of a host. The total weight of all hosts in the load balancing pool must not exceed 100.
+ Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"`
+}
+
+// The SoftLayer_Network_LoadBalancer_Global_Type data type represents a single load balance method that can be assigned to a global load balancer account. The load balance method determines how hosts in a load balancing pool are chosen by the global load balancers.
+type Network_LoadBalancer_Global_Type struct {
+ Entity
+
+ // The unique identifier of a load balance method.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The name of a load balance method.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
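+
+// Example (editor's sketch; addresses are made up): two weighted round robin
+// hosts whose combined weight stays within the documented limit of 100, using
+// the 'http' and 'tcp' health check values listed above.
+//
+//	hosts := []datatypes.Network_LoadBalancer_Global_Host{
+//		{DestinationIp: sl.String("203.0.113.10"), DestinationPort: sl.Int(80), Weight: sl.Int(60), HealthCheck: sl.String("http")},
+//		{DestinationIp: sl.String("203.0.113.11"), DestinationPort: sl.Int(80), Weight: sl.Int(40), HealthCheck: sl.String("tcp")},
+//	}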
+
+// The SoftLayer_Network_LoadBalancer_Service data type contains all the information relating to a specific service (destination) on a particular load balancer.
+//
+// Information retained on the object itself is the source and destination of the service, the routing type, the weight, and whether or not the service is currently enabled.
+type Network_LoadBalancer_Service struct {
+ Entity
+
+ // Connection limit on this service.
+ ConnectionLimit *int `json:"connectionLimit,omitempty" xmlrpc:"connectionLimit,omitempty"`
+
+ // Creation date of this service.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The IP address of the real server you wish to direct traffic to. Your account must own this IP.
+ DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"`
+
+ // The port on the real server to direct the traffic to. This can be different from the source port. If you wish to obfuscate your HTTP traffic, you can accept requests on port 80 on the load balancer, then redirect them to port 932 on your real server.
+ DestinationPort *int `json:"destinationPort,omitempty" xmlrpc:"destinationPort,omitempty"`
+
+ // A flag (either true or false) that determines if this particular service should be enabled on the load balancer. Set to false to bring the server out of rotation without losing your configuration.
+ Enabled *bool `json:"enabled,omitempty" xmlrpc:"enabled,omitempty"`
+
+ // The health check type for this service. If one is supplied, the load balancer will occasionally ping your server to determine if it is still up. Servers that are down are removed from the queue and will not be used to handle requests until their status returns to "up". The value of the health check is determined directly by what option you have selected for the routing type.
+ //
+ // {|
+ // |-
+ // ! Type
+ // ! Valid Health Checks
+ // |-
+ // | HTTP
+ // | HTTP, TCP, ICMP
+ // |-
+ // | TCP
+ // | HTTP, TCP, ICMP
+ // |-
+ // | FTP
+ // | TCP, ICMP
+ // |-
+ // | DNS
+ // | DNS, ICMP
+ // |-
+ // | UDP
+ // | None
+ // |}
+ //
+ //
+ HealthCheck *string `json:"healthCheck,omitempty" xmlrpc:"healthCheck,omitempty"`
+
+ // The URL provided here (starting with /) is what the load balancer will request in order to perform a custom HTTP health check. You must specify either "GET /location/of/file.html" or "HEAD /location/of/file.php".
+ HealthCheckURL *string `json:"healthCheckURL,omitempty" xmlrpc:"healthCheckURL,omitempty"`
+
+ // The expected response from the custom HTTP health check. If the requested page contains this response, the check succeeds.
+ HealthResponse *string `json:"healthResponse,omitempty" xmlrpc:"healthResponse,omitempty"`
+
+ // Unique ID for this object, used for the getObject method, and must be set if you are editing this object.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Last modification date of this service.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // Name of the load balancer service.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // Holds whether this server is up or down. Does not affect the load balancer configuration at all; it exists purely for the customer's information.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // Peak historical connections since the creation of this service. It is reset any time you make a configuration change.
+ PeakConnections *int `json:"peakConnections,omitempty" xmlrpc:"peakConnections,omitempty"`
+
+ // The port on the load balancer that this service maps to. This is the port for incoming traffic; it must be shared with other services to form a group.
+ SourcePort *int `json:"sourcePort,omitempty" xmlrpc:"sourcePort,omitempty"`
+
+ // The connection type of this service. Valid values are HTTP, FTP, TCP, UDP, and DNS. The value of this variable affects the available values of healthCheck, listed in that variable's description.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // The load balancer that this service belongs to.
+ Vip *Network_LoadBalancer_VirtualIpAddress `json:"vip,omitempty" xmlrpc:"vip,omitempty"`
+
+ // Unique ID for this object's parent. Probably not useful in the API, as this object will always be a child of a VirtualIpAddress anyway.
+ VipId *int `json:"vipId,omitempty" xmlrpc:"vipId,omitempty"`
+
+ // Weight affects the choices the load balancer makes between your services. The weight of each service is expressed as a percentage of the TOTAL CONNECTION LIMIT on the virtual IP address. All services draw from the same pool of connections, so if you expect to have 4 times as much HTTP traffic as HTTPS, your weights for the above example routes would be 40%, 40%, 10%, 10% respectively. The weights should add up to 100%. If you go over 100%, an exception will be thrown. Weights must be whole numbers; no fractions or decimals are accepted.
+ Weight *int `json:"weight,omitempty" xmlrpc:"weight,omitempty"`
+}
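+
+// Example (editor's sketch; addresses and ports are made up): an HTTP service
+// with a custom HTTP health check, consistent with the routing-type table
+// above.
+//
+//	svc := datatypes.Network_LoadBalancer_Service{
+//		Type:                 sl.String("HTTP"),
+//		SourcePort:           sl.Int(80),
+//		DestinationIpAddress: sl.String("203.0.113.20"),
+//		DestinationPort:      sl.Int(8080),
+//		HealthCheck:          sl.String("HTTP"),
+//		HealthCheckURL:       sl.String("GET /health.html"),
+//		HealthResponse:       sl.String("OK"),
+//		Enabled:              sl.Bool(true),
+//	}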
+
+// The SoftLayer_Network_LoadBalancer_VirtualIpAddress data type contains all the information relating to a specific load balancer assigned to a customer account.
+//
+// Information retained on the object itself is the virtual IP address, load balancing method, and any notes that are related to the load balancer. There is also an array of SoftLayer_Network_LoadBalancer_Service objects, which represent the load balancer services, explained more fully in the SoftLayer_Network_LoadBalancer_Service documentation.
+type Network_LoadBalancer_VirtualIpAddress struct {
+ Entity
+
+ // The account that owns this load balancer.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The current billing item for the load balancer.
+ BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // Connection limit on this VIP. Can be upgraded through the upgradeConnectionLimit() function.
+ ConnectionLimit *int `json:"connectionLimit,omitempty" xmlrpc:"connectionLimit,omitempty"`
+
+ // If false, this VIP and associated services may be edited via the portal or the API. If true, you must configure this VIP manually on the device.
+ CustomerManagedFlag *int `json:"customerManagedFlag,omitempty" xmlrpc:"customerManagedFlag,omitempty"`
+
+ // Unique ID for this object, used for the getObject method, and must be set if you are editing this object.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The load balancing method that determines which server is used "next" by the load balancer. The method is stored in an abbreviated form, represented in parentheses after the full name. Methods include:
+ // * Round Robin (Value "rr"): Each server is used sequentially in a circular queue.
+ // * Shortest Response (Value "sr"): The server with the lowest ping at the last health check gets the next request.
+ // * Least Connections (Value "lc"): The server with the least current connections is given the next request.
+ // * Persistent IP - Round Robin (Value "pi"): The same server will be returned to a request during a user's session. Servers are chosen through round robin.
+ // * Persistent IP - Shortest Response (Value "pi-sr"): The same server will be returned to a request during a user's session. Servers are chosen through shortest response.
+ // * Persistent IP - Least Connections (Value "pi-lc"): The same server will be returned to a request during a user's session. Servers are chosen through least connections.
+ // * Insert Cookie - Round Robin (Value "ic"): Inserts a cookie into the HTTP stream that will tie that client to a particular balanced server. Servers are chosen through round robin.
+ // * Insert Cookie - Shortest Response (Value "ic-sr"): Inserts a cookie into the HTTP stream that will tie that client to a particular balanced server. Servers are chosen through shortest response.
+ // * Insert Cookie - Least Connections (Value "ic-lc"): Inserts a cookie into the HTTP stream that will tie that client to a particular balanced server. Servers are chosen through least connections.
+ LoadBalancingMethod *string `json:"loadBalancingMethod,omitempty" xmlrpc:"loadBalancingMethod,omitempty"`
+
+ // A human readable version of loadBalancingMethod, intended mainly for API users.
+ LoadBalancingMethodFullName *string `json:"loadBalancingMethodFullName,omitempty" xmlrpc:"loadBalancingMethodFullName,omitempty"`
+
+ // A flag indicating that the load balancer is a managed resource.
+ ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"`
+
+ // Date this load balancer was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The name of the load balancer instance.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // User-created notes on this load balancer.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // The unique identifier of the Security Certificate to be utilized when SSL support is enabled.
+ SecurityCertificateId *int `json:"securityCertificateId,omitempty" xmlrpc:"securityCertificateId,omitempty"`
+
+ // A count of the services on this load balancer.
+ ServiceCount *uint `json:"serviceCount,omitempty" xmlrpc:"serviceCount,omitempty"`
+
+ // The services on this load balancer.
+ Services []Network_LoadBalancer_Service `json:"services,omitempty" xmlrpc:"services,omitempty"`
+
+ // This is the port for incoming traffic.
+ SourcePort *int `json:"sourcePort,omitempty" xmlrpc:"sourcePort,omitempty"`
+
+ // The connection type of this VIP. Valid values are HTTP, FTP, TCP, UDP, and DNS.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // The virtual, public-facing IP address for your load balancer. This is the address of all incoming traffic.
+ VirtualIpAddress *string `json:"virtualIpAddress,omitempty" xmlrpc:"virtualIpAddress,omitempty"`
+}
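+
+// Example (editor's sketch): mapping the abbreviated loadBalancingMethod
+// values listed above to their full names, e.g. for display purposes.
+//
+//	var lbMethodNames = map[string]string{
+//		"rr":    "Round Robin",
+//		"sr":    "Shortest Response",
+//		"lc":    "Least Connections",
+//		"pi":    "Persistent IP - Round Robin",
+//		"pi-sr": "Persistent IP - Shortest Response",
+//		"pi-lc": "Persistent IP - Least Connections",
+//		"ic":    "Insert Cookie - Round Robin",
+//		"ic-sr": "Insert Cookie - Shortest Response",
+//		"ic-lc": "Insert Cookie - Least Connections",
+//	}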
+
+// The Syslog class holds a single line from the networking firewall "Syslog" record, for connection attempts detected and blocked by the firewall on a server.
+type Network_Logging_Syslog struct {
+ Entity
+
+ // Timestamp for when the connection was blocked by the firewall.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The destination IP address of the blocked connection (your end).
+ DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"`
+
+ // The destination port of the blocked connection (your end).
+ DestinationPort *int `json:"destinationPort,omitempty" xmlrpc:"destinationPort,omitempty"`
+
+ // This tells you what kind of firewall event this log line is for: accept or deny.
+ EventType *string `json:"eventType,omitempty" xmlrpc:"eventType,omitempty"`
+
+ // Raw syslog message for the event.
+ Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"`
+
+ // Connection protocol used to make the call that was blocked (tcp, udp, etc.).
+ Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+
+ // The source IP address of the call that was blocked (attacker's end).
+ SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"`
+
+ // The source port where the blocked connection was established (attacker's end).
+ SourcePort *int `json:"sourcePort,omitempty" xmlrpc:"sourcePort,omitempty"`
+
+ // If this is an aggregation of syslog events, this property shows the total events.
+ TotalEvents *int `json:"totalEvents,omitempty" xmlrpc:"totalEvents,omitempty"`
+}
+
+// The SoftLayer_Network_Media_Transcode_Account contains information regarding a transcode account.
+type Network_Media_Transcode_Account struct {
+ Entity
+
+ // The SoftLayer account information
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The internal identifier of a SoftLayer account
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The created date
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The internal identifier of a transcode account
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The last modified date
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A count of transcode jobs
+ TranscodeJobCount *uint `json:"transcodeJobCount,omitempty" xmlrpc:"transcodeJobCount,omitempty"`
+
+ // Transcode jobs
+ TranscodeJobs []Network_Media_Transcode_Job `json:"transcodeJobs,omitempty" xmlrpc:"transcodeJobs,omitempty"`
+}
+
+// The SoftLayer_Network_Media_Transcode_Job contains information regarding a transcode job such as the input file, output format, user id and so on.
+type Network_Media_Transcode_Job struct {
+ Entity
+
+ // The auto-deletion duration in seconds. This value determines how long the input file will be kept on the storage.
+ AutoDeleteDuration *int `json:"autoDeleteDuration,omitempty" xmlrpc:"autoDeleteDuration,omitempty"`
+
+ // The size of an input file in bytes
+ ByteIn *int `json:"byteIn,omitempty" xmlrpc:"byteIn,omitempty"`
+
+ // The created date
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ History []Network_Media_Transcode_Job_History `json:"history,omitempty" xmlrpc:"history,omitempty"`
+
+ // A count of the job's history records
+ HistoryCount *uint `json:"historyCount,omitempty" xmlrpc:"historyCount,omitempty"`
+
+ // The internal identifier of a transcode job
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The input file name
+ InputFile *string `json:"inputFile,omitempty" xmlrpc:"inputFile,omitempty"`
+
+ // The last modified date
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The name of a transcode job
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The output file name
+ OutputFile *string `json:"outputFile,omitempty" xmlrpc:"outputFile,omitempty"`
+
+ // The transcode service account
+ TranscodeAccount *Network_Media_Transcode_Account `json:"transcodeAccount,omitempty" xmlrpc:"transcodeAccount,omitempty"`
+
+ // The internal identifier of the transcode account
+ TranscodeAccountId *int `json:"transcodeAccountId,omitempty" xmlrpc:"transcodeAccountId,omitempty"`
+
+ // The unique id of a transcode job
+ TranscodeJobGuid *string `json:"transcodeJobGuid,omitempty" xmlrpc:"transcodeJobGuid,omitempty"`
+
+ // The unique id of a pre-defined output format
+ TranscodePresetGuid *string `json:"transcodePresetGuid,omitempty" xmlrpc:"transcodePresetGuid,omitempty"`
+
+ // The name of a transcode output preset
+ TranscodePresetName *string `json:"transcodePresetName,omitempty" xmlrpc:"transcodePresetName,omitempty"`
+
+ // The status information of a transcode job
+ TranscodeStatus *Network_Media_Transcode_Job_Status `json:"transcodeStatus,omitempty" xmlrpc:"transcodeStatus,omitempty"`
+
+ // The internal identifier of a transcode status
+ TranscodeStatusId *int `json:"transcodeStatusId,omitempty" xmlrpc:"transcodeStatusId,omitempty"`
+
+ // The status of a transcode job
+ TranscodeStatusName *string `json:"transcodeStatusName,omitempty" xmlrpc:"transcodeStatusName,omitempty"`
+
+ // The SoftLayer user that created the transcode job
+ User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+ // The internal identifier of the user who created a transcode job
+ UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+
+ // Watermark to apply to the job
+ Watermark *Container_Network_Media_Transcode_Job_Watermark `json:"watermark,omitempty" xmlrpc:"watermark,omitempty"`
+}
+
+// no documentation yet
+type Network_Media_Transcode_Job_History struct {
+ Entity
+
+ // The creation date
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The note created by the system
+ PublicNotes *string `json:"publicNotes,omitempty" xmlrpc:"publicNotes,omitempty"`
+
+ // The internal identifier of a transcode job
+ TranscodeJobId *int `json:"transcodeJobId,omitempty" xmlrpc:"transcodeJobId,omitempty"`
+
+ // The status of a transcode job
+ TranscodeStatusName *string `json:"transcodeStatusName,omitempty" xmlrpc:"transcodeStatusName,omitempty"`
+}
+
+// The SoftLayer_Network_Media_Transcode_Job_Status contains information on a transcode job status.
+type Network_Media_Transcode_Job_Status struct {
+ Entity
+
+ // The description of a transcode job status
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The internal identifier of a transcode job status
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The status name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Network_Message_Delivery struct {
+ Entity
+
+ // The SoftLayer customer account that a network message delivery account belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // no documentation yet
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The billing item for a network message delivery account.
+ BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+ // The message delivery type of a network message delivery account.
+ Type *Network_Message_Delivery_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // no documentation yet
+ TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+ // no documentation yet
+ Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+
+ // The vendor for a network message delivery account.
+ Vendor *Network_Message_Delivery_Vendor `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"`
+
+ // no documentation yet
+ VendorId *int `json:"vendorId,omitempty" xmlrpc:"vendorId,omitempty"`
+}
+
+// no documentation yet
+type Network_Message_Delivery_Attribute struct {
+ Entity
+
+ // no documentation yet
+ NetworkMessageDelivery *Network_Message_Delivery `json:"networkMessageDelivery,omitempty" xmlrpc:"networkMessageDelivery,omitempty"`
+
+ // no documentation yet
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// no documentation yet
+type Network_Message_Delivery_Email_Sendgrid struct {
+ Network_Message_Delivery
+
+ // The contact e-mail address used by SendGrid.
+ EmailAddress *string `json:"emailAddress,omitempty" xmlrpc:"emailAddress,omitempty"`
+
+ // A flag that determines if a SendGrid e-mail delivery account has access to send mail through the SendGrid SMTP server.
+ SmtpAccess *string `json:"smtpAccess,omitempty" xmlrpc:"smtpAccess,omitempty"`
+}
+
+// no documentation yet
+type Network_Message_Delivery_Type struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Network_Message_Delivery_Vendor struct {
+ Entity
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Network_Message_Queue data type contains general information relating to a Message Queue account
+type Network_Message_Queue struct {
+ Entity
+
+ // The account that a message queue belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // A message queue's associated [[SoftLayer_Account|account]] id.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The current billing item for this message queue account.
+ BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // The date that a message queue account was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A message queue's internal identification number
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A message queue status' internal identifier.
+ MessageQueueStatusId *int `json:"messageQueueStatusId,omitempty" xmlrpc:"messageQueueStatusId,omitempty"`
+
+ // A unique message queue account name
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A count of all available message queue nodes
+ NodeCount *uint `json:"nodeCount,omitempty" xmlrpc:"nodeCount,omitempty"`
+
+ // All available message queue nodes
+ Nodes []Network_Message_Queue_Node `json:"nodes,omitempty" xmlrpc:"nodes,omitempty"`
+
+ // Brief notes on this message queue account
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // A message queue account status.
+ Status *Network_Message_Queue_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+}
+
+// The SoftLayer_Network_Message_Queue_Node data type contains general information relating to a Message Queue node
+type Network_Message_Queue_Node struct {
+ Entity
+
+ // A unique account name in this message queue node
+ AccountName *string `json:"accountName,omitempty" xmlrpc:"accountName,omitempty"`
+
+ // A message queue node's internal identification number
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The message queue account this node belongs to.
+ MessageQueue *Network_Message_Queue `json:"messageQueue,omitempty" xmlrpc:"messageQueue,omitempty"`
+
+ // A message queue node's associated message queue id.
+ MessageQueueId *int `json:"messageQueueId,omitempty" xmlrpc:"messageQueueId,omitempty"`
+
+ // A message queue node's metric tracking object. This object records all request and notification count data for this message queue node.
+ MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"`
+
+ // A user-friendly name of this message queue node
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // Brief notes on this message queue node
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // no documentation yet
+ ServiceResource *Network_Service_Resource `json:"serviceResource,omitempty" xmlrpc:"serviceResource,omitempty"`
+}
+
+// The SoftLayer_Network_Message_Queue_Status data type contains general information relating to a Message Queue account status.
+type Network_Message_Queue_Status struct {
+ Entity
+
+ // A brief description on a message queue account status
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // A message queue status's internal identification number
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A user-friendly name of a message queue account status
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Network_Monitor struct {
+ Entity
+}
+
+// The SoftLayer_Network_Monitor_Version1_Incident data type models a single virtual server or physical hardware network monitoring event. SoftLayer_Network_Monitor_Version1_Incidents are created when the SoftLayer monitoring system detects a service down on your hardware or virtual server. As the incident is resolved, its status changes from "SERVICE FAILURE" to "COMPLETED".
+type Network_Monitor_Version1_Incident struct {
+ Entity
+
+ // A network monitoring incident's status, either the string "SERVICE FAILURE" denoting an ongoing incident or "COMPLETE" meaning the incident has been resolved.
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+}
+
+// The Monitoring_Query_Host type represents a monitoring instance. It consists of a hardware ID to monitor, an IP address attached to that hardware ID, a method of monitoring, and what to do in the event that the monitor fails.
+type Network_Monitor_Version1_Query_Host struct {
+ Entity
+
+ // The argument to be used for this monitor, if necessary. The lowest monitoring levels (like ping) ignore this setting, but higher levels like HTTP custom use it.
+ Arg1Value *string `json:"arg1Value,omitempty" xmlrpc:"arg1Value,omitempty"`
+
+ // Virtual Guest Identification Number for the guest being monitored.
+ GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"`
+
+ // The hardware that is being monitored by this monitoring instance
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // The ID of the hardware being monitored
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+ // Identification Number for the host being monitored.
+ HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"`
+
+ // The unique identifier for this object
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The IP address to be monitored. Must be attached to the hardware on this object.
+ IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"`
+
+ // The most recent result for this particular monitoring instance.
+ LastResult *Network_Monitor_Version1_Query_Result `json:"lastResult,omitempty" xmlrpc:"lastResult,omitempty"`
+
+ // The type of monitoring query that is executed when this hardware is monitored.
+ QueryType *Network_Monitor_Version1_Query_Type `json:"queryType,omitempty" xmlrpc:"queryType,omitempty"`
+
+ // The ID of the query type to use.
+ QueryTypeId *int `json:"queryTypeId,omitempty" xmlrpc:"queryTypeId,omitempty"`
+
+ // The action taken when a monitor fails.
+ ResponseAction *Network_Monitor_Version1_Query_ResponseType `json:"responseAction,omitempty" xmlrpc:"responseAction,omitempty"`
+
+ // The ID of the response action to take when the monitor fails
+ ResponseActionId *int `json:"responseActionId,omitempty" xmlrpc:"responseActionId,omitempty"`
+
+ // The status of this monitoring instance. Anything other than "ON" means that the monitor has been disabled
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // The number of 5-minute cycles to wait before the "responseAction" is taken. If set to 0, the response action will be taken immediately
+ WaitCycles *int `json:"waitCycles,omitempty" xmlrpc:"waitCycles,omitempty"`
+}
+
+// The monitoring stratum type stores the maximum level of the various components of the monitoring system that a particular hardware object has access to. This object cannot be accessed by ID, and cannot be modified. The user can access this object through Hardware_Server->availableMonitoring.
+//
+// There are two values on this object that are important:
+// # monitorLevel determines the highest level of SoftLayer_Network_Monitor_Version1_Query_Type object that can be placed in a monitoring instance on this server
+// # responseLevel determines the highest level of SoftLayer_Network_Monitor_Version1_Query_ResponseType object that can be placed in a monitoring instance on this server
+//
+//
+// Also note that the query type and response types are available through getAllQueryTypes and getAllResponseTypes, respectively.
+type Network_Monitor_Version1_Query_Host_Stratum struct {
+ Entity
+
+ // The hardware object that these monitoring permissions apply to.
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // The highest level of a monitoring query type allowed on this server
+ MonitorLevel *int `json:"monitorLevel,omitempty" xmlrpc:"monitorLevel,omitempty"`
+
+ // The highest level of a monitoring response type allowed on this server
+ ResponseLevel *int `json:"responseLevel,omitempty" xmlrpc:"responseLevel,omitempty"`
+}
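+
+// Example (editor's sketch; all IDs are made up): a monitor whose response
+// action fires after two failed 5-minute cycles. Per the comments above,
+// queryTypeId and responseActionId reference Query_Type and
+// Query_ResponseType objects retrieved via getAllObjects.
+//
+//	monitor := datatypes.Network_Monitor_Version1_Query_Host{
+//		HardwareId:       sl.Int(12345),
+//		IpAddress:        sl.String("10.0.0.5"),
+//		QueryTypeId:      sl.Int(1),
+//		ResponseActionId: sl.Int(2),
+//		WaitCycles:       sl.Int(2),
+//	}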
+
+// The ResponseType type stores only an ID and a description of the response type. The only use for this object is in reference. The user chooses a response action that would be appropriate for a monitoring instance, and sets the ResponseTypeId to the SoftLayer_Network_Monitor_Version1_Query_Host->responseActionId value.
+//
+// The user can retrieve all available ResponseTypes with the getAllObjects method on this service.
+type Network_Monitor_Version1_Query_ResponseType struct {
+ Entity
+
+ // The description of the action the monitoring system will take on failure
+ ActionDescription *string `json:"actionDescription,omitempty" xmlrpc:"actionDescription,omitempty"`
+
+ // The unique identifier for this object
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The level of this response. The level the customer has access to is determined by values in SoftLayer_Network_Monitor_Version1_Query_Host_Stratum
+ Level *int `json:"level,omitempty" xmlrpc:"level,omitempty"`
+}
+
+// The monitoring result object is used to show the status of the actions taken by the monitoring system.
+//
+// In general, only the responseStatus variable is needed, as it holds the information on the status of the service.
+type Network_Monitor_Version1_Query_Result struct {
+ Entity
+
+ // The timestamp of when this monitor was completed.
+ FinishTime *Time `json:"finishTime,omitempty" xmlrpc:"finishTime,omitempty"`
+
+ // References the queryHost that this response relates to.
+ QueryHost *Network_Monitor_Version1_Query_Host `json:"queryHost,omitempty" xmlrpc:"queryHost,omitempty"`
+
+ // The response status for this server. The response status meanings are:
+ // * 0: Down/Critical - Server is down and/or has passed the critical response threshold (extremely long ping response, abnormal behavior, etc.)
+ // * 1: Warning - Server may be recovering from a previous down state, or may have taken too long to respond
+ // * 2: Up
+ // * 3: Not used
+ // * 4: Unknown - An unknown error has occurred. If the problem persists, contact support.
+ // * 5: Unknown - An unknown error has occurred. If the problem persists, contact support.
+ ResponseStatus *int `json:"responseStatus,omitempty" xmlrpc:"responseStatus,omitempty"`
+
+ // The length of time it took the server to respond
+ ResponseTime *Float64 `json:"responseTime,omitempty" xmlrpc:"responseTime,omitempty"`
+}
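+
+// Example (editor's sketch): translating the responseStatus codes listed
+// above into readable labels.
+//
+//	func responseStatusLabel(status int) string {
+//		switch status {
+//		case 0:
+//			return "Down/Critical"
+//		case 1:
+//			return "Warning"
+//		case 2:
+//			return "Up"
+//		case 3:
+//			return "Not used"
+//		default:
+//			return "Unknown"
+//		}
+//	}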
+
+// The MonitorType type stores a name, long description, and default arguments for the monitor types. The only use for this object is in reference. The user chooses a monitoring type that would be appropriate for their server, and sets the id of the Query_Type to SoftLayer_Network_Monitor_Version1_Query_Host->queryTypeId
+//
+// The user can retrieve all available Query Types with the getAllObjects method on this service.
+type Network_Monitor_Version1_Query_Type struct {
+ Entity
+
+ // The type of parameter sent to the monitoring command.
+ ArgumentDescription *string `json:"argumentDescription,omitempty" xmlrpc:"argumentDescription,omitempty"`
+
+ // Long description of the monitoring type.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The unique identifier for this object
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The level of this monitoring type. The level the customer has access to is determined by values in SoftLayer_Network_Monitor_Version1_Query_Host_Stratum
+ MonitorLevel *int `json:"monitorLevel,omitempty" xmlrpc:"monitorLevel,omitempty"`
+
+ // Short name of the monitoring type
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// SoftLayer_Network_Pod refers to a portion of a data center that shares a Backend Customer Router (BCR) and usually a front-end counterpart known as a Frontend Customer Router (FCR). A Pod primarily denotes a logical location within the network and the physical aspects that support networks. This is in contrast to representing a specific physical location.
+//
+// A ``Pod`` is identified by a ``name``, which is unique. A Pod name follows the format 'dddnn.podii', where 'ddd' is a data center code, 'nn' is the data center number, 'pod' is a literal string and 'ii' is a two-digit, left-zero-padded number which corresponds to a Backend Customer Router (BCR) of the desired data center. Examples:
+// * dal09.pod01 = Dallas 9, Pod 1 (ie. bcr01)
+// * sjc01.pod04 = San Jose 1, Pod 4 (ie. bcr04)
+// * ams01.pod01 = Amsterdam 1, Pod 1 (ie. bcr01)
+type Network_Pod struct {
+ Entity
+
+ // Identifier for this Pod's Backend Customer Router (BCR)
+ BackendRouterId *int `json:"backendRouterId,omitempty" xmlrpc:"backendRouterId,omitempty"`
+
+ // Host name of Pod's Backend Customer Router (BCR), e.g. bcr01a.dal09
+ BackendRouterName *string `json:"backendRouterName,omitempty" xmlrpc:"backendRouterName,omitempty"`
+
+ // The list of capabilities this Pod has.
+ Capabilities []string `json:"capabilities,omitempty" xmlrpc:"capabilities,omitempty"`
+
+ // Long form name of the data center in which this Pod resides, e.g. Dallas 9
+ DatacenterLongName *string `json:"datacenterLongName,omitempty" xmlrpc:"datacenterLongName,omitempty"`
+
+ // Name of data center in which this Pod resides, e.g. dal09
+ DatacenterName *string `json:"datacenterName,omitempty" xmlrpc:"datacenterName,omitempty"`
+
+ // (optional) Identifier for this Pod's Frontend Customer Router (FCR)
+ FrontendRouterId *int `json:"frontendRouterId,omitempty" xmlrpc:"frontendRouterId,omitempty"`
+
+ // Host name of Pod's Frontend Customer Router (FCR), e.g. fcr01a.dal09
+ FrontendRouterName *string `json:"frontendRouterName,omitempty" xmlrpc:"frontendRouterName,omitempty"`
+
+ // The unique name of the Pod. See [[SoftLayer_Network_Pod (type)]] for details of the name's construction.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
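+
+// Example (editor's sketch): splitting a Pod name into its datacenter and
+// BCR components, per the 'dddnn.podii' format described above.
+//
+//	parts := strings.SplitN("dal09.pod01", ".", 2)
+//	datacenterName := parts[0]                       // "dal09"
+//	bcrNumber := strings.TrimPrefix(parts[1], "pod") // "01", i.e. bcr01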
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Protection_Address struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // no documentation yet + DepartmentId *int `json:"departmentId,omitempty" xmlrpc:"departmentId,omitempty"` + + // no documentation yet + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // no documentation yet + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + ManagementMethodType *string `json:"managementMethodType,omitempty" xmlrpc:"managementMethodType,omitempty"` + + // no documentation yet + ModifiedUser *User_Employee `json:"modifiedUser,omitempty" xmlrpc:"modifiedUser,omitempty"` + + // no documentation yet + PrimaryRouter *Hardware_Router `json:"primaryRouter,omitempty" xmlrpc:"primaryRouter,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // no documentation yet + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // no documentation yet + SubnetIpAddress *Network_Subnet_IpAddress `json:"subnetIpAddress,omitempty" xmlrpc:"subnetIpAddress,omitempty"` + + // no documentation yet + TerminatedUser *User_Employee `json:"terminatedUser,omitempty" xmlrpc:"terminatedUser,omitempty"` + + // no documentation yet + Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` + + // A count of + TransactionCount *uint `json:"transactionCount,omitempty" xmlrpc:"transactionCount,omitempty"` + + // no documentation yet + Transactions []Provisioning_Version1_Transaction `json:"transactions,omitempty" xmlrpc:"transactions,omitempty"` + + // no documentation yet + UserDepartment *User_Employee_Department `json:"userDepartment,omitempty" xmlrpc:"userDepartment,omitempty"` + + // no documentation yet + UserRecord *User_Employee `json:"userRecord,omitempty" xmlrpc:"userRecord,omitempty"` +} + +// Regional Internet Registries are the organizations who delegate IP address blocks to other groups or organizations around the Internet. The information contained in this data type is used throughout the networking-related services in our systems. +type Network_Regional_Internet_Registry struct { + Entity + + // Unique ID of the object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The system-level name of the registry + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The friendly name of the registry + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// +// This is a Beta release of the Security Group feature. The use of this feature is restricted to select +// users. When the Beta period is over, security groups will be available for all users. Contact sgbeta@us.ibm.com +// using 'Security Groups' in the subject line with any questions. +// +// +// The SoftLayer_Network_SecurityGroup data type contains general information for a single security group. +// Security groups contain a set of [[SoftLayer_Network_SecurityGroup_Rule (type)|rules]] that handle traffic +// to virtual guest instances and a set of +// [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)|bindings]] to associate virtual guest +// network components with the security group. 
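+//
+//
+// A hedged usage sketch, assuming the companion session and services packages
+// of this library and that the generated service exposes GetAllObjects for
+// SLDN's getAllObjects (error handling elided):
+//
+//	sess := session.New(username, apiKey) // placeholder credentials
+//	groups, err := services.GetNetworkSecurityGroupService(sess).Mask("id;name;rules").GetAllObjects()
+//	if err == nil {
+//		for _, g := range groups {
+//			fmt.Printf("%d %s (%d rules)\n", *g.Id, *g.Name, len(g.Rules))
+//		}
+//	}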
+type Network_SecurityGroup struct {
+	Entity
+
+	// The account for this security group
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The date a security group was created
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The description for a security group
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The unique ID for a security group
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The date a security group was last modified
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// The name for a security group
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// A count of the network component bindings for this security group
+	NetworkComponentBindingCount *uint `json:"networkComponentBindingCount,omitempty" xmlrpc:"networkComponentBindingCount,omitempty"`
+
+	// The network component bindings for this security group
+	NetworkComponentBindings []Virtual_Network_SecurityGroup_NetworkComponentBinding `json:"networkComponentBindings,omitempty" xmlrpc:"networkComponentBindings,omitempty"`
+
+	// A count of the rules for this security group
+	RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"`
+
+	// The rules for this security group
+	Rules []Network_SecurityGroup_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"`
+}
+
+// The SoftLayer_Network_SecurityGroup_Rule data type contains general information for a single rule that belongs to a [[SoftLayer_Network_SecurityGroup|security group]]. Rule information in this type defines how to handle incoming (ingress) or outgoing (egress) traffic to the public and private interfaces of a virtual guest.
+type Network_SecurityGroup_Rule struct {
+	Entity
+
+	// The direction of traffic (ingress or egress)
+	Direction *string `json:"direction,omitempty" xmlrpc:"direction,omitempty"`
+
+	// IPv4 or IPv6. If the remoteIp or ethertype properties are not specified, the default is IPv4.
+	Ethertype *string `json:"ethertype,omitempty" xmlrpc:"ethertype,omitempty"`
+
+	// The unique ID for a rule
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The end of the port range for allowed traffic
+	PortRangeMax *int `json:"portRangeMax,omitempty" xmlrpc:"portRangeMax,omitempty"`
+
+	// The start of the port range for allowed traffic
+	PortRangeMin *int `json:"portRangeMin,omitempty" xmlrpc:"portRangeMin,omitempty"`
+
+	// The protocol of packets (icmp, tcp, or udp)
+	Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+
+	// The remote security group allowed as part of this rule
+	RemoteGroup *Network_SecurityGroup `json:"remoteGroup,omitempty" xmlrpc:"remoteGroup,omitempty"`
+
+	// The ID of the remote security group allowed as part of the rule. This property is mutually exclusive with the remoteIp property
+	RemoteGroupId *int `json:"remoteGroupId,omitempty" xmlrpc:"remoteGroupId,omitempty"`
+
+	// CIDR or IP address for allowed connections. This property is mutually exclusive with the remoteGroupId property
+	RemoteIp *string `json:"remoteIp,omitempty" xmlrpc:"remoteIp,omitempty"`
+
+	// The security group of this rule
+	SecurityGroup *Network_SecurityGroup `json:"securityGroup,omitempty" xmlrpc:"securityGroup,omitempty"`
+
+	// The ID of the security group that owns the rule.
+	SecurityGroupId *int `json:"securityGroupId,omitempty" xmlrpc:"securityGroupId,omitempty"`
+}
+
+// The SoftLayer_Network_Security_Scanner_Request data type represents a single vulnerability scan request. It provides information on when the scan was created, last updated, and the current status. The status messages are as follows:
+// *Scan Pending
+// *Scan Processing
+// *Scan Complete
+// *Scan Cancelled
+// *Generating Report.
+type Network_Security_Scanner_Request struct {
+	Entity
+
+	// The account associated with a security scan request.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// A request's associated customer account identifier.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// The date and time that the request was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The virtual guest a security scan is run against.
+	Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"`
+
+	// Virtual Guest Identification Number for the guest this security scanner request belongs to.
+	GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"`
+
+	// The hardware a security scan is run against.
+	Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+	// The identifier of the hardware item a scan is run on.
+	HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+	// Identification Number for the host this security scanner request belongs to.
+	HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"`
+
+	// A security scan request's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The IP address that a scan will be performed on.
+	IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"`
+
+	// The date and time that the request was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// Flag whether the requestor owns the hardware the scan was run on. This flag will return for hardware servers only; virtual servers will result in a null return even if you have a request out for them.
+	RequestorOwnedFlag *bool `json:"requestorOwnedFlag,omitempty" xmlrpc:"requestorOwnedFlag,omitempty"`
+
+	// A security scan request's status.
+	Status *Network_Security_Scanner_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+	// A request status identifier.
+	StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+}
+
+// The SoftLayer_Network_Security_Scanner_Request_Status data type represents the current status of a vulnerability scan. The status messages are as follows:
+// *Scan Pending
+// *Scan Processing
+// *Scan Complete
+// *Scan Cancelled
+// *Generating Report.
+//
+//
+// The status of a vulnerability scan will change over the course of a scan's execution.
+type Network_Security_Scanner_Request_Status struct {
+	Entity
+
+	// The identifier of a vulnerability scan's status.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The status message of a vulnerability scan.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Many general services that SoftLayer provides are tracked on the customer portal with a quick status message. These status messages provide users with a quick reference to the health of a service, whether it's up or down. These services include SoftLayer's Internet backbone connections, VPN entry points, and router networks.
The SoftLayer_Network_Service_Health data type provides the relationship between these services and their health status. +type Network_Service_Health struct { + Entity + + // The date that a service's status was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A service's location. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // A service's location identifier. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // The date that a service's status was last changed. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The status portion of a service/status relationship. + Status *Network_Service_Health_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A service's status identifier. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` +} + +// Many general services that SoftLayer provides are marked by a status message. These health messages give portal users a quick way of determining the state of a SoftLayer service. Services range from backbones to VPN endpoints and routers. Generally a health status is either "Up" or "Down". +type Network_Service_Health_Status struct { + Entity + + // The status of a SoftLayer service. This is typically "Up" or "Down". + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_Service_Resource is used to store information related to a service. It is used for determining the correct resource to connect to for a given service, like NAS, Evault, etc. +type Network_Service_Resource struct { + Entity + + // no documentation yet + ApiHost *string `json:"apiHost,omitempty" xmlrpc:"apiHost,omitempty"` + + // no documentation yet + ApiPassword *string `json:"apiPassword,omitempty" xmlrpc:"apiPassword,omitempty"` + + // no documentation yet + ApiPath *string `json:"apiPath,omitempty" xmlrpc:"apiPath,omitempty"` + + // no documentation yet + ApiPort *string `json:"apiPort,omitempty" xmlrpc:"apiPort,omitempty"` + + // no documentation yet + ApiProtocol *string `json:"apiProtocol,omitempty" xmlrpc:"apiProtocol,omitempty"` + + // no documentation yet + ApiUsername *string `json:"apiUsername,omitempty" xmlrpc:"apiUsername,omitempty"` + + // no documentation yet + ApiVersion *string `json:"apiVersion,omitempty" xmlrpc:"apiVersion,omitempty"` + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Network_Service_Resource_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // The backend IP address for this resource + BackendIpAddress *string `json:"backendIpAddress,omitempty" xmlrpc:"backendIpAddress,omitempty"` + + // no documentation yet + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // The frontend IP address for this resource + FrontendIpAddress *string `json:"frontendIpAddress,omitempty" xmlrpc:"frontendIpAddress,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name associated with this resource + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The hardware information associated with this resource. 
+ NetworkDevice *Hardware `json:"networkDevice,omitempty" xmlrpc:"networkDevice,omitempty"` + + // no documentation yet + SshUsername *string `json:"sshUsername,omitempty" xmlrpc:"sshUsername,omitempty"` + + // The network information associated with this resource. + Type *Network_Service_Resource_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Network_Service_Resource_Attribute struct { + Entity + + // no documentation yet + AttributeType *Network_Service_Resource_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"` + + // no documentation yet + ServiceResource *Network_Service_Resource `json:"serviceResource,omitempty" xmlrpc:"serviceResource,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Network_Service_Resource_Attribute_Type struct { + Entity + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` +} + +// no documentation yet +type Network_Service_Resource_Hub struct { + Network_Service_Resource +} + +// no documentation yet +type Network_Service_Resource_Hub_Swift struct { + Network_Service_Resource_Hub +} + +// no documentation yet +type Network_Service_Resource_MonitoringHub struct { + Network_Service_Resource + + // no documentation yet + AdnServicesIp *string `json:"adnServicesIp,omitempty" xmlrpc:"adnServicesIp,omitempty"` + + // no documentation yet + HubAddress *string `json:"hubAddress,omitempty" xmlrpc:"hubAddress,omitempty"` + + // no documentation yet + HubConnectionTimeout *string `json:"hubConnectionTimeout,omitempty" xmlrpc:"hubConnectionTimeout,omitempty"` + + // no documentation yet + RobotsCount *string `json:"robotsCount,omitempty" xmlrpc:"robotsCount,omitempty"` + + // no documentation yet + RobotsMax *string `json:"robotsMax,omitempty" xmlrpc:"robotsMax,omitempty"` +} + +// no documentation yet +type Network_Service_Resource_NimsoftLandingHub struct { + Network_Service_Resource_MonitoringHub +} + +// no documentation yet +type Network_Service_Resource_Type struct { + Entity + + // A count of + ServiceResourceCount *uint `json:"serviceResourceCount,omitempty" xmlrpc:"serviceResourceCount,omitempty"` + + // no documentation yet + ServiceResources []Network_Service_Resource `json:"serviceResources,omitempty" xmlrpc:"serviceResources,omitempty"` + + // no documentation yet + Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// The SoftLayer_Network_Service_Vpn_Overrides data type contains information relating user ids to subnet ids when VPN access is manually configured. It is essentially an entry in a 'white list' of subnets a SoftLayer portal VPN user may access. +type Network_Service_Vpn_Overrides struct { + Entity + + // The internal identifier of the record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Subnet components accessible by a SoftLayer VPN portal user. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // The identifier of a subnet accessible by the SoftLayer portal VPN user. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` + + // SoftLayer VPN portal user. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // The identifier of the SoftLayer portal VPN user. 
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// The SoftLayer_Network_Storage data type contains general information regarding a Storage product such as account id, access username and password, the Storage product type, and the server the Storage service is associated with. Currently, only EVault backup storage has an associated server.
+type Network_Storage struct {
+	Entity
+
+	// The account that a Storage service belongs to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The internal identifier of the SoftLayer customer account that a Storage account belongs to.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// Other usernames and passwords associated with a Storage volume.
+	AccountPassword *Account_Password `json:"accountPassword,omitempty" xmlrpc:"accountPassword,omitempty"`
+
+	// A count of the currently active transactions on a network storage volume.
+	ActiveTransactionCount *uint `json:"activeTransactionCount,omitempty" xmlrpc:"activeTransactionCount,omitempty"`
+
+	// The currently active transactions on a network storage volume.
+	ActiveTransactions []Provisioning_Version1_Transaction `json:"activeTransactions,omitempty" xmlrpc:"activeTransactions,omitempty"`
+
+	// The SoftLayer_Hardware objects which are allowed access to this storage volume.
+	AllowedHardware []Hardware `json:"allowedHardware,omitempty" xmlrpc:"allowedHardware,omitempty"`
+
+	// A count of the SoftLayer_Hardware objects which are allowed access to this storage volume.
+	AllowedHardwareCount *uint `json:"allowedHardwareCount,omitempty" xmlrpc:"allowedHardwareCount,omitempty"`
+
+	// A count of the SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume.
+	AllowedIpAddressCount *uint `json:"allowedIpAddressCount,omitempty" xmlrpc:"allowedIpAddressCount,omitempty"`
+
+	// The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume.
+	AllowedIpAddresses []Network_Subnet_IpAddress `json:"allowedIpAddresses,omitempty" xmlrpc:"allowedIpAddresses,omitempty"`
+
+	// The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant.
+	AllowedReplicationHardware []Hardware `json:"allowedReplicationHardware,omitempty" xmlrpc:"allowedReplicationHardware,omitempty"`
+
+	// A count of the SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant.
+	AllowedReplicationHardwareCount *uint `json:"allowedReplicationHardwareCount,omitempty" xmlrpc:"allowedReplicationHardwareCount,omitempty"`
+
+	// A count of the SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume's Replicant.
+	AllowedReplicationIpAddressCount *uint `json:"allowedReplicationIpAddressCount,omitempty" xmlrpc:"allowedReplicationIpAddressCount,omitempty"`
+
+	// The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume's Replicant.
+	AllowedReplicationIpAddresses []Network_Subnet_IpAddress `json:"allowedReplicationIpAddresses,omitempty" xmlrpc:"allowedReplicationIpAddresses,omitempty"`
+
+	// A count of the SoftLayer_Network_Subnet objects which are allowed access to this storage volume's Replicant.
+	AllowedReplicationSubnetCount *uint `json:"allowedReplicationSubnetCount,omitempty" xmlrpc:"allowedReplicationSubnetCount,omitempty"`
+
+	// The SoftLayer_Network_Subnet objects which are allowed access to this storage volume's Replicant.
+	AllowedReplicationSubnets []Network_Subnet `json:"allowedReplicationSubnets,omitempty" xmlrpc:"allowedReplicationSubnets,omitempty"`
+
+	// A count of the SoftLayer_Virtual_Guest objects which are allowed access to this storage volume's Replicant.
+	AllowedReplicationVirtualGuestCount *uint `json:"allowedReplicationVirtualGuestCount,omitempty" xmlrpc:"allowedReplicationVirtualGuestCount,omitempty"`
+
+	// The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume's Replicant.
+	AllowedReplicationVirtualGuests []Virtual_Guest `json:"allowedReplicationVirtualGuests,omitempty" xmlrpc:"allowedReplicationVirtualGuests,omitempty"`
+
+	// A count of the SoftLayer_Network_Subnet objects which are allowed access to this storage volume.
+	AllowedSubnetCount *uint `json:"allowedSubnetCount,omitempty" xmlrpc:"allowedSubnetCount,omitempty"`
+
+	// The SoftLayer_Network_Subnet objects which are allowed access to this storage volume.
+	AllowedSubnets []Network_Subnet `json:"allowedSubnets,omitempty" xmlrpc:"allowedSubnets,omitempty"`
+
+	// A count of the SoftLayer_Virtual_Guest objects which are allowed access to this storage volume.
+	AllowedVirtualGuestCount *uint `json:"allowedVirtualGuestCount,omitempty" xmlrpc:"allowedVirtualGuestCount,omitempty"`
+
+	// The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume.
+	AllowedVirtualGuests []Virtual_Guest `json:"allowedVirtualGuests,omitempty" xmlrpc:"allowedVirtualGuests,omitempty"`
+
+	// The current billing item for a Storage volume.
+	BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+	// no documentation yet
+	BillingItemCategory *Product_Item_Category `json:"billingItemCategory,omitempty" xmlrpc:"billingItemCategory,omitempty"`
+
+	// The amount of space used by the volume, in bytes.
+	BytesUsed *string `json:"bytesUsed,omitempty" xmlrpc:"bytesUsed,omitempty"`
+
+	// A Storage account's capacity, measured in gigabytes.
+	CapacityGb *int `json:"capacityGb,omitempty" xmlrpc:"capacityGb,omitempty"`
+
+	// The date a network storage volume was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The schedule id which was executed to create a snapshot.
+	CreationScheduleId *string `json:"creationScheduleId,omitempty" xmlrpc:"creationScheduleId,omitempty"`
+
+	// A count of
+	CredentialCount *uint `json:"credentialCount,omitempty" xmlrpc:"credentialCount,omitempty"`
+
+	// no documentation yet
+	Credentials []Network_Storage_Credential `json:"credentials,omitempty" xmlrpc:"credentials,omitempty"`
+
+	// The Daily Schedule which is associated with this network storage volume.
+	DailySchedule *Network_Storage_Schedule `json:"dailySchedule,omitempty" xmlrpc:"dailySchedule,omitempty"`
+
+	// A count of the events which have taken place on a network storage volume.
+	EventCount *uint `json:"eventCount,omitempty" xmlrpc:"eventCount,omitempty"`
+
+	// The events which have taken place on a network storage volume.
+	Events []Network_Storage_Event `json:"events,omitempty" xmlrpc:"events,omitempty"`
+
+	// Retrieves the NFS Network Mount Address Name for a given File Storage Volume.
+	FileNetworkMountAddress *string `json:"fileNetworkMountAddress,omitempty" xmlrpc:"fileNetworkMountAddress,omitempty"`
+
+	// The unique identification number of the guest associated with a Storage volume.
+	GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"`
+
+	// When applicable, the hardware associated with a Storage service.
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The server that is associated with a Storage service. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // no documentation yet + HasEncryptionAtRest *bool `json:"hasEncryptionAtRest,omitempty" xmlrpc:"hasEncryptionAtRest,omitempty"` + + // The unique identification number of the host associated with a Storage volume. + HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"` + + // The Hourly Schedule which is associated with this network storage volume. + HourlySchedule *Network_Storage_Schedule `json:"hourlySchedule,omitempty" xmlrpc:"hourlySchedule,omitempty"` + + // A Storage account's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The maximum number of IOPs selected for this volume. + Iops *string `json:"iops,omitempty" xmlrpc:"iops,omitempty"` + + // Determines whether a volume is ready to order snapshot space, or, if snapshot space is already available, to assign a snapshot schedule, or to take a manual snapshot. + IsReadyForSnapshot *bool `json:"isReadyForSnapshot,omitempty" xmlrpc:"isReadyForSnapshot,omitempty"` + + // Determines whether a volume is ready to have Hosts authorized to access it. This does not indicate whether another operation may be blocking, please refer to this volume's volumeStatus property for details. + IsReadyToMount *bool `json:"isReadyToMount,omitempty" xmlrpc:"isReadyToMount,omitempty"` + + // A count of relationship between a container volume and iSCSI LUNs. + IscsiLunCount *uint `json:"iscsiLunCount,omitempty" xmlrpc:"iscsiLunCount,omitempty"` + + // Relationship between a container volume and iSCSI LUNs. + IscsiLuns []Network_Storage `json:"iscsiLuns,omitempty" xmlrpc:"iscsiLuns,omitempty"` + + // The ID of the LUN volume. + LunId *string `json:"lunId,omitempty" xmlrpc:"lunId,omitempty"` + + // A count of the manually-created snapshots associated with this SoftLayer_Network_Storage volume. Does not support pagination by result limit and offset. + ManualSnapshotCount *uint `json:"manualSnapshotCount,omitempty" xmlrpc:"manualSnapshotCount,omitempty"` + + // The manually-created snapshots associated with this SoftLayer_Network_Storage volume. Does not support pagination by result limit and offset. + ManualSnapshots []Network_Storage `json:"manualSnapshots,omitempty" xmlrpc:"manualSnapshots,omitempty"` + + // A network storage volume's metric tracking object. This object records all periodic polled data available to this volume. + MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // Whether or not a network storage volume may be mounted. + MountableFlag *string `json:"mountableFlag,omitempty" xmlrpc:"mountableFlag,omitempty"` + + // The current status of split or move operation as a part of volume duplication. + MoveAndSplitStatus *string `json:"moveAndSplitStatus,omitempty" xmlrpc:"moveAndSplitStatus,omitempty"` + + // A Storage account's type. Valid examples are "NAS", "LOCKBOX", "ISCSI", "EVAULT", and "HUB". + NasType *string `json:"nasType,omitempty" xmlrpc:"nasType,omitempty"` + + // Public notes related to a Storage volume. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // A count of the subscribers that will be notified for usage amount warnings and overages. 
+ NotificationSubscriberCount *uint `json:"notificationSubscriberCount,omitempty" xmlrpc:"notificationSubscriberCount,omitempty"` + + // The subscribers that will be notified for usage amount warnings and overages. + NotificationSubscribers []Notification_User_Subscriber `json:"notificationSubscribers,omitempty" xmlrpc:"notificationSubscribers,omitempty"` + + // The name of the snapshot that this volume was duplicated from. + OriginalSnapshotName *string `json:"originalSnapshotName,omitempty" xmlrpc:"originalSnapshotName,omitempty"` + + // The name of the volume that this volume was duplicated from. + OriginalVolumeName *string `json:"originalVolumeName,omitempty" xmlrpc:"originalVolumeName,omitempty"` + + // The size (in GB) of the volume that this volume was duplicated from, or in the case of iSCSI LUNs, the size of the base originally-provisioned LUN. + OriginalVolumeSize *string `json:"originalVolumeSize,omitempty" xmlrpc:"originalVolumeSize,omitempty"` + + // A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type. + OsType *Network_Storage_Iscsi_OS_Type `json:"osType,omitempty" xmlrpc:"osType,omitempty"` + + // A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type ID. + OsTypeId *string `json:"osTypeId,omitempty" xmlrpc:"osTypeId,omitempty"` + + // A count of the volumes or snapshots partnered with a network storage volume in a parental role. + ParentPartnershipCount *uint `json:"parentPartnershipCount,omitempty" xmlrpc:"parentPartnershipCount,omitempty"` + + // The volumes or snapshots partnered with a network storage volume in a parental role. + ParentPartnerships []Network_Storage_Partnership `json:"parentPartnerships,omitempty" xmlrpc:"parentPartnerships,omitempty"` + + // The parent volume of a volume in a complex storage relationship. + ParentVolume *Network_Storage `json:"parentVolume,omitempty" xmlrpc:"parentVolume,omitempty"` + + // A count of the volumes or snapshots partnered with a network storage volume. + PartnershipCount *uint `json:"partnershipCount,omitempty" xmlrpc:"partnershipCount,omitempty"` + + // The volumes or snapshots partnered with a network storage volume. + Partnerships []Network_Storage_Partnership `json:"partnerships,omitempty" xmlrpc:"partnerships,omitempty"` + + // The password used to access a non-EVault Storage volume. This password is used to register the EVault server agent with the vault backup system. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // A count of all permissions group(s) this volume is in. + PermissionsGroupCount *uint `json:"permissionsGroupCount,omitempty" xmlrpc:"permissionsGroupCount,omitempty"` + + // All permissions group(s) this volume is in. + PermissionsGroups []Network_Storage_Group `json:"permissionsGroups,omitempty" xmlrpc:"permissionsGroups,omitempty"` + + // The properties used to provide additional details about a network storage volume. + Properties []Network_Storage_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // A count of the properties used to provide additional details about a network storage volume. + PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"` + + // The number of IOPs provisioned for this volume. + ProvisionedIops *string `json:"provisionedIops,omitempty" xmlrpc:"provisionedIops,omitempty"` + + // A count of the iSCSI LUN volumes being replicated by this network storage volume. 
+ ReplicatingLunCount *uint `json:"replicatingLunCount,omitempty" xmlrpc:"replicatingLunCount,omitempty"` + + // The iSCSI LUN volumes being replicated by this network storage volume. + ReplicatingLuns []Network_Storage `json:"replicatingLuns,omitempty" xmlrpc:"replicatingLuns,omitempty"` + + // The network storage volume being replicated by a volume. + ReplicatingVolume *Network_Storage `json:"replicatingVolume,omitempty" xmlrpc:"replicatingVolume,omitempty"` + + // A count of the volume replication events. + ReplicationEventCount *uint `json:"replicationEventCount,omitempty" xmlrpc:"replicationEventCount,omitempty"` + + // The volume replication events. + ReplicationEvents []Network_Storage_Event `json:"replicationEvents,omitempty" xmlrpc:"replicationEvents,omitempty"` + + // A count of the network storage volumes configured to be replicants of a volume. + ReplicationPartnerCount *uint `json:"replicationPartnerCount,omitempty" xmlrpc:"replicationPartnerCount,omitempty"` + + // The network storage volumes configured to be replicants of a volume. + ReplicationPartners []Network_Storage `json:"replicationPartners,omitempty" xmlrpc:"replicationPartners,omitempty"` + + // The Replication Schedule associated with a network storage volume. + ReplicationSchedule *Network_Storage_Schedule `json:"replicationSchedule,omitempty" xmlrpc:"replicationSchedule,omitempty"` + + // The current replication status of a network storage volume. Indicates Failover or Failback status. + ReplicationStatus *string `json:"replicationStatus,omitempty" xmlrpc:"replicationStatus,omitempty"` + + // A count of the schedules which are associated with a network storage volume. + ScheduleCount *uint `json:"scheduleCount,omitempty" xmlrpc:"scheduleCount,omitempty"` + + // The schedules which are associated with a network storage volume. + Schedules []Network_Storage_Schedule `json:"schedules,omitempty" xmlrpc:"schedules,omitempty"` + + // Service Provider ID + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // The network resource a Storage service is connected to. + ServiceResource *Network_Service_Resource `json:"serviceResource,omitempty" xmlrpc:"serviceResource,omitempty"` + + // The IP address of a Storage resource. + ServiceResourceBackendIpAddress *string `json:"serviceResourceBackendIpAddress,omitempty" xmlrpc:"serviceResourceBackendIpAddress,omitempty"` + + // The name of a Storage's network resource. + ServiceResourceName *string `json:"serviceResourceName,omitempty" xmlrpc:"serviceResourceName,omitempty"` + + // A volume's configured snapshot space size. + SnapshotCapacityGb *string `json:"snapshotCapacityGb,omitempty" xmlrpc:"snapshotCapacityGb,omitempty"` + + // A count of the snapshots associated with this SoftLayer_Network_Storage volume. + SnapshotCount *uint `json:"snapshotCount,omitempty" xmlrpc:"snapshotCount,omitempty"` + + // The creation timestamp of the snapshot on the storage platform. + SnapshotCreationTimestamp *string `json:"snapshotCreationTimestamp,omitempty" xmlrpc:"snapshotCreationTimestamp,omitempty"` + + // The percentage of used snapshot space after which to delete automated snapshots. + SnapshotDeletionThresholdPercentage *string `json:"snapshotDeletionThresholdPercentage,omitempty" xmlrpc:"snapshotDeletionThresholdPercentage,omitempty"` + + // The snapshot size in bytes. + SnapshotSizeBytes *string `json:"snapshotSizeBytes,omitempty" xmlrpc:"snapshotSizeBytes,omitempty"` + + // A volume's available snapshot reservation space. 
+ SnapshotSpaceAvailable *string `json:"snapshotSpaceAvailable,omitempty" xmlrpc:"snapshotSpaceAvailable,omitempty"` + + // The snapshots associated with this SoftLayer_Network_Storage volume. + Snapshots []Network_Storage `json:"snapshots,omitempty" xmlrpc:"snapshots,omitempty"` + + // no documentation yet + StaasVersion *string `json:"staasVersion,omitempty" xmlrpc:"staasVersion,omitempty"` + + // A count of the network storage groups this volume is attached to. + StorageGroupCount *uint `json:"storageGroupCount,omitempty" xmlrpc:"storageGroupCount,omitempty"` + + // The network storage groups this volume is attached to. + StorageGroups []Network_Storage_Group `json:"storageGroups,omitempty" xmlrpc:"storageGroups,omitempty"` + + // no documentation yet + StorageTierLevel *string `json:"storageTierLevel,omitempty" xmlrpc:"storageTierLevel,omitempty"` + + // A description of the Storage object. + StorageType *Network_Storage_Type `json:"storageType,omitempty" xmlrpc:"storageType,omitempty"` + + // A storage object's type. + StorageTypeId *string `json:"storageTypeId,omitempty" xmlrpc:"storageTypeId,omitempty"` + + // The amount of space used by the volume. + TotalBytesUsed *string `json:"totalBytesUsed,omitempty" xmlrpc:"totalBytesUsed,omitempty"` + + // The total snapshot retention count of all schedules on this network storage volume. + TotalScheduleSnapshotRetentionCount *uint `json:"totalScheduleSnapshotRetentionCount,omitempty" xmlrpc:"totalScheduleSnapshotRetentionCount,omitempty"` + + // This flag indicates whether this storage type is upgradable or not. + UpgradableFlag *bool `json:"upgradableFlag,omitempty" xmlrpc:"upgradableFlag,omitempty"` + + // The usage notification for SL Storage services. + UsageNotification *Notification `json:"usageNotification,omitempty" xmlrpc:"usageNotification,omitempty"` + + // The username used to access a non-EVault Storage volume. This username is used to register the EVault server agent with the vault backup system. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` + + // The type of network storage service. + VendorName *string `json:"vendorName,omitempty" xmlrpc:"vendorName,omitempty"` + + // When applicable, the virtual guest associated with a Storage service. + VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` + + // The username and password history for a Storage service. + VolumeHistory []Network_Storage_History `json:"volumeHistory,omitempty" xmlrpc:"volumeHistory,omitempty"` + + // A count of the username and password history for a Storage service. + VolumeHistoryCount *uint `json:"volumeHistoryCount,omitempty" xmlrpc:"volumeHistoryCount,omitempty"` + + // The current status of a network storage volume. + VolumeStatus *string `json:"volumeStatus,omitempty" xmlrpc:"volumeStatus,omitempty"` + + // The account username and password for the EVault webCC interface. + WebccAccount *Account_Password `json:"webccAccount,omitempty" xmlrpc:"webccAccount,omitempty"` + + // The Weekly Schedule which is associated with this network storage volume. + WeeklySchedule *Network_Storage_Schedule `json:"weeklySchedule,omitempty" xmlrpc:"weeklySchedule,omitempty"` +} + +// no documentation yet +type Network_Storage_Allowed_Host struct { + Entity + + // A count of the SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. 
+ AssignedGroupCount *uint `json:"assignedGroupCount,omitempty" xmlrpc:"assignedGroupCount,omitempty"` + + // The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. + AssignedGroups []Network_Storage_Group `json:"assignedGroups,omitempty" xmlrpc:"assignedGroups,omitempty"` + + // A count of the SoftLayer_Network_Storage primary volumes whose replicas are allowed access. + AssignedReplicationVolumeCount *uint `json:"assignedReplicationVolumeCount,omitempty" xmlrpc:"assignedReplicationVolumeCount,omitempty"` + + // The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. + AssignedReplicationVolumes []Network_Storage `json:"assignedReplicationVolumes,omitempty" xmlrpc:"assignedReplicationVolumes,omitempty"` + + // A count of the SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. + AssignedVolumeCount *uint `json:"assignedVolumeCount,omitempty" xmlrpc:"assignedVolumeCount,omitempty"` + + // The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. + AssignedVolumes []Network_Storage `json:"assignedVolumes,omitempty" xmlrpc:"assignedVolumes,omitempty"` + + // The SoftLayer_Network_Storage_Credential this allowed host uses. + Credential *Network_Storage_Credential `json:"credential,omitempty" xmlrpc:"credential,omitempty"` + + // The credential this allowed host will use + CredentialId *int `json:"credentialId,omitempty" xmlrpc:"credentialId,omitempty"` + + // The internal identifier of the igroup + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name of allowed host, usually an IQN or other identifier + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // no documentation yet + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` + + // no documentation yet + ResourceTableName *string `json:"resourceTableName,omitempty" xmlrpc:"resourceTableName,omitempty"` +} + +// no documentation yet +type Network_Storage_Allowed_Host_Hardware struct { + Network_Storage_Allowed_Host + + // The SoftLayer_Hardware object which this SoftLayer_Network_Storage_Allowed_Host is referencing. + Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Network_Storage_Allowed_Host_IpAddress struct { + Network_Storage_Allowed_Host + + // The SoftLayer_Network_Subnet_IpAddress object which this SoftLayer_Network_Storage_Allowed_Host is referencing. + Resource *Network_Subnet_IpAddress `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Network_Storage_Allowed_Host_Subnet struct { + Network_Storage_Allowed_Host + + // The SoftLayer_Network_Subnet object which this SoftLayer_Network_Storage_Allowed_Host is referencing. + Resource *Network_Subnet `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Network_Storage_Allowed_Host_VirtualGuest struct { + Network_Storage_Allowed_Host + + // The SoftLayer_Virtual_Guest object which this SoftLayer_Network_Storage_Allowed_Host is referencing. + Resource *Virtual_Guest `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The SoftLayer_Network_Storage_Backup contains general information regarding a Storage backup service such as account id, username, maximum capacity, password, Storage's product type and the server id. 
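+//
+// A hedged sketch of reading the peak-usage properties below via the companion
+// services package (the EVault service getter and volumeId are assumptions;
+// error handling elided):
+//
+//	svc := services.GetNetworkStorageBackupEvaultService(sess) // sess: an authenticated *session.Session
+//	vol, err := svc.Id(volumeId).Mask("currentCyclePeakUsage;previousCyclePeakUsage").GetObject()
+//	if err == nil && vol.CurrentCyclePeakUsage != nil {
+//		fmt.Printf("peak usage this cycle: %d bytes\n", *vol.CurrentCyclePeakUsage)
+//	}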
+type Network_Storage_Backup struct {
+	Network_Storage
+
+	// Peak number of bytes used in the vault for the current billing cycle.
+	CurrentCyclePeakUsage *uint `json:"currentCyclePeakUsage,omitempty" xmlrpc:"currentCyclePeakUsage,omitempty"`
+
+	// Peak number of bytes used in the vault for the previous billing cycle.
+	PreviousCyclePeakUsage *uint `json:"previousCyclePeakUsage,omitempty" xmlrpc:"previousCyclePeakUsage,omitempty"`
+}
+
+// The SoftLayer_Network_Storage_Backup_Evault contains general information regarding an EVault Storage service such as account id, username, maximum capacity, password, Storage's product type and the server id.
+type Network_Storage_Backup_Evault struct {
+	Network_Storage_Backup
+}
+
+// The SoftLayer_Network_Storage_Backup_Evault_Version6 contains the same properties as the SoftLayer_Network_Storage_Backup_Evault. Additional properties available for the EVault Storage type: softwareComponent, totalBytesUsed, backupJobDetails, restoreJobDetails and agentStatuses
+type Network_Storage_Backup_Evault_Version6 struct {
+	Network_Storage_Backup_Evault
+
+	// A count of statuses (usually a single status) for the agent tied to the EVault Storage services.
+	AgentStatusCount *uint `json:"agentStatusCount,omitempty" xmlrpc:"agentStatusCount,omitempty"`
+
+	// Statuses (usually a single status) for the agent tied to the EVault Storage services.
+	AgentStatuses []Container_Network_Storage_Evault_WebCc_AgentStatus `json:"agentStatuses,omitempty" xmlrpc:"agentStatuses,omitempty"`
+
+	// A count of all of the backup jobs for the EVault Storage account.
+	BackupJobDetailCount *uint `json:"backupJobDetailCount,omitempty" xmlrpc:"backupJobDetailCount,omitempty"`
+
+	// All of the backup jobs for the EVault Storage account.
+	BackupJobDetails []Container_Network_Storage_Evault_WebCc_JobDetails `json:"backupJobDetails,omitempty" xmlrpc:"backupJobDetails,omitempty"`
+
+	// A count of the billing items for plugins tied to the EVault Storage service.
+	PluginBillingItemCount *uint `json:"pluginBillingItemCount,omitempty" xmlrpc:"pluginBillingItemCount,omitempty"`
+
+	// The billing items for plugins tied to the EVault Storage service.
+	PluginBillingItems []Billing_Item `json:"pluginBillingItems,omitempty" xmlrpc:"pluginBillingItems,omitempty"`
+
+	// A count of all of the restore jobs for the EVault Storage account.
+	RestoreJobDetailCount *uint `json:"restoreJobDetailCount,omitempty" xmlrpc:"restoreJobDetailCount,omitempty"`
+
+	// All of the restore jobs for the EVault Storage account.
+	RestoreJobDetails []Container_Network_Storage_Evault_WebCc_JobDetails `json:"restoreJobDetails,omitempty" xmlrpc:"restoreJobDetails,omitempty"`
+
+	// The software component for the EVault base client.
+	SoftwareComponent *Software_Component `json:"softwareComponent,omitempty" xmlrpc:"softwareComponent,omitempty"`
+
+	// A count of the task information records for the EVault Storage service.
+	TaskCount *uint `json:"taskCount,omitempty" xmlrpc:"taskCount,omitempty"`
+
+	// Retrieve the task information for the EVault Storage service.
+	Tasks []Container_Network_Storage_Evault_Vault_Task `json:"tasks,omitempty" xmlrpc:"tasks,omitempty"`
+}
+
+// The SoftLayer_Network_Storage_Credential data type will give you an overview of the usernames that are currently attached to your storage device.
+type Network_Storage_Credential struct {
+	Entity
+
+	// This is the account that the storage credential is tied to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// This is the account id associated with the volume.
+	AccountId *string `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// This is the date that the record was created in the table.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// This is the date that the record was last updated in the table.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// This is the id of the type of credential that this object represents.
+	NasCredentialTypeId *int `json:"nasCredentialTypeId,omitempty" xmlrpc:"nasCredentialTypeId,omitempty"`
+
+	// These are the SoftLayer_Network_Storage_Allowed_Host entries that this credential is assigned to.
+	NetworkStorageAllowedHosts *Network_Storage_Allowed_Host `json:"networkStorageAllowedHosts,omitempty" xmlrpc:"networkStorageAllowedHosts,omitempty"`
+
+	// This is the password associated with the volume.
+	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+	// These are the types of storage that the credential can be assigned to.
+	Type *Network_Storage_Credential_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// This is the username associated with the volume.
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+
+	// A count of these are the SoftLayer_Network_Storage volumes that this credential is assigned to.
+	VolumeCount *uint `json:"volumeCount,omitempty" xmlrpc:"volumeCount,omitempty"`
+
+	// These are the SoftLayer_Network_Storage volumes that this credential is assigned to.
+	Volumes []Network_Storage `json:"volumes,omitempty" xmlrpc:"volumes,omitempty"`
+}
+
+// no documentation yet
+type Network_Storage_Credential_Type struct {
+	Entity
+
+	// The date a credential type was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// A short description of the credential type
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The key name of the credential type.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The date a credential was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// The human readable name of the credential type.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Network_Storage_Daily_Usage struct {
+	Entity
+
+	// no documentation yet
+	BytesUsed *uint `json:"bytesUsed,omitempty" xmlrpc:"bytesUsed,omitempty"`
+
+	// no documentation yet
+	CdnHttpBandwidth *uint `json:"cdnHttpBandwidth,omitempty" xmlrpc:"cdnHttpBandwidth,omitempty"`
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	NasVolume *Network_Storage `json:"nasVolume,omitempty" xmlrpc:"nasVolume,omitempty"`
+
+	// no documentation yet
+	NasVolumeId *int `json:"nasVolumeId,omitempty" xmlrpc:"nasVolumeId,omitempty"`
+
+	// no documentation yet
+	PublicBandwidthOut *uint `json:"publicBandwidthOut,omitempty" xmlrpc:"publicBandwidthOut,omitempty"`
+}
+
+// Storage volumes can create various events to keep track of what has occurred to the volume. Events provide an audit trail that can be used to verify that various tasks have occurred, such as snapshots created by a schedule or remote replication synchronization.
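+//
+// A hedged sketch of walking a volume's event audit trail (assumes this
+// library generates a GetEvents getter for the events property on the
+// SoftLayer_Network_Storage service; error handling elided):
+//
+//	events, err := services.GetNetworkStorageService(sess).Id(volumeId).GetEvents()
+//	if err == nil {
+//		for _, e := range events {
+//			if e.Message != nil {
+//				fmt.Println(*e.Message)
+//			}
+//		}
+//	}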
+type Network_Storage_Event struct { + Entity + + // The date an event was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The message text for an event. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // A schedule that is associated with an event. Not all events will have a schedule. + Schedule *Network_Storage_Schedule `json:"schedule,omitempty" xmlrpc:"schedule,omitempty"` + + // An identifier for the schedule which is associated with an event. + ScheduleId *int `json:"scheduleId,omitempty" xmlrpc:"scheduleId,omitempty"` + + // An identifier for the type of an event. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // The associated volume for an event. + Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"` + + // The volume id which an event is associated with. + VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"` +} + +// no documentation yet +type Network_Storage_Group struct { + Entity + + // The SoftLayer_Account which owns this group. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account ID which owns this group + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The friendly name of this group + Alias *string `json:"alias,omitempty" xmlrpc:"alias,omitempty"` + + // A count of the allowed hosts list for this group. + AllowedHostCount *uint `json:"allowedHostCount,omitempty" xmlrpc:"allowedHostCount,omitempty"` + + // The allowed hosts list for this group. + AllowedHosts []Network_Storage_Allowed_Host `json:"allowedHosts,omitempty" xmlrpc:"allowedHosts,omitempty"` + + // A count of the network storage volumes this group is attached to. + AttachedVolumeCount *uint `json:"attachedVolumeCount,omitempty" xmlrpc:"attachedVolumeCount,omitempty"` + + // The network storage volumes this group is attached to. + AttachedVolumes []Network_Storage `json:"attachedVolumes,omitempty" xmlrpc:"attachedVolumes,omitempty"` + + // The date this group was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The type which defines this group. + GroupType *Network_Storage_Group_Type `json:"groupType,omitempty" xmlrpc:"groupType,omitempty"` + + // The SoftLayer_Network_Storage_Group_Type which describes this group. + GroupTypeId *int `json:"groupTypeId,omitempty" xmlrpc:"groupTypeId,omitempty"` + + // The internal identifier of the group + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The OS Type this group is configured for. + OsType *Network_Storage_Iscsi_OS_Type `json:"osType,omitempty" xmlrpc:"osType,omitempty"` + + // A SoftLayer_Network_Storage_OS_Type Operating System designation that this group was created for. + OsTypeId *int `json:"osTypeId,omitempty" xmlrpc:"osTypeId,omitempty"` + + // The network resource this group is created on. + ServiceResource *Network_Service_Resource `json:"serviceResource,omitempty" xmlrpc:"serviceResource,omitempty"` + + // A SoftLayer_Network_Service_Resource that this group was created on. 
+	ServiceResourceId *int `json:"serviceResourceId,omitempty" xmlrpc:"serviceResourceId,omitempty"`
+}
+
+// no documentation yet
+type Network_Storage_Group_Iscsi struct {
+	Network_Storage_Group
+}
+
+// no documentation yet
+type Network_Storage_Group_Nfs struct {
+	Network_Storage_Group
+}
+
+// no documentation yet
+type Network_Storage_Group_Type struct {
+	Entity
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Network_Storage_History contains the username/password history for Storage services except EVault. Information such as the username, passwords, notes and the date of the password change may be retrieved.
+type Network_Storage_History struct {
+	Entity
+
+	// The account that the Storage service belongs to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// Date the password was changed.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The Storage service that the password history belongs to.
+	NasVolume *Network_Storage `json:"nasVolume,omitempty" xmlrpc:"nasVolume,omitempty"`
+
+	// Past notes for the Storage service.
+	Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+	// Password for the Storage service that was used in the past.
+	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+	// Username for the Storage service.
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// The SoftLayer_Network_Storage_Hub data type models Virtual Server type Storage offerings.
+type Network_Storage_Hub struct {
+	Network_Storage
+
+	// A count of the billing items tied to a Storage service's bandwidth usage.
+	BandwidthBillingItemCount *uint `json:"bandwidthBillingItemCount,omitempty" xmlrpc:"bandwidthBillingItemCount,omitempty"`
+
+	// The billing items tied to a Storage service's bandwidth usage.
+	BandwidthBillingItems []Billing_Item `json:"bandwidthBillingItems,omitempty" xmlrpc:"bandwidthBillingItems,omitempty"`
+}
+
+// no documentation yet
+type Network_Storage_Hub_Cleversafe_Account struct {
+	Entity
+
+	// The SoftLayer account to which an IBM Cloud Object Storage account belongs.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// The ID of the SoftLayer_Account which this IBM Cloud Object Storage account is associated with.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// An associated parent billing item which is active. Includes billing items which are scheduled to be cancelled in the future.
+	BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+	// An associated parent billing item which has been cancelled.
+	CancelledBillingItem *Billing_Item `json:"cancelledBillingItem,omitempty" xmlrpc:"cancelledBillingItem,omitempty"`
+
+	// A count of credentials used for generating an AWS signature. Max of 2.
+	CredentialCount *uint `json:"credentialCount,omitempty" xmlrpc:"credentialCount,omitempty"`
+
+	// Credentials used for generating an AWS signature. Max of 2.
+	Credentials []Network_Storage_Credential `json:"credentials,omitempty" xmlrpc:"credentials,omitempty"`
+
+	// The IMS ID of an IBM Cloud Object Storage account.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Provides an interface to various metrics relating to the usage of an IBM Cloud Object Storage account. + MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"` + + // A user-defined field of notes. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // Human readable identifier of IBM Cloud Object Storage accounts. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` + + // Unique identifier for an IBM Cloud Object Storage account. + Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"` +} + +// no documentation yet +type Network_Storage_Hub_Swift struct { + Network_Storage_Hub + + // A count of + StorageNodeCount *uint `json:"storageNodeCount,omitempty" xmlrpc:"storageNodeCount,omitempty"` + + // no documentation yet + StorageNodes []Network_Service_Resource `json:"storageNodes,omitempty" xmlrpc:"storageNodes,omitempty"` +} + +// no documentation yet +type Network_Storage_Hub_Swift_Container struct { + Network_Storage_Hub_Swift +} + +// no documentation yet +type Network_Storage_Hub_Swift_Share struct { + Entity +} + +// no documentation yet +type Network_Storage_Hub_Swift_Version1 struct { + Network_Storage_Hub_Swift +} + +// The iscsi data type provides access to additional information about an iscsi volume such as the snapshot capacity limit and replication partners. +type Network_Storage_Iscsi struct { + Network_Storage +} + +// The iscsi EqualLogic Version 3 data type provides access to additional information about an iscsi volume such as the available snapshot reserve space. +type Network_Storage_Iscsi_EqualLogic_Version3 struct { + Network_Storage_Iscsi +} + +// An iscsi replicant receives incoming data from an associated iscsi volume. While the replicant is not in failover mode it will not be mountable. Upon failover the replicant can be mounted and used as a normal volume. It is suggested to only do this as part of a disaster recovery plan. +type Network_Storage_Iscsi_EqualLogic_Version3_Replicant struct { + Network_Storage_Iscsi_EqualLogic_Version3 + + // When a replicant is in the process of synchronizing with the parent volume this flag will be true. + FailbackInProgressFlag *bool `json:"failbackInProgressFlag,omitempty" xmlrpc:"failbackInProgressFlag,omitempty"` + + // The volume name for an iscsi replicant. + VolumeName *string `json:"volumeName,omitempty" xmlrpc:"volumeName,omitempty"` +} + +// An iscsi snapshot is a point-in-time view of the data on an associated iscsi volume. Iscsi snapshots use a copy-on-write technology to minimize the amount of snapshot space used. When a snapshot is initially created it will use no snapshot space. At the time data changes on a volume which existed when a snapshot was created the original data will be saved in the associated volume's snapshot reserve space. +// +// As a snapshot is created offline it must be set mountable in order to mount it via an iscsi initiator service. +type Network_Storage_Iscsi_EqualLogic_Version3_Snapshot struct { + Network_Storage_Iscsi_EqualLogic_Version3 + + // If applicable, the schedule which was executed to create a snapshot. + CreationSchedule *Network_Storage_Schedule `json:"creationSchedule,omitempty" xmlrpc:"creationSchedule,omitempty"` + + // The volume name for an iscsi snapshot. 
+ VolumeName *string `json:"volumeName,omitempty" xmlrpc:"volumeName,omitempty"` +} + +// no documentation yet +type Network_Storage_Iscsi_OS_Type struct { + Entity + + // The date this OS type record was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The description of this OS type. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The internal identifier of the OS type selection. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The key name of this OS type. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of this OS type. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Network_Storage_Nas contains general information regarding a NAS Storage service such as account id, username, password, maximum capacity, the Storage product type, and capacity. +type Network_Storage_Nas struct { + Network_Storage + + // no documentation yet + RecentBytesUsed *Network_Storage_Daily_Usage `json:"recentBytesUsed,omitempty" xmlrpc:"recentBytesUsed,omitempty"` +} + +// The SoftLayer_Network_Storage_OpenStack_Object data type models OpenStack-specific object storage objects. These storages authenticate through Keystone to access Swift. +type Network_Storage_OpenStack_Object struct { + Network_Storage + + // A count of the billing item tied to an OpenStack Object Storage's bandwidth service. + BandwidthBillingItemCount *uint `json:"bandwidthBillingItemCount,omitempty" xmlrpc:"bandwidthBillingItemCount,omitempty"` + + // The billing item tied to an OpenStack Object Storage's bandwidth service. + BandwidthBillingItems []Billing_Item `json:"bandwidthBillingItems,omitempty" xmlrpc:"bandwidthBillingItems,omitempty"` +} + +// A network storage partnership is used to link multiple volumes to each other. These partnerships describe replication hierarchies or link volume snapshots to their associated storage volume. +type Network_Storage_Partnership struct { + Entity + + // The date a partnership was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The date a partnership was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The associated child volume for a partnership. + PartnerVolume *Network_Storage `json:"partnerVolume,omitempty" xmlrpc:"partnerVolume,omitempty"` + + // The child volume id which a partnership is associated with. + PartnerVolumeId *int `json:"partnerVolumeId,omitempty" xmlrpc:"partnerVolumeId,omitempty"` + + // The type provides a standardized definition for a partnership. + Type *Network_Storage_Partnership_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The associated parent volume for a partnership. + Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"` + + // The volume id which a partnership is associated with. + VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"` +} + +// A network storage partnership type is used to define the link between two volumes. +type Network_Storage_Partnership_Type struct { + Entity + + // A type's description, for example 'ISCSI snapshot partnership'. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A type's key name, for example 'ISCSI_SNAPSHOT'. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // A type's name, for example 'ISCSI Snapshot'.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// A property provides additional information about the volume to which it is assigned. This information can range from "Mountable" flags to utilized snapshot space. +type Network_Storage_Property struct { + Entity + + // The date a property was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The date a property was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The type provides a standardized definition for a property. + Type *Network_Storage_Property_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The value of a property. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` + + // The associated volume for a property. + Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"` + + // The volume id which a property is associated with. + VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"` +} + +// The storage property types provide standard definitions for properties which can be used with any type of Storage offering. The properties provide additional information about the volumes to which they are assigned. +type Network_Storage_Property_Type struct { + Entity + + // A type's description, for example 'Determines whether the volume is currently mountable'. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A type's keyname, for example 'MOUNTABLE'. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // A type's name, for example 'Mountable'. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Storage_Replicant struct { + Network_Storage + + // When a replicant is in the process of synchronizing with the parent volume, this flag will be true. + FailbackInProgressFlag *string `json:"failbackInProgressFlag,omitempty" xmlrpc:"failbackInProgressFlag,omitempty"` + + // The volume name for a replicant. + VolumeName *string `json:"volumeName,omitempty" xmlrpc:"volumeName,omitempty"` +} + +// Schedules can be created for select Storage services, such as iscsi. These schedules are used to perform various tasks such as scheduling snapshots or synchronizing replicants. +type Network_Storage_Schedule struct { + Entity + + // A flag which determines if a schedule is active. + Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"` + + // The date a schedule was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The day of the month parameter of this schedule. + DayOfMonth *string `json:"dayOfMonth,omitempty" xmlrpc:"dayOfMonth,omitempty"` + + // The day of the week parameter of this schedule. + DayOfWeek *string `json:"dayOfWeek,omitempty" xmlrpc:"dayOfWeek,omitempty"` + + // A count of events which have been created as the result of a schedule execution. + EventCount *uint `json:"eventCount,omitempty" xmlrpc:"eventCount,omitempty"` + + // Events which have been created as the result of a schedule execution. + Events []Network_Storage_Event `json:"events,omitempty" xmlrpc:"events,omitempty"` + + // The hour parameter of this schedule. + Hour *string `json:"hour,omitempty" xmlrpc:"hour,omitempty"` + + // A schedule's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The minute parameter of this schedule.
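+
+// Editor's sketch - the period fields of a schedule (dayOfMonth, dayOfWeek,
+// hour, minute, monthOfYear) are string values resembling cron fields; that
+// reading is an assumption. A daily 02:30 snapshot schedule would then be:
+//
+//    hour, minute := "2", "30" // hypothetical values
+//    daily := Network_Storage_Schedule{Hour: &hour, Minute: &minute}
+//    _ = daily // DayOfMonth, DayOfWeek and MonthOfYear left nil
+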
+ Minute *string `json:"minute,omitempty" xmlrpc:"minute,omitempty"` + + // The date a schedule was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The month of the year parameter of this schedule. + MonthOfYear *string `json:"monthOfYear,omitempty" xmlrpc:"monthOfYear,omitempty"` + + // A schedule's name, for example 'Daily'. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The associated partnership for a schedule. + Partnership *Network_Storage_Partnership `json:"partnership,omitempty" xmlrpc:"partnership,omitempty"` + + // The partnership id which a schedule is associated with. + PartnershipId *int `json:"partnershipId,omitempty" xmlrpc:"partnershipId,omitempty"` + + // Properties used for configuration of a schedule. + Properties []Network_Storage_Schedule_Property `json:"properties,omitempty" xmlrpc:"properties,omitempty"` + + // A count of properties used for configuration of a schedule. + PropertyCount *uint `json:"propertyCount,omitempty" xmlrpc:"propertyCount,omitempty"` + + // A count of replica snapshots which have been created as the result of this schedule's execution. + ReplicaSnapshotCount *uint `json:"replicaSnapshotCount,omitempty" xmlrpc:"replicaSnapshotCount,omitempty"` + + // Replica snapshots which have been created as the result of this schedule's execution. + ReplicaSnapshots []Network_Storage `json:"replicaSnapshots,omitempty" xmlrpc:"replicaSnapshots,omitempty"` + + // The number of snapshots this schedule is configured to retain. + RetentionCount *string `json:"retentionCount,omitempty" xmlrpc:"retentionCount,omitempty"` + + // A count of snapshots which have been created as the result of this schedule's execution. + SnapshotCount *uint `json:"snapshotCount,omitempty" xmlrpc:"snapshotCount,omitempty"` + + // Snapshots which have been created as the result of this schedule's execution. + Snapshots []Network_Storage `json:"snapshots,omitempty" xmlrpc:"snapshots,omitempty"` + + // The type provides a standardized definition for a schedule. + Type *Network_Storage_Schedule_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The type id which a schedule is associated with. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // The associated volume for a schedule. + Volume *Network_Storage `json:"volume,omitempty" xmlrpc:"volume,omitempty"` + + // The volume id which a schedule is associated with. + VolumeId *int `json:"volumeId,omitempty" xmlrpc:"volumeId,omitempty"` +} + +// Schedule properties provide attributes such as start date, end date, interval, and other properties to a storage schedule. +type Network_Storage_Schedule_Property struct { + Entity + + // The date a schedule property was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A schedule property's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date a schedule property was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The associated schedule for a property. + Schedule *Network_Storage_Schedule `json:"schedule,omitempty" xmlrpc:"schedule,omitempty"` + + // The type provides a standardized definition for a property. + Type *Network_Storage_Schedule_Property_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // An identifier for the type of a property. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` + + // The value of a property. 
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// A schedule property type is used to allow for a standardized method of defining network storage schedules. +type Network_Storage_Schedule_Property_Type struct { + Entity + + // A type's description, for example 'Date for the schedule to start.'. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A schedule property type's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A schedule property type's key name, for example 'START_DATE'. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // A schedule property type's name, for example 'Start Date'. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The type of Storage volume type which a property type may be associated with. + NasType *string `json:"nasType,omitempty" xmlrpc:"nasType,omitempty"` +} + +// A schedule type is used to define what a schedule was created to do. When creating a schedule to take snapshots of a volume, the 'Snapshot' schedule type would be used. +type Network_Storage_Schedule_Type struct { + Entity + + // A schedule type's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A schedule type's key name, for example 'SNAPSHOT'. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // A schedule type's name, for example 'Snapshot'. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Network_Storage_Snapshot struct { + Network_Storage + + // If applicable, the schedule which was executed to create a snapshot. + CreationSchedule *Network_Storage_Schedule `json:"creationSchedule,omitempty" xmlrpc:"creationSchedule,omitempty"` + + // The volume name for the snapshot. + VolumeName *string `json:"volumeName,omitempty" xmlrpc:"volumeName,omitempty"` +} + +// The SoftLayer_Network_Storage_Type contains a description of the associated SoftLayer_Network_Storage object. +type Network_Storage_Type struct { + Entity + + // Human readable description for the associated SoftLayer_Network_Storage object. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // ID which corresponds with storageTypeId on storage objects. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Machine readable description code for the associated SoftLayer_Network_Storage object. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of the SoftLayer_Network_Storage objects that use this type. + VolumeCount *uint `json:"volumeCount,omitempty" xmlrpc:"volumeCount,omitempty"` + + // The SoftLayer_Network_Storage objects that use this type. + Volumes []Network_Storage `json:"volumes,omitempty" xmlrpc:"volumes,omitempty"` +} + +// The SoftLayer_Network_Subnet data type contains general information relating to a single SoftLayer subnet. Personal information in this type such as names, addresses, and phone numbers is assigned to the account only and not to users belonging to the account. +type Network_Subnet struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // If present, the active registration for this subnet. + ActiveRegistration *Network_Subnet_Registration `json:"activeRegistration,omitempty" xmlrpc:"activeRegistration,omitempty"` + + // All the swip transactions associated with a subnet that are still active.
+ ActiveSwipTransaction *Network_Subnet_Swip_Transaction `json:"activeSwipTransaction,omitempty" xmlrpc:"activeSwipTransaction,omitempty"` + + // The billing item for a subnet. + ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"` + + // Identifier which distinguishes whether the subnet is public or private address space. + AddressSpace *string `json:"addressSpace,omitempty" xmlrpc:"addressSpace,omitempty"` + + // The SoftLayer_Network_Storage_Allowed_Host information to connect this Subnet to Network Storage supporting access control lists. + AllowedHost *Network_Storage_Allowed_Host `json:"allowedHost,omitempty" xmlrpc:"allowedHost,omitempty"` + + // The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorage []Network_Storage `json:"allowedNetworkStorage,omitempty" xmlrpc:"allowedNetworkStorage,omitempty"` + + // A count of the SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorageCount *uint `json:"allowedNetworkStorageCount,omitempty" xmlrpc:"allowedNetworkStorageCount,omitempty"` + + // A count of the SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to. + AllowedNetworkStorageReplicaCount *uint `json:"allowedNetworkStorageReplicaCount,omitempty" xmlrpc:"allowedNetworkStorageReplicaCount,omitempty"` + + // The SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to. + AllowedNetworkStorageReplicas []Network_Storage `json:"allowedNetworkStorageReplicas,omitempty" xmlrpc:"allowedNetworkStorageReplicas,omitempty"` + + // The billing item for a subnet. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // A count of + BoundDescendantCount *uint `json:"boundDescendantCount,omitempty" xmlrpc:"boundDescendantCount,omitempty"` + + // no documentation yet + BoundDescendants []Network_Subnet `json:"boundDescendants,omitempty" xmlrpc:"boundDescendants,omitempty"` + + // A count of + BoundRouterCount *uint `json:"boundRouterCount,omitempty" xmlrpc:"boundRouterCount,omitempty"` + + // Whether or not this subnet is associated with a router. Subnets that are not associated with a router cannot be routed. + BoundRouterFlag *bool `json:"boundRouterFlag,omitempty" xmlrpc:"boundRouterFlag,omitempty"` + + // no documentation yet + BoundRouters []Hardware `json:"boundRouters,omitempty" xmlrpc:"boundRouters,omitempty"` + + // The last IP address in a subnet is the subnet's broadcast address. This is an IP address that will broadcast network requests to the entire subnet and may not be assigned to a network interface. + BroadcastAddress *string `json:"broadcastAddress,omitempty" xmlrpc:"broadcastAddress,omitempty"` + + // no documentation yet + Children []Network_Subnet `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // A subnet's Classless Inter-Domain Routing prefix. This is a number between 0 and 32 signifying the number of bits in a subnet's netmask. These bits separate a subnet's network address from its host addresses. It performs the same function as the netmask property, but is represented as an integer. + Cidr *int `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // The data center this subnet may be routed within.
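+
+// Editor's sketch of the cidr/netmask equivalence described above, using
+// only the standard library's net package (IPv4 case):
+//
+//    mask := net.CIDRMask(29, 32)    // cidr 29
+//    dotted := net.IP(mask).String() // "255.255.255.248", the netmask form
+//    _ = dotted
+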
+ Datacenter *Location_Datacenter `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // A count of + DescendantCount *uint `json:"descendantCount,omitempty" xmlrpc:"descendantCount,omitempty"` + + // no documentation yet + Descendants []Network_Subnet `json:"descendants,omitempty" xmlrpc:"descendants,omitempty"` + + // no documentation yet + DisplayLabel *string `json:"displayLabel,omitempty" xmlrpc:"displayLabel,omitempty"` + + // A statically routed IP address. + EndPointIpAddress *Network_Subnet_IpAddress `json:"endPointIpAddress,omitempty" xmlrpc:"endPointIpAddress,omitempty"` + + // A subnet's gateway address. This is an IP address that belongs to the router on the subnet and may not be assigned to a network interface. + Gateway *string `json:"gateway,omitempty" xmlrpc:"gateway,omitempty"` + + // no documentation yet + GlobalIpRecord *Network_Subnet_IpAddress_Global `json:"globalIpRecord,omitempty" xmlrpc:"globalIpRecord,omitempty"` + + // The hardware using IP addresses on this subnet. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of the hardware using IP addresses on this subnet. + HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // A subnet's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of all the ip addresses associated with a subnet. + IpAddressCount *uint `json:"ipAddressCount,omitempty" xmlrpc:"ipAddressCount,omitempty"` + + // All the ip addresses associated with a subnet. + IpAddresses []Network_Subnet_IpAddress `json:"ipAddresses,omitempty" xmlrpc:"ipAddresses,omitempty"` + + // no documentation yet + IsCustomerOwned *bool `json:"isCustomerOwned,omitempty" xmlrpc:"isCustomerOwned,omitempty"` + + // no documentation yet + IsCustomerRoutable *bool `json:"isCustomerRoutable,omitempty" xmlrpc:"isCustomerRoutable,omitempty"` + + // The last time this subnet was modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A bitmask in dotted-quad format that is used to separate a subnet's network address from its host addresses. This performs the same function as the cidr property, but is expressed in a string format. + Netmask *string `json:"netmask,omitempty" xmlrpc:"netmask,omitempty"` + + // A subnet's associated network component. + NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // The upstream network component firewall. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // The Private Network identifier this subnet is within, if applicable. + NetworkId *int `json:"networkId,omitempty" xmlrpc:"networkId,omitempty"` + + // A subnet's network identifier. This is the first IP address of a subnet and may not be assigned to a network interface. + NetworkIdentifier *string `json:"networkIdentifier,omitempty" xmlrpc:"networkIdentifier,omitempty"` + + // A count of + NetworkProtectionAddressCount *uint `json:"networkProtectionAddressCount,omitempty" xmlrpc:"networkProtectionAddressCount,omitempty"` + + // no documentation yet + NetworkProtectionAddresses []Network_Protection_Address `json:"networkProtectionAddresses,omitempty" xmlrpc:"networkProtectionAddresses,omitempty"` + + // A count of IPSec network tunnels that have access to a private subnet.
+ NetworkTunnelContextCount *uint `json:"networkTunnelContextCount,omitempty" xmlrpc:"networkTunnelContextCount,omitempty"` + + // IPSec network tunnels that have access to a private subnet. + NetworkTunnelContexts []Network_Tunnel_Module_Context `json:"networkTunnelContexts,omitempty" xmlrpc:"networkTunnelContexts,omitempty"` + + // The VLAN object that a subnet is associated with. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // A subnet's associated VLAN's internal identifier. + NetworkVlanId *int `json:"networkVlanId,omitempty" xmlrpc:"networkVlanId,omitempty"` + + // This is the note field. + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // The pod in which this subnet resides. + PodName *string `json:"podName,omitempty" xmlrpc:"podName,omitempty"` + + // A count of + ProtectedIpAddressCount *uint `json:"protectedIpAddressCount,omitempty" xmlrpc:"protectedIpAddressCount,omitempty"` + + // no documentation yet + ProtectedIpAddresses []Network_Subnet_IpAddress `json:"protectedIpAddresses,omitempty" xmlrpc:"protectedIpAddresses,omitempty"` + + // no documentation yet + RegionalInternetRegistry *Network_Regional_Internet_Registry `json:"regionalInternetRegistry,omitempty" xmlrpc:"regionalInternetRegistry,omitempty"` + + // A count of all registrations that have been created for this subnet. + RegistrationCount *uint `json:"registrationCount,omitempty" xmlrpc:"registrationCount,omitempty"` + + // All registrations that have been created for this subnet. + Registrations []Network_Subnet_Registration `json:"registrations,omitempty" xmlrpc:"registrations,omitempty"` + + // A count of the resource groups in which this subnet is a member. + ResourceGroupCount *uint `json:"resourceGroupCount,omitempty" xmlrpc:"resourceGroupCount,omitempty"` + + // The resource groups in which this subnet is a member. + ResourceGroups []Resource_Group `json:"resourceGroups,omitempty" xmlrpc:"resourceGroups,omitempty"` + + // The reverse DNS domain associated with this subnet. + ReverseDomain *Dns_Domain `json:"reverseDomain,omitempty" xmlrpc:"reverseDomain,omitempty"` + + // An identifier of the role the subnet is within. Roles dictate how a subnet may be used. + RoleKeyName *string `json:"roleKeyName,omitempty" xmlrpc:"roleKeyName,omitempty"` + + // The name of the role the subnet is within. Roles dictate how a subnet may be used. + RoleName *string `json:"roleName,omitempty" xmlrpc:"roleName,omitempty"` + + // The identifier for the type of route the subnet is currently configured for. + RoutingTypeKeyName *string `json:"routingTypeKeyName,omitempty" xmlrpc:"routingTypeKeyName,omitempty"` + + // The name for the type of route the subnet is currently configured for. + RoutingTypeName *string `json:"routingTypeName,omitempty" xmlrpc:"routingTypeName,omitempty"` + + // A subnet can be one of several types: PRIMARY, ADDITIONAL_PRIMARY, SECONDARY, ROUTED_TO_VLAN, SECONDARY_ON_VLAN, and STATIC_IP_ROUTED. The type determines the order in which many subnets are sorted in the SoftLayer customer portal. This groups subnets of similar type together. + SortOrder *string `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"` + + // A subnet can be one of several types: PRIMARY, ADDITIONAL_PRIMARY, SECONDARY, ROUTED_TO_VLAN, SECONDARY_ON_VLAN, STORAGE_NETWORK, and STATIC_IP_ROUTED. A "PRIMARY" subnet is the primary network bound to a VLAN within the SoftLayer network. An "ADDITIONAL_PRIMARY" subnet is bound to a network VLAN to augment the pool of available primary IP addresses that may be assigned to a server. A "SECONDARY" subnet is any of the secondary subnets bound to a VLAN interface. A "ROUTED_TO_VLAN" subnet is a portable subnet that can be routed to any server on a VLAN. A "SECONDARY_ON_VLAN" subnet also doesn't exist as a VLAN interface, but is routed directly to a VLAN instead of a single IP address by SoftLayer's routers. + SubnetType *string `json:"subnetType,omitempty" xmlrpc:"subnetType,omitempty"` + + // All the swip transactions associated with a subnet. + SwipTransaction []Network_Subnet_Swip_Transaction `json:"swipTransaction,omitempty" xmlrpc:"swipTransaction,omitempty"` + + // A count of all the swip transactions associated with a subnet. + SwipTransactionCount *uint `json:"swipTransactionCount,omitempty" xmlrpc:"swipTransactionCount,omitempty"` + + // The number of IP addresses contained within this subnet. + TotalIpAddresses *Float64 `json:"totalIpAddresses,omitempty" xmlrpc:"totalIpAddresses,omitempty"` + + // A count of + UnboundDescendantCount *uint `json:"unboundDescendantCount,omitempty" xmlrpc:"unboundDescendantCount,omitempty"` + + // no documentation yet + UnboundDescendants []Network_Subnet `json:"unboundDescendants,omitempty" xmlrpc:"unboundDescendants,omitempty"` + + // The number of IP addresses that can be addressed within this subnet. For IPv4 subnets with a CIDR value of at most 30, a discount of 3 is taken from the total number of IP addresses for the subnet's unusable network, gateway and broadcast IP addresses. For IPv6 subnets with a CIDR value of at most 126, a discount of 2 is taken for the subnet's network and gateway IP addresses. + UsableIpAddressCount *Float64 `json:"usableIpAddressCount,omitempty" xmlrpc:"usableIpAddressCount,omitempty"` + + // Provides the total number of utilized IP addresses on this subnet. The primary consumers of IP addresses are compute resources, which can consume more than one address. This value is only supported for primary subnet types. + UtilizedIpAddressCount *uint `json:"utilizedIpAddressCount,omitempty" xmlrpc:"utilizedIpAddressCount,omitempty"` + + // This is the Internet Protocol version. Current values may be either 4 or 6. + Version *int `json:"version,omitempty" xmlrpc:"version,omitempty"` + + // A count of the Virtual Servers using IP addresses on this subnet. + VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"` + + // The Virtual Servers using IP addresses on this subnet. + VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"` +} + +// The SoftLayer_Network_Subnet_IpAddress data type contains general information relating to a single SoftLayer IPv4 address. +type Network_Subnet_IpAddress struct { + Entity + + // The SoftLayer_Network_Storage_Allowed_Host information to connect this IP Address to Network Storage supporting access control lists. + AllowedHost *Network_Storage_Allowed_Host `json:"allowedHost,omitempty" xmlrpc:"allowedHost,omitempty"` + + // The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. + AllowedNetworkStorage []Network_Storage `json:"allowedNetworkStorage,omitempty" xmlrpc:"allowedNetworkStorage,omitempty"` + + // A count of the SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to.
+ AllowedNetworkStorageCount *uint `json:"allowedNetworkStorageCount,omitempty" xmlrpc:"allowedNetworkStorageCount,omitempty"` + + // A count of the SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to. + AllowedNetworkStorageReplicaCount *uint `json:"allowedNetworkStorageReplicaCount,omitempty" xmlrpc:"allowedNetworkStorageReplicaCount,omitempty"` + + // The SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to. + AllowedNetworkStorageReplicas []Network_Storage `json:"allowedNetworkStorageReplicas,omitempty" xmlrpc:"allowedNetworkStorageReplicas,omitempty"` + + // The application delivery controller using this address. + ApplicationDeliveryController *Network_Application_Delivery_Controller `json:"applicationDeliveryController,omitempty" xmlrpc:"applicationDeliveryController,omitempty"` + + // A count of an IPSec network tunnel's address translations. These translations use a SoftLayer ip address from an assigned static NAT subnet to deliver the packets to the remote (customer) destination. + ContextTunnelTranslationCount *uint `json:"contextTunnelTranslationCount,omitempty" xmlrpc:"contextTunnelTranslationCount,omitempty"` + + // An IPSec network tunnel's address translations. These translations use a SoftLayer ip address from an assigned static NAT subnet to deliver the packets to the remote (customer) destination. + ContextTunnelTranslations []Network_Tunnel_Module_Context_Address_Translation `json:"contextTunnelTranslations,omitempty" xmlrpc:"contextTunnelTranslations,omitempty"` + + // A count of all the subnets routed to an IP address. + EndpointSubnetCount *uint `json:"endpointSubnetCount,omitempty" xmlrpc:"endpointSubnetCount,omitempty"` + + // All the subnets routed to an IP address. + EndpointSubnets []Network_Subnet `json:"endpointSubnets,omitempty" xmlrpc:"endpointSubnets,omitempty"` + + // A network component that is statically routed to an IP address. + GuestNetworkComponent *Virtual_Guest_Network_Component `json:"guestNetworkComponent,omitempty" xmlrpc:"guestNetworkComponent,omitempty"` + + // A network component that is statically routed to an IP address. + GuestNetworkComponentBinding *Virtual_Guest_Network_Component_IpAddress `json:"guestNetworkComponentBinding,omitempty" xmlrpc:"guestNetworkComponentBinding,omitempty"` + + // A server that this IP address is routed to. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // An IP's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // An IP address expressed in dotted quad format. + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // Indicates if an IP address is reserved to be used as the network broadcast address and cannot be assigned to a network interface. + IsBroadcast *bool `json:"isBroadcast,omitempty" xmlrpc:"isBroadcast,omitempty"` + + // Indicates if an IP address is reserved to a gateway and cannot be assigned to a network interface. + IsGateway *bool `json:"isGateway,omitempty" xmlrpc:"isGateway,omitempty"` + + // Indicates if an IP address is reserved to a network address and cannot be assigned to a network interface. + IsNetwork *bool `json:"isNetwork,omitempty" xmlrpc:"isNetwork,omitempty"` + + // Indicates if an IP address is reserved and cannot be assigned to a network interface. + IsReserved *bool `json:"isReserved,omitempty" xmlrpc:"isReserved,omitempty"` + + // A network component that is statically routed to an IP address.
+ NetworkComponent *Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"` + + // An IP address' user defined note. + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // The network gateway appliance using this address as the private IP address. + PrivateNetworkGateway *Network_Gateway `json:"privateNetworkGateway,omitempty" xmlrpc:"privateNetworkGateway,omitempty"` + + // no documentation yet + ProtectionAddress []Network_Protection_Address `json:"protectionAddress,omitempty" xmlrpc:"protectionAddress,omitempty"` + + // A count of + ProtectionAddressCount *uint `json:"protectionAddressCount,omitempty" xmlrpc:"protectionAddressCount,omitempty"` + + // The network gateway appliance using this address as the public IP address. + PublicNetworkGateway *Network_Gateway `json:"publicNetworkGateway,omitempty" xmlrpc:"publicNetworkGateway,omitempty"` + + // An IPMI-based management network component of the IP address. + RemoteManagementNetworkComponent *Network_Component `json:"remoteManagementNetworkComponent,omitempty" xmlrpc:"remoteManagementNetworkComponent,omitempty"` + + // An IP address' associated subnet. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // An IP address' subnet id. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` + + // All events for this IP address stored in the datacenter syslogs from the last 24 hours. + SyslogEventsOneDay []Network_Logging_Syslog `json:"syslogEventsOneDay,omitempty" xmlrpc:"syslogEventsOneDay,omitempty"` + + // A count of all events for this IP address stored in the datacenter syslogs from the last 24 hours. + SyslogEventsOneDayCount *uint `json:"syslogEventsOneDayCount,omitempty" xmlrpc:"syslogEventsOneDayCount,omitempty"` + + // A count of all events for this IP address stored in the datacenter syslogs from the last 7 days. + SyslogEventsSevenDayCount *uint `json:"syslogEventsSevenDayCount,omitempty" xmlrpc:"syslogEventsSevenDayCount,omitempty"` + + // All events for this IP address stored in the datacenter syslogs from the last 7 days. + SyslogEventsSevenDays []Network_Logging_Syslog `json:"syslogEventsSevenDays,omitempty" xmlrpc:"syslogEventsSevenDays,omitempty"` + + // Top ten network datacenter syslog events, grouped by destination port, for the last 24 hours. + TopTenSyslogEventsByDestinationPortOneDay []Network_Logging_Syslog `json:"topTenSyslogEventsByDestinationPortOneDay,omitempty" xmlrpc:"topTenSyslogEventsByDestinationPortOneDay,omitempty"` + + // A count of top ten network datacenter syslog events, grouped by destination port, for the last 24 hours. + TopTenSyslogEventsByDestinationPortOneDayCount *uint `json:"topTenSyslogEventsByDestinationPortOneDayCount,omitempty" xmlrpc:"topTenSyslogEventsByDestinationPortOneDayCount,omitempty"` + + // A count of top ten network datacenter syslog events, grouped by destination port, for the last 7 days. + TopTenSyslogEventsByDestinationPortSevenDayCount *uint `json:"topTenSyslogEventsByDestinationPortSevenDayCount,omitempty" xmlrpc:"topTenSyslogEventsByDestinationPortSevenDayCount,omitempty"` + + // Top ten network datacenter syslog events, grouped by destination port, for the last 7 days. + TopTenSyslogEventsByDestinationPortSevenDays []Network_Logging_Syslog `json:"topTenSyslogEventsByDestinationPortSevenDays,omitempty" xmlrpc:"topTenSyslogEventsByDestinationPortSevenDays,omitempty"` + + // Top ten network datacenter syslog events, grouped by protocol, for the last 24 hours. + TopTenSyslogEventsByProtocolsOneDay []Network_Logging_Syslog `json:"topTenSyslogEventsByProtocolsOneDay,omitempty" xmlrpc:"topTenSyslogEventsByProtocolsOneDay,omitempty"` + + // A count of top ten network datacenter syslog events, grouped by protocol, for the last 24 hours. + TopTenSyslogEventsByProtocolsOneDayCount *uint `json:"topTenSyslogEventsByProtocolsOneDayCount,omitempty" xmlrpc:"topTenSyslogEventsByProtocolsOneDayCount,omitempty"` + + // A count of top ten network datacenter syslog events, grouped by protocol, for the last 7 days. + TopTenSyslogEventsByProtocolsSevenDayCount *uint `json:"topTenSyslogEventsByProtocolsSevenDayCount,omitempty" xmlrpc:"topTenSyslogEventsByProtocolsSevenDayCount,omitempty"` + + // Top ten network datacenter syslog events, grouped by protocol, for the last 7 days. + TopTenSyslogEventsByProtocolsSevenDays []Network_Logging_Syslog `json:"topTenSyslogEventsByProtocolsSevenDays,omitempty" xmlrpc:"topTenSyslogEventsByProtocolsSevenDays,omitempty"` + + // Top ten network datacenter syslog events, grouped by source ip address, for the last 24 hours. + TopTenSyslogEventsBySourceIpOneDay []Network_Logging_Syslog `json:"topTenSyslogEventsBySourceIpOneDay,omitempty" xmlrpc:"topTenSyslogEventsBySourceIpOneDay,omitempty"` + + // A count of top ten network datacenter syslog events, grouped by source ip address, for the last 24 hours. + TopTenSyslogEventsBySourceIpOneDayCount *uint `json:"topTenSyslogEventsBySourceIpOneDayCount,omitempty" xmlrpc:"topTenSyslogEventsBySourceIpOneDayCount,omitempty"` + + // A count of top ten network datacenter syslog events, grouped by source ip address, for the last 7 days. + TopTenSyslogEventsBySourceIpSevenDayCount *uint `json:"topTenSyslogEventsBySourceIpSevenDayCount,omitempty" xmlrpc:"topTenSyslogEventsBySourceIpSevenDayCount,omitempty"` + + // Top ten network datacenter syslog events, grouped by source ip address, for the last 7 days. + TopTenSyslogEventsBySourceIpSevenDays []Network_Logging_Syslog `json:"topTenSyslogEventsBySourceIpSevenDays,omitempty" xmlrpc:"topTenSyslogEventsBySourceIpSevenDays,omitempty"` + + // Top ten network datacenter syslog events, grouped by source port, for the last 24 hours. + TopTenSyslogEventsBySourcePortOneDay []Network_Logging_Syslog `json:"topTenSyslogEventsBySourcePortOneDay,omitempty" xmlrpc:"topTenSyslogEventsBySourcePortOneDay,omitempty"` + + // A count of top ten network datacenter syslog events, grouped by source port, for the last 24 hours. + TopTenSyslogEventsBySourcePortOneDayCount *uint `json:"topTenSyslogEventsBySourcePortOneDayCount,omitempty" xmlrpc:"topTenSyslogEventsBySourcePortOneDayCount,omitempty"` + + // A count of top ten network datacenter syslog events, grouped by source port, for the last 7 days. + TopTenSyslogEventsBySourcePortSevenDayCount *uint `json:"topTenSyslogEventsBySourcePortSevenDayCount,omitempty" xmlrpc:"topTenSyslogEventsBySourcePortSevenDayCount,omitempty"` + + // Top ten network datacenter syslog events, grouped by source port, for the last 7 days. + TopTenSyslogEventsBySourcePortSevenDays []Network_Logging_Syslog `json:"topTenSyslogEventsBySourcePortSevenDays,omitempty" xmlrpc:"topTenSyslogEventsBySourcePortSevenDays,omitempty"` + + // A virtual guest that this IP address is routed to. + VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` + + // A count of virtual licenses allocated for an IP Address.
+ VirtualLicenseCount *uint `json:"virtualLicenseCount,omitempty" xmlrpc:"virtualLicenseCount,omitempty"` + + // Virtual licenses allocated for an IP Address. + VirtualLicenses []Software_VirtualLicense `json:"virtualLicenses,omitempty" xmlrpc:"virtualLicenses,omitempty"` +} + +// no documentation yet +type Network_Subnet_IpAddress_Global struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The active transaction associated with this Global IP. + ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"` + + // The billing item for this Global IP. + BillingItem *Billing_Item_Network_Subnet_IpAddress_Global `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // A Global IP Address' associated description + Description *int `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + DestinationIpAddress *Network_Subnet_IpAddress `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"` + + // A Global IP Address' associated [[SoftLayer_Network_Subnet_IpAddress|ipAddress]] ID + DestinationIpAddressId *int `json:"destinationIpAddressId,omitempty" xmlrpc:"destinationIpAddressId,omitempty"` + + // A Global IP Address' unique identifier + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // A Global IP Address' associated [[SoftLayer_Network_Subnet_IpAddress|ipAddress]] ID + IpAddressId *int `json:"ipAddressId,omitempty" xmlrpc:"ipAddressId,omitempty"` + + // A Global IP Address' associated [[SoftLayer_Network_Subnet_IpAddress_Global_Type|type]] ID + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// The SoftLayer_Network_Subnet_IpAddress_Version6 data type contains general information relating to a single SoftLayer IPv6 address. +type Network_Subnet_IpAddress_Version6 struct { + Network_Subnet_IpAddress + + // The network gateway appliance using this address as the public IPv6 address. + PublicVersion6NetworkGateway *Network_Gateway `json:"publicVersion6NetworkGateway,omitempty" xmlrpc:"publicVersion6NetworkGateway,omitempty"` +} + +// The subnet registration data type contains general information relating to a single subnet registration instance. These registration instances can be updated to reflect changes, and will record the changes in the [[SoftLayer_Network_Subnet_Registration_Event|events]]. +type Network_Subnet_Registration struct { + Entity + + // The account that this registration belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The registration object's associated [[SoftLayer_Account|account]] id + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The CIDR prefix for the registered subnet + Cidr *int `json:"cidr,omitempty" xmlrpc:"cidr,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A count of the cross-reference records that tie the [[SoftLayer_Account_Regional_Registry_Detail]] objects to the registration object. + DetailReferenceCount *uint `json:"detailReferenceCount,omitempty" xmlrpc:"detailReferenceCount,omitempty"` + + // The cross-reference records that tie the [[SoftLayer_Account_Regional_Registry_Detail]] objects to the registration object.
+ DetailReferences []Network_Subnet_Registration_Details `json:"detailReferences,omitempty" xmlrpc:"detailReferences,omitempty"` + + // A count of the related registration events. + EventCount *uint `json:"eventCount,omitempty" xmlrpc:"eventCount,omitempty"` + + // The related registration events. + Events []Network_Subnet_Registration_Event `json:"events,omitempty" xmlrpc:"events,omitempty"` + + // Unique ID of the registration object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The "network" detail object. + NetworkDetail *Account_Regional_Registry_Detail `json:"networkDetail,omitempty" xmlrpc:"networkDetail,omitempty"` + + // The RIR-specific handle or name of the registered subnet. This field is read-only. + NetworkHandle *string `json:"networkHandle,omitempty" xmlrpc:"networkHandle,omitempty"` + + // The base IP address of the registered subnet + NetworkIdentifier *string `json:"networkIdentifier,omitempty" xmlrpc:"networkIdentifier,omitempty"` + + // The "person" detail object. + PersonDetail *Account_Regional_Registry_Detail `json:"personDetail,omitempty" xmlrpc:"personDetail,omitempty"` + + // The related Regional Internet Registry. + RegionalInternetRegistry *Network_Regional_Internet_Registry `json:"regionalInternetRegistry,omitempty" xmlrpc:"regionalInternetRegistry,omitempty"` + + // The RIR handle that this registration object belongs to. This field may not be populated until the registration is complete. + RegionalInternetRegistryHandle *Account_Rwhois_Handle `json:"regionalInternetRegistryHandle,omitempty" xmlrpc:"regionalInternetRegistryHandle,omitempty"` + + // The registration object's associated [[SoftLayer_Account_Rwhois_Handle|RIR handle]] id + RegionalInternetRegistryHandleId *int `json:"regionalInternetRegistryHandleId,omitempty" xmlrpc:"regionalInternetRegistryHandleId,omitempty"` + + // The registration object's associated [[SoftLayer_Network_Regional_Internet_Registry|RIR]] id + RegionalInternetRegistryId *int `json:"regionalInternetRegistryId,omitempty" xmlrpc:"regionalInternetRegistryId,omitempty"` + + // The status of this registration. + Status *Network_Subnet_Registration_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The registration object's associated [[SoftLayer_Network_Subnet_Registration_Status|status]] id + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The subnet that this registration pertains to. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` +} + +// APNIC-specific registration object. For more detail see [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]]. +type Network_Subnet_Registration_Apnic struct { + Network_Subnet_Registration +} + +// ARIN-specific registration object. For more detail see [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]]. +type Network_Subnet_Registration_Arin struct { + Network_Subnet_Registration +} + +// The SoftLayer_Network_Subnet_Registration_Details objects are used to relate [[SoftLayer_Account_Regional_Registry_Detail]] objects to a [[SoftLayer_Network_Subnet_Registration]] object. This allows for easy reuse of registration details. It is important to note that only one detail object per type may be associated to a registration object. 
+type Network_Subnet_Registration_Details struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The related [[SoftLayer_Account_Regional_Registry_Detail|detail object]]. + Detail *Account_Regional_Registry_Detail `json:"detail,omitempty" xmlrpc:"detail,omitempty"` + + // Numeric ID of the related [[SoftLayer_Account_Regional_Registry_Detail]] object + DetailId *int `json:"detailId,omitempty" xmlrpc:"detailId,omitempty"` + + // Unique numeric ID of the object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The related [[SoftLayer_Network_Subnet_Registration|registration object]]. + Registration *Network_Subnet_Registration `json:"registration,omitempty" xmlrpc:"registration,omitempty"` + + // Numeric ID of the related [[SoftLayer_Network_Subnet_Registration]] object + RegistrationId *int `json:"registrationId,omitempty" xmlrpc:"registrationId,omitempty"` +} + +// Each time a [[SoftLayer_Network_Subnet_Registration|subnet registration]] object is created or modified, the system will generate an event for it. Additional actions that would create an event include RIR responses and error cases. +type Network_Subnet_Registration_Event struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Unique numeric ID of the event object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A string message indicating what took place during this event + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The registration this event pertains to. + Registration *Network_Subnet_Registration `json:"registration,omitempty" xmlrpc:"registration,omitempty"` + + // The numeric ID of the related [[SoftLayer_Network_Subnet_Registration]] object + RegistrationId *int `json:"registrationId,omitempty" xmlrpc:"registrationId,omitempty"` + + // The type of this event. + Type *Network_Subnet_Registration_Event_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The numeric ID of the associated [[SoftLayer_Network_Subnet_Registration_Event_Type|event type]] object + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// Subnet Registration Event Type objects describe the nature of a [[SoftLayer_Network_Subnet_Registration_Event]] +// +// The standard values for these objects are as follows:
+// * REGISTRATION_CREATED - Indicates that the registration has been created
+// * REGISTRATION_UPDATED - Indicates that the registration has been updated
+// * REGISTRATION_CANCELLED - Indicates that the registration has been cancelled
+// * RIR_RESPONSE - Indicates that an action taken against the RIR has produced a response. More details will be provided in the event message.
+// * ERROR - Indicates that an error has been encountered. More details will be provided in the event message.
+// * NOTE - An employee or other system has entered a note regarding the registration. The note content will be provided in the event message.
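+//
+// Editor's sketch - consumers would typically branch on the event type's key
+// name (the values listed above); `event` here is a hypothetical, populated
+// Network_Subnet_Registration_Event:
+//
+//    switch *event.Type.KeyName {
+//    case "RIR_RESPONSE", "ERROR":
+//        log.Println(*event.Message) // details arrive in the event message
+//    }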
    +type Network_Subnet_Registration_Event_Type struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Unique numeric ID of the event type object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Code-friendly string name of the event type + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Human-readable name of the event type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// RIPE-specific registration object. For more detail see [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]]. +type Network_Subnet_Registration_Ripe struct { + Network_Subnet_Registration +} + +// Subnet Registration Status objects describe the current status of a subnet registration. +// +// The standard values for these objects are as follows:
+// * OPEN - Indicates that the registration object is new and has yet to be submitted to the RIR
+// * PENDING - Indicates that the registration object has been submitted to the RIR and is awaiting response
+// * COMPLETE - Indicates that the RIR action has completed
+// * DELETED - Indicates that the registration object has been gracefully removed and is no longer valid
+// * CANCELLED - Indicates that the registration object has been abruptly removed and is no longer valid
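+//
+// Editor's sketch - under the reading that COMPLETE, DELETED and CANCELLED
+// are terminal (an assumption based on the descriptions above), a caller
+// could stop polling a registration with a helper like:
+//
+//    func isTerminal(s Network_Subnet_Registration_Status) bool {
+//        switch *s.KeyName {
+//        case "COMPLETE", "DELETED", "CANCELLED":
+//            return true
+//        }
+//        return false
+//    }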
    +type Network_Subnet_Registration_Status struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // Unique numeric ID of the status object + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Code-friendly string name of the status + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // Human-readable name of the status + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// Every SoftLayer customer account has contact information associated with it for reverse WHOIS purposes. An account's RWHOIS data, modeled by the SoftLayer_Network_Subnet_Rwhois_Data data type, is used by SoftLayer's reverse WHOIS server as well as for SWIP transactions. SoftLayer's reverse WHOIS servers respond to WHOIS queries for IP addresses belonging to a customer's servers, returning this RWHOIS data. +// +// A SoftLayer customer's RWHOIS data may not necessarily match their account or portal users' contact information. +type Network_Subnet_Rwhois_Data struct { + Entity + + // An email address associated with an account's RWHOIS data that is responsible for responding to network abuse queries about malicious traffic coming from your servers' IP addresses. + AbuseEmail *string `json:"abuseEmail,omitempty" xmlrpc:"abuseEmail,omitempty"` + + // The SoftLayer customer account associated with this reverse WHOIS data. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // An account's RWHOIS data's associated account identifier. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The first line of the mailing address associated with an account's RWHOIS data. + Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"` + + // The second line of the mailing address associated with an account's RWHOIS data. + Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"` + + // The city of the mailing address associated with an account's RWHOIS data. + City *string `json:"city,omitempty" xmlrpc:"city,omitempty"` + + // The company name associated with an account's RWHOIS data. + CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"` + + // A two-letter abbreviation of the country of the mailing address associated with an account's RWHOIS data. + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // The date an account's RWHOIS data was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The first name associated with an account's RWHOIS data. + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // An account's RWHOIS data's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The last name associated with an account's RWHOIS data. + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // The date an account's RWHOIS data was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The postal code of the mailing address associated with an account's RWHOIS data. + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Whether an account's RWHOIS data refers to a private residence or not. 
+ PrivateResidenceFlag *bool `json:"privateResidenceFlag,omitempty" xmlrpc:"privateResidenceFlag,omitempty"` + + // A two-letter abbreviation of the state of the mailing address associated with an account's RWHOIS data. If an account does not reside in a province then this is typically blank. + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` +} + +// The SoftLayer_Network_Subnet_Swip_Transaction data type contains basic information tracked at SoftLayer to allow automation of SWIP creation, update, and removal requests. A specific transaction is attached to an accountId and a subnetId. This also contains a "Status Name" which tells the customer what the transaction is doing: +// +// +// * REQUEST QUEUED: Request is queued up to be sent to ARIN +// * REQUEST SENT: The email request has been sent to ARIN +// * REQUEST CONFIRMED: ARIN has confirmed that the request is good, and should be available in 24 hours +// * OK: The subnet has been checked with WHOIS and the SWIP transaction has completed correctly +// * REMOVE QUEUED: A subnet is queued to be removed from ARIN's systems +// * REMOVE SENT: The removal email request has been sent to ARIN +// * REMOVE CONFIRMED: ARIN has confirmed that the removal request is good, and the subnet should be clear in WHOIS in 24 hours +// * DELETED: This specific SWIP Transaction has been removed from ARIN and is no longer in effect +// * SOFTLAYER MANUALLY PROCESSING: Sometimes a request doesn't go through correctly and has to be manually processed by SoftLayer. This may take some time. +type Network_Subnet_Swip_Transaction struct { + Entity + + // The Account whose RWHOIS data was used to SWIP this subnet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A SWIP transaction's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A name describing which state a SWIP transaction is in. + StatusName *string `json:"statusName,omitempty" xmlrpc:"statusName,omitempty"` + + // The subnet that this SWIP transaction was created for. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // ID Number of the Subnet for this SWIP transaction. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` +} + +// no documentation yet +type Network_TippingPointReporting struct { + Entity +} + +// The SoftLayer_Network_Tunnel_Module_Context data type contains general information relating to a single SoftLayer network tunnel. The SoftLayer_Network_Tunnel_Module_Context is useful to gather information such as related customer subnets (remote) and internal subnets (local) associated with the network tunnel as well as other information needed to manage the network tunnel. Account and billing information related to the network tunnel can also be retrieved. +type Network_Tunnel_Module_Context struct { + Entity + + // The account that a network tunnel belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A network tunnel's account identifier. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The transaction that is currently applying configurations for the network tunnel. + ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"` + + // A count of a network tunnel's address translations.
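+
+// Editor's sketch - the SWIP lifecycle above is exposed only through the
+// plain statusName string, so a caller watching a transaction compares
+// against those names; `swip` is a hypothetical
+// Network_Subnet_Swip_Transaction:
+//
+//    switch *swip.StatusName {
+//    case "OK", "DELETED":
+//        // settled; stop polling
+//    case "SOFTLAYER MANUALLY PROCESSING":
+//        // may take some time, per the notes above
+//    }
+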
+
+// no documentation yet
+type Network_TippingPointReporting struct {
+ Entity
+}
+
+// The SoftLayer_Network_Tunnel_Module_Context data type contains general information relating to a single SoftLayer network tunnel. The SoftLayer_Network_Tunnel_Module_Context is useful to gather information such as related customer subnets (remote) and internal subnets (local) associated with the network tunnel as well as other information needed to manage the network tunnel. Account and billing information related to the network tunnel can also be retrieved.
+type Network_Tunnel_Module_Context struct {
+ Entity
+
+ // The account that a network tunnel belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // A network tunnel's account identifier.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The transaction that is currently applying configurations for the network tunnel.
+ ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"`
+
+ // A count of a network tunnel's address translations.
+ AddressTranslationCount *uint `json:"addressTranslationCount,omitempty" xmlrpc:"addressTranslationCount,omitempty"`
+
+ // A network tunnel's address translations.
+ AddressTranslations []Network_Tunnel_Module_Context_Address_Translation `json:"addressTranslations,omitempty" xmlrpc:"addressTranslations,omitempty"`
+
+ // A flag used to specify when advanced configurations, complex configurations that require manual setup, are being applied to network devices for a network tunnel. When the flag is set to true (1), a network tunnel cannot be configured through the management portal or the API.
+ AdvancedConfigurationFlag *int `json:"advancedConfigurationFlag,omitempty" xmlrpc:"advancedConfigurationFlag,omitempty"`
+
+ // A count of subnets that provide access to SoftLayer services such as the management portal and the SoftLayer API.
+ AllAvailableServiceSubnetCount *uint `json:"allAvailableServiceSubnetCount,omitempty" xmlrpc:"allAvailableServiceSubnetCount,omitempty"`
+
+ // Subnets that provide access to SoftLayer services such as the management portal and the SoftLayer API.
+ AllAvailableServiceSubnets []Network_Subnet `json:"allAvailableServiceSubnets,omitempty" xmlrpc:"allAvailableServiceSubnets,omitempty"`
+
+ // The current billing item for the network tunnel.
+ BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // The date a network tunnel was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The remote end of a network tunnel. This end of the network tunnel resides on an outside network and will be sending and receiving the IPSec packets.
+ CustomerPeerIpAddress *string `json:"customerPeerIpAddress,omitempty" xmlrpc:"customerPeerIpAddress,omitempty"`
+
+ // A count of remote subnets that are allowed access through a network tunnel.
+ CustomerSubnetCount *uint `json:"customerSubnetCount,omitempty" xmlrpc:"customerSubnetCount,omitempty"`
+
+ // Remote subnets that are allowed access through a network tunnel.
+ CustomerSubnets []Network_Customer_Subnet `json:"customerSubnets,omitempty" xmlrpc:"customerSubnets,omitempty"`
+
+ // The datacenter location for one end of the network tunnel that allows access to the account's private subnets.
+ Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"`
+
+ // The name given to a network tunnel by a user.
+ FriendlyName *string `json:"friendlyName,omitempty" xmlrpc:"friendlyName,omitempty"`
+
+ // A network tunnel's unique identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The local end of a network tunnel. This end of the network tunnel resides on the SoftLayer networks and gives the remote end of the tunnel access to subnets on SoftLayer networks.
+ InternalPeerIpAddress *string `json:"internalPeerIpAddress,omitempty" xmlrpc:"internalPeerIpAddress,omitempty"`
+
+ // A count of private subnets that can be accessed through the network tunnel.
+ InternalSubnetCount *uint `json:"internalSubnetCount,omitempty" xmlrpc:"internalSubnetCount,omitempty"`
+
+ // Private subnets that can be accessed through the network tunnel.
+ InternalSubnets []Network_Subnet `json:"internalSubnets,omitempty" xmlrpc:"internalSubnets,omitempty"`
+
+ // The date a network tunnel was last modified.
+ //
+ // NOTE: This date should NOT be used to determine when the network tunnel configurations were last applied to the network device.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A network tunnel's unique name used on the network device.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // Authentication used to generate keys for protecting the negotiations for a network tunnel.
+ PhaseOneAuthentication *string `json:"phaseOneAuthentication,omitempty" xmlrpc:"phaseOneAuthentication,omitempty"`
+
+ // Determines the strength of the key used in the key exchange process. The higher the group number the stronger the key is and the more secure it is. However, processing time will increase as the strength of the key increases. Both peers must use the same Diffie-Hellman group.
+ PhaseOneDiffieHellmanGroup *int `json:"phaseOneDiffieHellmanGroup,omitempty" xmlrpc:"phaseOneDiffieHellmanGroup,omitempty"`
+
+ // Encryption used to generate keys for protecting the negotiations for a network tunnel.
+ PhaseOneEncryption *string `json:"phaseOneEncryption,omitempty" xmlrpc:"phaseOneEncryption,omitempty"`
+
+ // Amount of time (in seconds) allowed to pass before the encryption key expires. A new key is generated without interrupting service. Valid times are from 120 to 172800 seconds.
+ PhaseOneKeylife *int `json:"phaseOneKeylife,omitempty" xmlrpc:"phaseOneKeylife,omitempty"`
+
+ // The authentication used in the phase 2 proposal negotiation process.
+ PhaseTwoAuthentication *string `json:"phaseTwoAuthentication,omitempty" xmlrpc:"phaseTwoAuthentication,omitempty"`
+
+ // Determines the strength of the key used in the key exchange process. The higher the group number the stronger the key is and the more secure it is. However, processing time will increase as the strength of the key increases. Both peers must use the same Diffie-Hellman group.
+ PhaseTwoDiffieHellmanGroup *int `json:"phaseTwoDiffieHellmanGroup,omitempty" xmlrpc:"phaseTwoDiffieHellmanGroup,omitempty"`
+
+ // The encryption used in the phase 2 proposal negotiation process.
+ PhaseTwoEncryption *string `json:"phaseTwoEncryption,omitempty" xmlrpc:"phaseTwoEncryption,omitempty"`
+
+ // Amount of time (in seconds) allowed to pass before the encryption key expires. A new key is generated without interrupting service. Valid times are from 120 to 172800 seconds.
+ PhaseTwoKeylife *int `json:"phaseTwoKeylife,omitempty" xmlrpc:"phaseTwoKeylife,omitempty"`
+
+ // Determines if the generated keys are made from previous keys. When PFS is specified, a Diffie-Hellman exchange occurs each time a new security association is negotiated.
+ PhaseTwoPerfectForwardSecrecy *int `json:"phaseTwoPerfectForwardSecrecy,omitempty" xmlrpc:"phaseTwoPerfectForwardSecrecy,omitempty"`
+
+ // A key used so that peers authenticate each other. This key is hashed by using the phase one encryption and phase one authentication.
+ PresharedKey *string `json:"presharedKey,omitempty" xmlrpc:"presharedKey,omitempty"`
+
+ // A count of service subnets that can be accessed through the network tunnel.
+ ServiceSubnetCount *uint `json:"serviceSubnetCount,omitempty" xmlrpc:"serviceSubnetCount,omitempty"`
+
+ // Service subnets that can be accessed through the network tunnel.
+ ServiceSubnets []Network_Subnet `json:"serviceSubnets,omitempty" xmlrpc:"serviceSubnets,omitempty"`
+
+ // A count of subnets used for a network tunnel's address translations.
+ StaticRouteSubnetCount *uint `json:"staticRouteSubnetCount,omitempty" xmlrpc:"staticRouteSubnetCount,omitempty"`
+
+ // Subnets used for a network tunnel's address translations.
+ StaticRouteSubnets []Network_Subnet `json:"staticRouteSubnets,omitempty" xmlrpc:"staticRouteSubnets,omitempty"`
+
+ // The transaction history for this network tunnel.
+ TransactionHistory []Provisioning_Version1_Transaction `json:"transactionHistory,omitempty" xmlrpc:"transactionHistory,omitempty"`
+
+ // A count of the transaction history for this network tunnel.
+ TransactionHistoryCount *uint `json:"transactionHistoryCount,omitempty" xmlrpc:"transactionHistoryCount,omitempty"`
+}
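+
+// Editorial sketch (not generated): the phase one and phase two fields above
+// could be populated with the pointer helpers from the sl package. The
+// algorithm names and key lifetimes below are illustrative assumptions; valid
+// values are dictated by the SoftLayer API, and keylife must fall within the
+// documented 120 to 172800 second range.
+//
+//	import "github.com/softlayer/softlayer-go/sl"
+//
+//	tunnel := datatypes.Network_Tunnel_Module_Context{
+//		PhaseOneAuthentication:        sl.String("SHA256"),
+//		PhaseOneEncryption:            sl.String("AES256"),
+//		PhaseOneDiffieHellmanGroup:    sl.Int(5),
+//		PhaseOneKeylife:               sl.Int(3600),
+//		PhaseTwoAuthentication:        sl.String("SHA256"),
+//		PhaseTwoEncryption:            sl.String("AES256"),
+//		PhaseTwoDiffieHellmanGroup:    sl.Int(5),
+//		PhaseTwoKeylife:               sl.Int(3600),
+//		PhaseTwoPerfectForwardSecrecy: sl.Int(1),
+//	}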
+
+// The SoftLayer_Network_Tunnel_Module_Context_Address_Translation data type contains general information relating to a single address translation. Information such as notes, ip addresses, record information, and network tunnel data may be retrieved.
+type Network_Tunnel_Module_Context_Address_Translation struct {
+ Entity
+
+ // The ip address record that will receive the encrypted traffic.
+ CustomerIpAddress *string `json:"customerIpAddress,omitempty" xmlrpc:"customerIpAddress,omitempty"`
+
+ // The unique identifier for the ip address record that will receive the encrypted traffic.
+ CustomerIpAddressId *int `json:"customerIpAddressId,omitempty" xmlrpc:"customerIpAddressId,omitempty"`
+
+ // The ip address record for the ip that will receive the encrypted traffic from the IPSec network tunnel.
+ CustomerIpAddressRecord *Network_Customer_Subnet_IpAddress `json:"customerIpAddressRecord,omitempty" xmlrpc:"customerIpAddressRecord,omitempty"`
+
+ // An address translation's unique identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The ip address record that will deliver the encrypted traffic.
+ InternalIpAddress *string `json:"internalIpAddress,omitempty" xmlrpc:"internalIpAddress,omitempty"`
+
+ // The unique identifier for the ip address record that will deliver the encrypted traffic.
+ InternalIpAddressId *int `json:"internalIpAddressId,omitempty" xmlrpc:"internalIpAddressId,omitempty"`
+
+ // The ip address record for the ip that will deliver the encrypted traffic from the IPSec network tunnel.
+ InternalIpAddressRecord *Network_Subnet_IpAddress `json:"internalIpAddressRecord,omitempty" xmlrpc:"internalIpAddressRecord,omitempty"`
+
+ // The IPSec network tunnel an address translation belongs to.
+ NetworkTunnelContext *Network_Tunnel_Module_Context `json:"networkTunnelContext,omitempty" xmlrpc:"networkTunnelContext,omitempty"`
+
+ // An address translation's network tunnel identifier.
+ NetworkTunnelContextId *int `json:"networkTunnelContextId,omitempty" xmlrpc:"networkTunnelContextId,omitempty"`
+
+ // A name or description given to an address translation to help identify the address translation.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+}
+
+// The SoftLayer_Network_Vlan data type models a single VLAN within SoftLayer's public and private networks. A Virtual LAN is a structure that associates network interfaces on routers, switches, and servers in different locations to act as if they were on the same local network broadcast domain. VLANs are a central part of the SoftLayer network. They can determine how new IP subnets are routed and how individual servers communicate with each other.
+type Network_Vlan struct {
+ Entity
+
+ // The SoftLayer customer account associated with a VLAN.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The internal identifier of the SoftLayer customer account that a VLAN is associated with.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // A count of a VLAN's additional primary subnets. These are used to extend the number of servers attached to the VLAN by adding more ip addresses to the primary IP address pool. + AdditionalPrimarySubnetCount *uint `json:"additionalPrimarySubnetCount,omitempty" xmlrpc:"additionalPrimarySubnetCount,omitempty"` + + // A VLAN's additional primary subnets. These are used to extend the number of servers attached to the VLAN by adding more ip addresses to the primary IP address pool. + AdditionalPrimarySubnets []Network_Subnet `json:"additionalPrimarySubnets,omitempty" xmlrpc:"additionalPrimarySubnets,omitempty"` + + // The gateway this VLAN is inside of. + AttachedNetworkGateway *Network_Gateway `json:"attachedNetworkGateway,omitempty" xmlrpc:"attachedNetworkGateway,omitempty"` + + // Whether or not this VLAN is inside a gateway. + AttachedNetworkGatewayFlag *bool `json:"attachedNetworkGatewayFlag,omitempty" xmlrpc:"attachedNetworkGatewayFlag,omitempty"` + + // The inside VLAN record if this VLAN is inside a network gateway. + AttachedNetworkGatewayVlan *Network_Gateway_Vlan `json:"attachedNetworkGatewayVlan,omitempty" xmlrpc:"attachedNetworkGatewayVlan,omitempty"` + + // The billing item for a network vlan. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // A flag indicating that a network vlan is on a Hardware Firewall (Dedicated). + DedicatedFirewallFlag *int `json:"dedicatedFirewallFlag,omitempty" xmlrpc:"dedicatedFirewallFlag,omitempty"` + + // The extension router that a VLAN is associated with. + ExtensionRouter *Hardware_Router `json:"extensionRouter,omitempty" xmlrpc:"extensionRouter,omitempty"` + + // A count of a firewalled Vlan's network components. + FirewallGuestNetworkComponentCount *uint `json:"firewallGuestNetworkComponentCount,omitempty" xmlrpc:"firewallGuestNetworkComponentCount,omitempty"` + + // A firewalled Vlan's network components. + FirewallGuestNetworkComponents []Network_Component_Firewall `json:"firewallGuestNetworkComponents,omitempty" xmlrpc:"firewallGuestNetworkComponents,omitempty"` + + // A count of a firewalled vlan's inbound/outbound interfaces. + FirewallInterfaceCount *uint `json:"firewallInterfaceCount,omitempty" xmlrpc:"firewallInterfaceCount,omitempty"` + + // A firewalled vlan's inbound/outbound interfaces. + FirewallInterfaces []Network_Firewall_Module_Context_Interface `json:"firewallInterfaces,omitempty" xmlrpc:"firewallInterfaces,omitempty"` + + // A count of a firewalled Vlan's network components. + FirewallNetworkComponentCount *uint `json:"firewallNetworkComponentCount,omitempty" xmlrpc:"firewallNetworkComponentCount,omitempty"` + + // A firewalled Vlan's network components. + FirewallNetworkComponents []Network_Component_Firewall `json:"firewallNetworkComponents,omitempty" xmlrpc:"firewallNetworkComponents,omitempty"` + + // A count of the currently running rule set of a firewalled VLAN. + FirewallRuleCount *uint `json:"firewallRuleCount,omitempty" xmlrpc:"firewallRuleCount,omitempty"` + + // The currently running rule set of a firewalled VLAN. + FirewallRules []Network_Vlan_Firewall_Rule `json:"firewallRules,omitempty" xmlrpc:"firewallRules,omitempty"` + + // A count of the networking components that are connected to a VLAN. + GuestNetworkComponentCount *uint `json:"guestNetworkComponentCount,omitempty" xmlrpc:"guestNetworkComponentCount,omitempty"` + + // The networking components that are connected to a VLAN. 
+ GuestNetworkComponents []Virtual_Guest_Network_Component `json:"guestNetworkComponents,omitempty" xmlrpc:"guestNetworkComponents,omitempty"` + + // All of the hardware that exists on a VLAN. Hardware is associated with a VLAN by its networking components. + Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // A count of all of the hardware that exists on a VLAN. Hardware is associated with a VLAN by its networking components. + HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"` + + // no documentation yet + HighAvailabilityFirewallFlag *bool `json:"highAvailabilityFirewallFlag,omitempty" xmlrpc:"highAvailabilityFirewallFlag,omitempty"` + + // A VLAN's internal identifier. This should not be confused with the ''vlanNumber'' property, which is used in network configuration. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A flag indicating that a vlan can be assigned to a host that has local disk functionality. + LocalDiskStorageCapabilityFlag *bool `json:"localDiskStorageCapabilityFlag,omitempty" xmlrpc:"localDiskStorageCapabilityFlag,omitempty"` + + // The date a VLAN was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The optional name for this VLAN + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The network in which this VLAN resides. + Network *Network `json:"network,omitempty" xmlrpc:"network,omitempty"` + + // A count of the networking components that are connected to a VLAN. + NetworkComponentCount *uint `json:"networkComponentCount,omitempty" xmlrpc:"networkComponentCount,omitempty"` + + // A count of the network components that are connected to this VLAN through a trunk. + NetworkComponentTrunkCount *uint `json:"networkComponentTrunkCount,omitempty" xmlrpc:"networkComponentTrunkCount,omitempty"` + + // The network components that are connected to this VLAN through a trunk. + NetworkComponentTrunks []Network_Component_Network_Vlan_Trunk `json:"networkComponentTrunks,omitempty" xmlrpc:"networkComponentTrunks,omitempty"` + + // The networking components that are connected to a VLAN. + NetworkComponents []Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"` + + // Identifier to denote whether a VLAN is used for public or private connectivity. + NetworkSpace *string `json:"networkSpace,omitempty" xmlrpc:"networkSpace,omitempty"` + + // The Hardware Firewall (Dedicated) for a network vlan. + NetworkVlanFirewall *Network_Vlan_Firewall `json:"networkVlanFirewall,omitempty" xmlrpc:"networkVlanFirewall,omitempty"` + + // The note for this vlan. + Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"` + + // The primary router that a VLAN is associated with. Every SoftLayer VLAN is connected to more than one router for greater network redundancy. + PrimaryRouter *Hardware_Router `json:"primaryRouter,omitempty" xmlrpc:"primaryRouter,omitempty"` + + // A VLAN's primary subnet. Each VLAN has at least one subnet, usually the subnet that is assigned to a server or new IP address block when it's purchased. + PrimarySubnet *Network_Subnet `json:"primarySubnet,omitempty" xmlrpc:"primarySubnet,omitempty"` + + // A count of + PrimarySubnetCount *uint `json:"primarySubnetCount,omitempty" xmlrpc:"primarySubnetCount,omitempty"` + + // The internal identifier of the primary subnet addressed on a VLAN. 
+ PrimarySubnetId *int `json:"primarySubnetId,omitempty" xmlrpc:"primarySubnetId,omitempty"`
+
+ // A VLAN's primary IPv6 subnet. Some VLANs may not have a primary IPv6 subnet.
+ PrimarySubnetVersion6 *Network_Subnet `json:"primarySubnetVersion6,omitempty" xmlrpc:"primarySubnetVersion6,omitempty"`
+
+ // no documentation yet
+ PrimarySubnets []Network_Subnet `json:"primarySubnets,omitempty" xmlrpc:"primarySubnets,omitempty"`
+
+ // A count of the gateways this VLAN is the private VLAN of.
+ PrivateNetworkGatewayCount *uint `json:"privateNetworkGatewayCount,omitempty" xmlrpc:"privateNetworkGatewayCount,omitempty"`
+
+ // The gateways this VLAN is the private VLAN of.
+ PrivateNetworkGateways []Network_Gateway `json:"privateNetworkGateways,omitempty" xmlrpc:"privateNetworkGateways,omitempty"`
+
+ // A count of
+ ProtectedIpAddressCount *uint `json:"protectedIpAddressCount,omitempty" xmlrpc:"protectedIpAddressCount,omitempty"`
+
+ // no documentation yet
+ ProtectedIpAddresses []Network_Subnet_IpAddress `json:"protectedIpAddresses,omitempty" xmlrpc:"protectedIpAddresses,omitempty"`
+
+ // A count of the gateways this VLAN is the public VLAN of.
+ PublicNetworkGatewayCount *uint `json:"publicNetworkGatewayCount,omitempty" xmlrpc:"publicNetworkGatewayCount,omitempty"`
+
+ // The gateways this VLAN is the public VLAN of.
+ PublicNetworkGateways []Network_Gateway `json:"publicNetworkGateways,omitempty" xmlrpc:"publicNetworkGateways,omitempty"`
+
+ // A count of the resource groups in which this VLAN is a member.
+ ResourceGroupCount *uint `json:"resourceGroupCount,omitempty" xmlrpc:"resourceGroupCount,omitempty"`
+
+ // The resource group member for a network vlan.
+ ResourceGroupMember []Resource_Group_Member `json:"resourceGroupMember,omitempty" xmlrpc:"resourceGroupMember,omitempty"`
+
+ // A count of the resource group member for a network vlan.
+ ResourceGroupMemberCount *uint `json:"resourceGroupMemberCount,omitempty" xmlrpc:"resourceGroupMemberCount,omitempty"`
+
+ // The resource groups in which this VLAN is a member.
+ ResourceGroups []Resource_Group `json:"resourceGroups,omitempty" xmlrpc:"resourceGroups,omitempty"`
+
+ // A flag indicating that a vlan can be assigned to a host that has SAN disk functionality.
+ SanStorageCapabilityFlag *bool `json:"sanStorageCapabilityFlag,omitempty" xmlrpc:"sanStorageCapabilityFlag,omitempty"`
+
+ // A count of collection of scale VLANs this VLAN applies to.
+ ScaleVlanCount *uint `json:"scaleVlanCount,omitempty" xmlrpc:"scaleVlanCount,omitempty"`
+
+ // Collection of scale VLANs this VLAN applies to.
+ ScaleVlans []Scale_Network_Vlan `json:"scaleVlans,omitempty" xmlrpc:"scaleVlans,omitempty"`
+
+ // The secondary router that a VLAN is associated with. Every SoftLayer VLAN is connected to more than one router for greater network redundancy.
+ SecondaryRouter *Hardware `json:"secondaryRouter,omitempty" xmlrpc:"secondaryRouter,omitempty"`
+
+ // A count of the subnets that exist as secondary interfaces on a VLAN
+ SecondarySubnetCount *uint `json:"secondarySubnetCount,omitempty" xmlrpc:"secondarySubnetCount,omitempty"`
+
+ // The subnets that exist as secondary interfaces on a VLAN
+ SecondarySubnets []Network_Subnet `json:"secondarySubnets,omitempty" xmlrpc:"secondarySubnets,omitempty"`
+
+ // A count of all of the subnets that exist as VLAN interfaces.
+ SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"`
+
+ // All of the subnets that exist as VLAN interfaces.
+ Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"`
+
+ // A count of references to all tags for this VLAN.
+ TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"`
+
+ // References to all tags for this VLAN.
+ TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"`
+
+ // The number of primary IP addresses in a VLAN.
+ TotalPrimaryIpAddressCount *uint `json:"totalPrimaryIpAddressCount,omitempty" xmlrpc:"totalPrimaryIpAddressCount,omitempty"`
+
+ // The type of this VLAN.
+ Type *Network_Vlan_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // A count of all of the Virtual Servers that are connected to a VLAN.
+ VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"`
+
+ // All of the Virtual Servers that are connected to a VLAN.
+ VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"`
+
+ // A VLAN's number as recorded within the SoftLayer network. This is configured directly on SoftLayer's networking equipment and should not be confused with a VLAN's ''id'' property.
+ VlanNumber *int `json:"vlanNumber,omitempty" xmlrpc:"vlanNumber,omitempty"`
+}
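+
+// Editorial sketch (not generated): one plausible way to list an account's
+// VLANs is through the generated Account service, using an object mask to
+// limit the returned fields. The mask string and credentials are illustrative
+// assumptions; fields left out of the mask come back nil, so a production
+// caller would nil-check before dereferencing.
+//
+//	import (
+//		"fmt"
+//
+//		"github.com/softlayer/softlayer-go/services"
+//		"github.com/softlayer/softlayer-go/session"
+//	)
+//
+//	sess := session.New("myUsername", "myApiKey")
+//	vlans, err := services.GetAccountService(sess).
+//		Mask("id;vlanNumber;networkSpace").
+//		GetNetworkVlans()
+//	if err == nil {
+//		for _, v := range vlans {
+//			// vlanNumber is the tag configured on the equipment; id is SoftLayer's own key.
+//			fmt.Println(*v.Id, *v.VlanNumber, *v.NetworkSpace)
+//		}
+//	}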
+
+// The SoftLayer_Network_Vlan_Firewall data type contains general information relating to a single SoftLayer VLAN firewall. This is the object which ties the running rules to a specific downstream server. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request.
+type Network_Vlan_Firewall struct {
+ Entity
+
+ // A flag to indicate if the firewall is in administrative bypass mode. In other words, no rules are being applied to the traffic coming through.
+ AdministrativeBypassFlag *string `json:"administrativeBypassFlag,omitempty" xmlrpc:"administrativeBypassFlag,omitempty"`
+
+ // The billing item for a Hardware Firewall (Dedicated).
+ BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // Whether or not this firewall can be directly logged in to.
+ CustomerManagedFlag *bool `json:"customerManagedFlag,omitempty" xmlrpc:"customerManagedFlag,omitempty"`
+
+ // The datacenter that the firewall resides in.
+ Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"`
+
+ // The firewall device type.
+ FirewallType *string `json:"firewallType,omitempty" xmlrpc:"firewallType,omitempty"`
+
+ // A name reflecting the hostname and domain of the firewall. This is created from the combined values of the firewall's logical name and vlan number automatically, and thus cannot be edited directly.
+ FullyQualifiedDomainName *string `json:"fullyQualifiedDomainName,omitempty" xmlrpc:"fullyQualifiedDomainName,omitempty"`
+
+ // A firewall's unique identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The credentials to log in to a firewall device. This is only present for dedicated appliances.
+ ManagementCredentials *Software_Component_Password `json:"managementCredentials,omitempty" xmlrpc:"managementCredentials,omitempty"`
+
+ // A count of the update requests made for this firewall.
+ NetworkFirewallUpdateRequestCount *uint `json:"networkFirewallUpdateRequestCount,omitempty" xmlrpc:"networkFirewallUpdateRequestCount,omitempty"`
+
+ // The update requests made for this firewall.
+ NetworkFirewallUpdateRequests []Network_Firewall_Update_Request `json:"networkFirewallUpdateRequests,omitempty" xmlrpc:"networkFirewallUpdateRequests,omitempty"`
+
+ // The VLAN object that a firewall is associated with and protecting.
+ NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"`
+
+ // A count of the VLAN objects that a firewall is associated with and protecting.
+ NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"`
+
+ // The VLAN objects that a firewall is associated with and protecting.
+ NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"`
+
+ // A firewall's primary IP address. This field will be the IP shown when doing network traces and reverse DNS and is a read-only property.
+ PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"`
+
+ // A count of the currently running rule set of this network component firewall.
+ RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"`
+
+ // The currently running rule set of this network component firewall.
+ Rules []Network_Vlan_Firewall_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"`
+
+ // A count of
+ TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"`
+
+ // no documentation yet
+ TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"`
+}
+
+// A SoftLayer_Network_Vlan_Firewall_Rule object represents a currently running firewall rule and contains related information. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates.
+type Network_Vlan_Firewall_Rule struct {
+ Entity
+
+ // The action that the rule is to take [permit or deny].
+ Action *string `json:"action,omitempty" xmlrpc:"action,omitempty"`
+
+ // The destination IP address considered for determining rule application.
+ DestinationIpAddress *string `json:"destinationIpAddress,omitempty" xmlrpc:"destinationIpAddress,omitempty"`
+
+ // The CIDR is used for determining rule application. This value will
+ DestinationIpCidr *int `json:"destinationIpCidr,omitempty" xmlrpc:"destinationIpCidr,omitempty"`
+
+ // The destination IP subnet mask considered for determining rule application.
+ DestinationIpSubnetMask *string `json:"destinationIpSubnetMask,omitempty" xmlrpc:"destinationIpSubnetMask,omitempty"`
+
+ // The ending (upper end of range) destination port considered for determining rule application.
+ DestinationPortRangeEnd *int `json:"destinationPortRangeEnd,omitempty" xmlrpc:"destinationPortRangeEnd,omitempty"`
+
+ // The starting (lower end of range) destination port considered for determining rule application.
+ DestinationPortRangeStart *int `json:"destinationPortRangeStart,omitempty" xmlrpc:"destinationPortRangeStart,omitempty"`
+
+ // The rule's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The network component firewall that this rule belongs to.
+ NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"`
+
+ // The notes field for the rule.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // The numeric value describing the order in which the rule should be applied.
+ OrderValue *int `json:"orderValue,omitempty" xmlrpc:"orderValue,omitempty"`
+
+ // The protocol considered for determining rule application.
+ Protocol *string `json:"protocol,omitempty" xmlrpc:"protocol,omitempty"`
+
+ // The source IP address considered for determining rule application.
+ SourceIpAddress *string `json:"sourceIpAddress,omitempty" xmlrpc:"sourceIpAddress,omitempty"`
+
+ // The CIDR is used for determining rule application. This value will
+ SourceIpCidr *int `json:"sourceIpCidr,omitempty" xmlrpc:"sourceIpCidr,omitempty"`
+
+ // The source IP subnet mask considered for determining rule application.
+ SourceIpSubnetMask *string `json:"sourceIpSubnetMask,omitempty" xmlrpc:"sourceIpSubnetMask,omitempty"`
+
+ // Current status of the network component firewall.
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // Whether this rule is an IPv4 rule or an IPv6 rule. If
+ Version *int `json:"version,omitempty" xmlrpc:"version,omitempty"`
+}
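+
+// Editorial sketch (not generated): a rule value could be assembled like this
+// before being submitted through the firewall update request service named in
+// the comment above. All concrete values are illustrative assumptions.
+//
+//	rule := datatypes.Network_Vlan_Firewall_Rule{
+//		Action:                    sl.String("permit"),
+//		Protocol:                  sl.String("tcp"),
+//		SourceIpAddress:           sl.String("0.0.0.0"),
+//		SourceIpCidr:              sl.Int(0),
+//		DestinationIpAddress:      sl.String("10.0.0.10"),
+//		DestinationIpCidr:         sl.Int(32),
+//		DestinationPortRangeStart: sl.Int(80),
+//		DestinationPortRangeEnd:   sl.Int(443),
+//		OrderValue:                sl.Int(1), // lower values are applied first
+//		Version:                   sl.Int(4), // an IPv4 rule
+//	}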
+
+// no documentation yet
+type Network_Vlan_Type struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/notification.go b/vendor/github.com/softlayer/softlayer-go/datatypes/notification.go
new file mode 100644
index 0000000000..225d8602ab
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/notification.go
@@ -0,0 +1,648 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// Details provided for the notification are basic. Details such as the related preferences, name and keyname for the notification can be retrieved. The keyname property for the notification can be used to refer to a notification when integrating into the SoftLayer Notification system. The name property can be used for display purposes.
+type Notification struct {
+ Entity
+
+ // Unique identifier for the notification.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Name that can be used by external systems to refer to a notification.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // Friendly name for the notification.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A count of the preferences related to the notification. These preferences are configurable and optional for subscribers to use.
+ PreferenceCount *uint `json:"preferenceCount,omitempty" xmlrpc:"preferenceCount,omitempty"`
+
+ // The preferences related to the notification. These preferences are configurable and optional for subscribers to use.
+ Preferences []Notification_Preference `json:"preferences,omitempty" xmlrpc:"preferences,omitempty"`
+
+ // A count of the required preferences related to the notification. While configurable, the subscriber does not have the option of whether to use the preference.
+ RequiredPreferenceCount *uint `json:"requiredPreferenceCount,omitempty" xmlrpc:"requiredPreferenceCount,omitempty"`
+
+ // The required preferences related to the notification. While configurable, the subscriber does not have the option of whether to use the preference.
+ RequiredPreferences []Notification_Preference `json:"requiredPreferences,omitempty" xmlrpc:"requiredPreferences,omitempty"`
+}
+
+// Provides details for the delivery methods available.
+type Notification_Delivery_Method struct {
+ Entity
+
+ // Determines if the delivery method is still used by the system.
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // Description used for the delivery method.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Unique identifier for the various notification delivery methods.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Name that can be used by external systems to refer to a delivery method.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // Friendly name used for the delivery method.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// This is an extension of the SoftLayer_Notification class. These are implementation details specific to those notifications which can be subscribed to and received on a mobile device.
+type Notification_Mobile struct {
+ Notification
+}
+
+// no documentation yet
+type Notification_Occurrence_Account struct {
+ Entity
+
+ // no documentation yet
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // no documentation yet
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // no documentation yet
+ LastNotificationUpdate *Notification_Occurrence_Update `json:"lastNotificationUpdate,omitempty" xmlrpc:"lastNotificationUpdate,omitempty"`
+
+ // no documentation yet
+ NotificationOccurrenceEvent *Notification_Occurrence_Event `json:"notificationOccurrenceEvent,omitempty" xmlrpc:"notificationOccurrenceEvent,omitempty"`
+}
+
+// no documentation yet
+type Notification_Occurrence_Event struct {
+ Entity
+
+ // Indicates whether or not this event has been acknowledged by the user.
+ AcknowledgedFlag *bool `json:"acknowledgedFlag,omitempty" xmlrpc:"acknowledgedFlag,omitempty"`
+
+ // A count of a collection of attachments for this event which provide supplementary information to impacted users; some examples are RFO (Reason For Outage) and root cause analysis documents.
+ AttachmentCount *uint `json:"attachmentCount,omitempty" xmlrpc:"attachmentCount,omitempty"`
+
+ // A collection of attachments for this event which provide supplementary information to impacted users; some examples are RFO (Reason For Outage) and root cause analysis documents.
+ Attachments []Notification_Occurrence_Event_Attachment `json:"attachments,omitempty" xmlrpc:"attachments,omitempty"`
+
+ // When this event will end.
+ EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+ // The first update for this event.
+ FirstUpdate *Notification_Occurrence_Update `json:"firstUpdate,omitempty" xmlrpc:"firstUpdate,omitempty"`
+
+ // Unique identifier for this event.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of a collection of accounts impacted by this event. Each impacted account record relates directly to a [[SoftLayer_Account]].
+ ImpactedAccountCount *uint `json:"impactedAccountCount,omitempty" xmlrpc:"impactedAccountCount,omitempty"`
+
+ // A collection of accounts impacted by this event. Each impacted account record relates directly to a [[SoftLayer_Account]].
+ ImpactedAccounts []Notification_Occurrence_Account `json:"impactedAccounts,omitempty" xmlrpc:"impactedAccounts,omitempty"`
+
+ // A count of a collection of resources impacted by this event. Each record will relate to some physical resource that the user has access to such as [[SoftLayer_Hardware]] or [[SoftLayer_Virtual_Guest]].
+ ImpactedResourceCount *uint `json:"impactedResourceCount,omitempty" xmlrpc:"impactedResourceCount,omitempty"`
+
+ // A collection of resources impacted by this event. Each record will relate to some physical resource that the user has access to such as [[SoftLayer_Hardware]] or [[SoftLayer_Virtual_Guest]].
+ ImpactedResources []Notification_Occurrence_Resource `json:"impactedResources,omitempty" xmlrpc:"impactedResources,omitempty"`
+
+ // A count of a collection of users impacted by this event. Each impacted user record relates directly to a [[SoftLayer_User_Customer]].
+ ImpactedUserCount *uint `json:"impactedUserCount,omitempty" xmlrpc:"impactedUserCount,omitempty"`
+
+ // A collection of users impacted by this event. Each impacted user record relates directly to a [[SoftLayer_User_Customer]].
+ ImpactedUsers []Notification_Occurrence_User `json:"impactedUsers,omitempty" xmlrpc:"impactedUsers,omitempty"`
+
+ // Latest count of users impacted by this event.
+ LastImpactedUserCount *int `json:"lastImpactedUserCount,omitempty" xmlrpc:"lastImpactedUserCount,omitempty"`
+
+ // The last update for this event.
+ LastUpdate *Notification_Occurrence_Update `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"`
+
+ // When this event was last updated.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The type of event such as planned or unplanned maintenance.
+ NotificationOccurrenceEventType *Notification_Occurrence_Event_Type `json:"notificationOccurrenceEventType,omitempty" xmlrpc:"notificationOccurrenceEventType,omitempty"`
+
+ // no documentation yet
+ RecoveryTime *int `json:"recoveryTime,omitempty" xmlrpc:"recoveryTime,omitempty"`
+
+ // When this event started.
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+ // no documentation yet
+ StatusCode *Notification_Occurrence_Status_Code `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"`
+
+ // Brief description of this event.
+ Subject *string `json:"subject,omitempty" xmlrpc:"subject,omitempty"`
+
+ // Details of this event.
+ Summary *string `json:"summary,omitempty" xmlrpc:"summary,omitempty"`
+
+ // Unique identifier for the [[SoftLayer_Ticket]] associated with this event.
+ SystemTicketId *int `json:"systemTicketId,omitempty" xmlrpc:"systemTicketId,omitempty"`
+
+ // A count of all updates for this event.
+ UpdateCount *uint `json:"updateCount,omitempty" xmlrpc:"updateCount,omitempty"`
+
+ // All updates for this event.
+ Updates []Notification_Occurrence_Update `json:"updates,omitempty" xmlrpc:"updates,omitempty"`
+}
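+
+// Editorial sketch (not generated): reading an event defensively, since any
+// pointer field may be nil when it was not requested in the object mask. The
+// output format here is an arbitrary choice.
+//
+//	func printEvent(ev datatypes.Notification_Occurrence_Event) {
+//		if ev.Subject != nil {
+//			fmt.Println("subject:", *ev.Subject)
+//		}
+//		if ev.StatusCode != nil && ev.StatusCode.Name != nil {
+//			fmt.Println("status:", *ev.StatusCode.Name)
+//		}
+//		for _, u := range ev.Updates {
+//			if u.Contents != nil {
+//				fmt.Println("update:", *u.Contents)
+//			}
+//		}
+//	}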
+
+// SoftLayer events can have files attached to them by a SoftLayer employee. Attaching a file to an event is a way to provide supplementary information such as an RFO (reason for outage) document or root cause analysis. The SoftLayer_Notification_Occurrence_Event_Attachment data type models a single file attached to an event.
+type Notification_Occurrence_Event_Attachment struct {
+ Entity
+
+ // The date the file was attached to the event.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The name of the file attached to the event.
+ FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"`
+
+ // The size of the file, measured in bytes.
+ FileSize *string `json:"fileSize,omitempty" xmlrpc:"fileSize,omitempty"`
+
+ // An event attachment's unique identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ NotificationOccurrenceEvent *Notification_Occurrence_Event `json:"notificationOccurrenceEvent,omitempty" xmlrpc:"notificationOccurrenceEvent,omitempty"`
+
+ // The unique event identifier that the file is attached to.
+ NotificationOccurrenceEventId *int `json:"notificationOccurrenceEventId,omitempty" xmlrpc:"notificationOccurrenceEventId,omitempty"`
+}
+
+// This represents the type of SoftLayer_Notification_Occurrence_Event.
+type Notification_Occurrence_Event_Type struct {
+ Entity
+
+ // The friendly unique identifier for this event type.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+}
+
+// This type contains general information relating to any hardware or services that may be impacted by a SoftLayer_Notification_Occurrence_Event.
+type Notification_Occurrence_Resource struct {
+ Entity
+
+ // no documentation yet
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // A label which gives some background as to what piece of
+ FilterLabel *string `json:"filterLabel,omitempty" xmlrpc:"filterLabel,omitempty"`
+
+ // The associated event.
+ NotificationOccurrenceEvent *Notification_Occurrence_Event `json:"notificationOccurrenceEvent,omitempty" xmlrpc:"notificationOccurrenceEvent,omitempty"`
+
+ // The unique identifier for the associated
+ NotificationOccurrenceEventId *int `json:"notificationOccurrenceEventId,omitempty" xmlrpc:"notificationOccurrenceEventId,omitempty"`
+
+ // The physical resource.
+ Resource *Entity `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+ // The unique identifier for the [[SoftLayer_Account]] associated with
+ ResourceAccountId *int `json:"resourceAccountId,omitempty" xmlrpc:"resourceAccountId,omitempty"`
+
+ // no documentation yet
+ ResourceName *string `json:"resourceName,omitempty" xmlrpc:"resourceName,omitempty"`
+
+ // The unique identifier for the physical resource that is associated
+ ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+}
+
+// This type contains general information related to a [[SoftLayer_Hardware]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]].
+type Notification_Occurrence_Resource_Hardware struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + PublicIp *string `json:"publicIp,omitempty" xmlrpc:"publicIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Application_Delivery_Controller]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Application_Delivery_Controller struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + PublicIp *string `json:"publicIp,omitempty" xmlrpc:"publicIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PublicIp *string `json:"publicIp,omitempty" xmlrpc:"publicIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_Iscsi_EqualLogic]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Storage_Iscsi_EqualLogic struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_Iscsi_NetApp]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Storage_Iscsi_NetApp struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_Lockbox]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. 
+type Notification_Occurrence_Resource_Network_Storage_Lockbox struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_Nas]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Storage_Nas struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Network_Storage_NetApp_Volume]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. +type Notification_Occurrence_Resource_Network_Storage_NetApp_Volume struct { + Notification_Occurrence_Resource + + // no documentation yet + Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"` + + // no documentation yet + PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"` + + // no documentation yet + ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"` +} + +// This type contains general information related to a [[SoftLayer_Virtual_Guest]] resource that is impacted by a [[SoftLayer_Notification_Occurrence_Event]]. 
+type Notification_Occurrence_Resource_Virtual struct {
+ Notification_Occurrence_Resource
+
+ // no documentation yet
+ Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"`
+
+ // no documentation yet
+ PrivateIp *string `json:"privateIp,omitempty" xmlrpc:"privateIp,omitempty"`
+
+ // no documentation yet
+ PublicIp *string `json:"publicIp,omitempty" xmlrpc:"publicIp,omitempty"`
+
+ // no documentation yet
+ ResourceType *string `json:"resourceType,omitempty" xmlrpc:"resourceType,omitempty"`
+}
+
+// no documentation yet
+type Notification_Occurrence_Status_Code struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Notification_Occurrence_Update struct {
+ Entity
+
+ // no documentation yet
+ Contents *string `json:"contents,omitempty" xmlrpc:"contents,omitempty"`
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"`
+
+ // no documentation yet
+ EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+ // no documentation yet
+ NotificationOccurrenceEvent *Notification_Occurrence_Event `json:"notificationOccurrenceEvent,omitempty" xmlrpc:"notificationOccurrenceEvent,omitempty"`
+
+ // no documentation yet
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+}
+
+// This type contains general information relating to a user that may be impacted by a [[SoftLayer_Notification_Occurrence_Event]].
+type Notification_Occurrence_User struct {
+ Entity
+
+ // no documentation yet
+ AcknowledgedFlag *int `json:"acknowledgedFlag,omitempty" xmlrpc:"acknowledgedFlag,omitempty"`
+
+ // no documentation yet
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of a collection of resources impacted by the associated event.
+ ImpactedResourceCount *uint `json:"impactedResourceCount,omitempty" xmlrpc:"impactedResourceCount,omitempty"`
+
+ // A collection of resources impacted by the associated event.
+ ImpactedResources []Notification_Occurrence_Resource `json:"impactedResources,omitempty" xmlrpc:"impactedResources,omitempty"`
+
+ // The associated event.
+ NotificationOccurrenceEvent *Notification_Occurrence_Event `json:"notificationOccurrenceEvent,omitempty" xmlrpc:"notificationOccurrenceEvent,omitempty"`
+
+ // The impacted user.
+ User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+ // no documentation yet
+ UsrRecordId *int `json:"usrRecordId,omitempty" xmlrpc:"usrRecordId,omitempty"`
+}
+
+// Retrieve details for preferences. Preferences are used to allow the subscriber to modify their subscription in various ways. Details such as friendly name, keyname, and maximum and minimum values can be retrieved. These provide details to help configure subscriber preferences correctly.
+type Notification_Preference struct {
+ Entity
+
+ // A description of what the preference is used for.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Unique identifier for the notification preference.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Name that can be used by external systems to refer to a preference.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // Largest value allowed for the preference.
+ MaximumValue *string `json:"maximumValue,omitempty" xmlrpc:"maximumValue,omitempty"`
+
+ // Smallest value allowed for the preference.
+ MinimumValue *string `json:"minimumValue,omitempty" xmlrpc:"minimumValue,omitempty"`
+
+ // Friendly name for the preference.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The unit of measure used for the preference's value, as well as its minimum and maximum.
+ Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"`
+
+ // Default value used when setting up preferences for a new subscriber.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
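+
+// Editorial sketch (not generated): preference values, minimums and maximums
+// travel as strings, so a caller validating a proposed override numerically
+// has to parse them first. Treating them as integers is an assumption; not
+// every preference is necessarily numeric.
+//
+//	func withinBounds(p datatypes.Notification_Preference, proposed string) bool {
+//		v, err := strconv.Atoi(proposed)
+//		if err != nil {
+//			return false
+//		}
+//		if p.MinimumValue != nil {
+//			if min, err := strconv.Atoi(*p.MinimumValue); err == nil && v < min {
+//				return false
+//			}
+//		}
+//		if p.MaximumValue != nil {
+//			if max, err := strconv.Atoi(*p.MaximumValue); err == nil && v > max {
+//				return false
+//			}
+//		}
+//		return true
+//	}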
+
+// no documentation yet
+type Notification_Subscriber struct {
+ Entity
+
+ // no documentation yet
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A count of
+ DeliveryMethodCount *uint `json:"deliveryMethodCount,omitempty" xmlrpc:"deliveryMethodCount,omitempty"`
+
+ // no documentation yet
+ DeliveryMethods []Notification_Subscriber_Delivery_Method `json:"deliveryMethods,omitempty" xmlrpc:"deliveryMethods,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Notification *Notification `json:"notification,omitempty" xmlrpc:"notification,omitempty"`
+
+ // no documentation yet
+ NotificationId *int `json:"notificationId,omitempty" xmlrpc:"notificationId,omitempty"`
+
+ // no documentation yet
+ NotificationSubscriberTypeId *int `json:"notificationSubscriberTypeId,omitempty" xmlrpc:"notificationSubscriberTypeId,omitempty"`
+
+ // no documentation yet
+ NotificationSubscriberTypeResourceId *int `json:"notificationSubscriberTypeResourceId,omitempty" xmlrpc:"notificationSubscriberTypeResourceId,omitempty"`
+}
+
+// no documentation yet
+type Notification_Subscriber_Customer struct {
+ Notification_Subscriber
+
+ // no documentation yet
+ SubscriberRecord *User_Customer `json:"subscriberRecord,omitempty" xmlrpc:"subscriberRecord,omitempty"`
+}
+
+// Provides details for the subscriber's delivery methods.
+type Notification_Subscriber_Delivery_Method struct {
+ Entity
+
+ // Indicates the subscriber's delivery method availability for notifications.
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // Date the subscriber's delivery method was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // Date the subscriber's delivery method was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ NotificationDeliveryMethod *Notification_Delivery_Method `json:"notificationDeliveryMethod,omitempty" xmlrpc:"notificationDeliveryMethod,omitempty"`
+
+ // Identifier for the notification delivery method.
+ NotificationDeliveryMethodId *int `json:"notificationDeliveryMethodId,omitempty" xmlrpc:"notificationDeliveryMethodId,omitempty"`
+
+ // no documentation yet
+ NotificationSubscriber *Notification_Subscriber `json:"notificationSubscriber,omitempty" xmlrpc:"notificationSubscriber,omitempty"`
+
+ // Identifier for the subscriber.
+ NotificationSubscriberId *int `json:"notificationSubscriberId,omitempty" xmlrpc:"notificationSubscriberId,omitempty"`
+}
+
+// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, preference details, and delivery methods for the subscriber.
+//
+// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required.
+type Notification_User_Subscriber struct {
+ Entity
+
+ // The current status of the subscription.
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // A count of the delivery methods used to send the subscribed notification.
+ DeliveryMethodCount *uint `json:"deliveryMethodCount,omitempty" xmlrpc:"deliveryMethodCount,omitempty"`
+
+ // The delivery methods used to send the subscribed notification.
+ DeliveryMethods []Notification_Delivery_Method `json:"deliveryMethods,omitempty" xmlrpc:"deliveryMethods,omitempty"`
+
+ // Unique identifier of the subscriber that will receive the alerts.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Notification subscribed to.
+ Notification *Notification `json:"notification,omitempty" xmlrpc:"notification,omitempty"`
+
+ // Unique identifier of the notification subscribed to.
+ NotificationId *int `json:"notificationId,omitempty" xmlrpc:"notificationId,omitempty"`
+
+ // A count of associated subscriber preferences used for the notification subscription. For example, preferences include number of deliveries (limit) and threshold.
+ PreferenceCount *uint `json:"preferenceCount,omitempty" xmlrpc:"preferenceCount,omitempty"`
+
+ // Associated subscriber preferences used for the notification subscription. For example, preferences include number of deliveries (limit) and threshold.
+ Preferences []Notification_User_Subscriber_Preference `json:"preferences,omitempty" xmlrpc:"preferences,omitempty"`
+
+ // A count of preference details such as description, minimum and maximum limits, default value and unit of measure.
+ PreferencesDetailCount *uint `json:"preferencesDetailCount,omitempty" xmlrpc:"preferencesDetailCount,omitempty"`
+
+ // Preference details such as description, minimum and maximum limits, default value and unit of measure.
+ PreferencesDetails []Notification_Preference `json:"preferencesDetails,omitempty" xmlrpc:"preferencesDetails,omitempty"`
+
+ // The subscriber id to resource id mapping.
+ ResourceRecord *Notification_User_Subscriber_Resource `json:"resourceRecord,omitempty" xmlrpc:"resourceRecord,omitempty"`
+
+ // User record for the subscription.
+ UserRecord *User_Customer `json:"userRecord,omitempty" xmlrpc:"userRecord,omitempty"`
+
+ // Unique identifier of the user the subscription is for.
+ UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"`
+}
+
+// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, preference details, and delivery methods for the subscriber.
+//
+// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required.
+type Notification_User_Subscriber_Billing struct {
+ Notification_User_Subscriber
+}
+
+// Provides mapping details of how the subscriber's notification will be delivered. This maps the subscriber's id to all the delivery method ids used to deliver the notification.
+type Notification_User_Subscriber_Delivery_Method struct {
+ Entity
+
+ // Determines if the delivery method is active for the user.
+ Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+ // Provides details for the method used to deliver the notification (email, sms, ticket).
+ DeliveryMethod *Notification_Delivery_Method `json:"deliveryMethod,omitempty" xmlrpc:"deliveryMethod,omitempty"`
+
+ // Unique identifier of the method used to deliver the notification.
+ NotificationMethodId *int `json:"notificationMethodId,omitempty" xmlrpc:"notificationMethodId,omitempty"`
+
+ // The Subscriber information tied to the delivery method.
+ NotificationUserSubscriber *Notification_User_Subscriber `json:"notificationUserSubscriber,omitempty" xmlrpc:"notificationUserSubscriber,omitempty"`
+
+ // Unique identifier of the subscriber tied to the delivery method.
+ NotificationUserSubscriberId *int `json:"notificationUserSubscriberId,omitempty" xmlrpc:"notificationUserSubscriberId,omitempty"`
+}
+
+// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, preference details, and delivery methods for the subscriber.
+//
+// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required.
+type Notification_User_Subscriber_Mobile struct {
+ Notification_User_Subscriber
+}
+
+// Preferences are settings that can be modified to change the behavior of the subscription. For example, modify the limit preference to only receive notifications 10 times instead of 1 during a billing cycle.
+//
+// NOTE: Some preferences have certain restrictions on values that can be set.
+type Notification_User_Subscriber_Preference struct {
+ Entity
+
+ // Details such as name, keyname, and minimum and maximum values for the preference.
+ DefaultPreference *Notification_Preference `json:"defaultPreference,omitempty" xmlrpc:"defaultPreference,omitempty"`
+
+ // Unique identifier for the subscriber's preferences.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Unique identifier of the default preference on which the subscriber preference is based. For example, if no preferences are supplied during the creation of a subscriber, the default values are pulled using this property.
+ NotificationPreferenceId *int `json:"notificationPreferenceId,omitempty" xmlrpc:"notificationPreferenceId,omitempty"`
+
+ // Details of the subscriber tied to the preference.
+ NotificationUserSubscriber *Notification_User_Subscriber `json:"notificationUserSubscriber,omitempty" xmlrpc:"notificationUserSubscriber,omitempty"`
+
+ // Unique identifier of the subscriber tied to the subscriber preference.
+ NotificationUserSubscriberId *int `json:"notificationUserSubscriberId,omitempty" xmlrpc:"notificationUserSubscriberId,omitempty"`
+
+ // The user-supplied value to "override" the "default" preference's value.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// Retrieve identifier cross-reference information. 
SoftLayer_Notification_User_Subscriber_Resource provides the resource table id and subscriber id relation. The resource table id is the id of the service the subscriber receives alerts for. This resource table id could be the unique identifier for a Storage Evault service, Global Load Balancer or CDN service. +type Notification_User_Subscriber_Resource struct { + Entity + + // The Subscriber information tied to the resource service. + NotificationUserSubscriber *Notification_User_Subscriber `json:"notificationUserSubscriber,omitempty" xmlrpc:"notificationUserSubscriber,omitempty"` + + // Unique identifier of the subscriber that will receive the alerts for the resource subscribed to a notification. + NotificationUserSubscriberId *int `json:"notificationUserSubscriberId,omitempty" xmlrpc:"notificationUserSubscriberId,omitempty"` + + // Unique identifier for a SoftLayer service that is subscribed to a notification. Currently, the SoftLayer services that can be subscribed to notifications are: + // + // Storage EVault CDN Global Load Balancer + // + // + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/product.go b/vendor/github.com/softlayer/softlayer-go/datatypes/product.go new file mode 100644 index 0000000000..a71d97985c --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/product.go @@ -0,0 +1,1836 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// A Catalog is defined as a set of prices for products that SoftLayer offers for sale. These prices are organized into packages which represent the different servers and services that SoftLayer offers. +type Product_Catalog struct { + Entity + + // A count of brands using this Catalog + BrandCount *uint `json:"brandCount,omitempty" xmlrpc:"brandCount,omitempty"` + + // Brands using this Catalog + Brands []Brand `json:"brands,omitempty" xmlrpc:"brands,omitempty"` + + // A count of packages available in this catalog + PackageCount *uint `json:"packageCount,omitempty" xmlrpc:"packageCount,omitempty"` + + // Packages available in this catalog + Packages []Product_Package `json:"packages,omitempty" xmlrpc:"packages,omitempty"` + + // A count of prices available in this catalog + PriceCount *uint `json:"priceCount,omitempty" xmlrpc:"priceCount,omitempty"` + + // Prices available in this catalog + Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"` + + // A count of products available in catalog + ProductCount *uint `json:"productCount,omitempty" xmlrpc:"productCount,omitempty"` + + // Products available in catalog + Products []Product_Item `json:"products,omitempty" xmlrpc:"products,omitempty"` +} + +// The SoftLayer_Product_Catalog_Item_Price type assigns an Item Price to a Catalog. This relation defines the composition of Item Prices in a Catalog. 
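+//
+// Usage sketch (illustrative, not part of the generated docs): catalog
+// prices are usually read indirectly, for example through the
+// SoftLayer_Product_Package service; the package id, mask, and variable
+// names below are placeholders, and imports are omitted.
+//
+//     sess := session.New(username, apiKey)
+//     prices, err := services.GetProductPackageService(sess).
+//         Id(46).
+//         Mask("id;recurringFee;item.description").
+//         GetItemPrices()
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     for _, p := range prices {
+//         if p.Item != nil && p.Item.Description != nil {
+//             fmt.Println(*p.Item.Description)
+//         }
+//     }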
+type Product_Catalog_Item_Price struct {
+ Entity
+
+ // Catalog being assigned
+ Catalog *Product_Catalog `json:"catalog,omitempty" xmlrpc:"catalog,omitempty"`
+
+ // The id of the Catalog the Item Price is part of.
+ CatalogId *int `json:"catalogId,omitempty" xmlrpc:"catalogId,omitempty"`
+
+ // The time the Item Price was defined in the Catalog
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The time the Item Price was changed for the Catalog
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // Price being assigned
+ Price *Product_Item_Price `json:"price,omitempty" xmlrpc:"price,omitempty"`
+
+ // The id of the Item Price that is part of the Catalog.
+ PriceId *int `json:"priceId,omitempty" xmlrpc:"priceId,omitempty"`
+}
+
+// The SoftLayer_Product_Group data type contains product group relationships.
+type Product_Group struct {
+ Entity
+
+ // The name of the product group.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Product_Item data type contains general information relating to a single SoftLayer product.
+type Product_Item struct {
+ Entity
+
+ // A count of
+ ActivePresaleEventCount *uint `json:"activePresaleEventCount,omitempty" xmlrpc:"activePresaleEventCount,omitempty"`
+
+ // no documentation yet
+ ActivePresaleEvents []Sales_Presale_Event `json:"activePresaleEvents,omitempty" xmlrpc:"activePresaleEvents,omitempty"`
+
+ // A count of active usage based prices.
+ ActiveUsagePriceCount *uint `json:"activeUsagePriceCount,omitempty" xmlrpc:"activeUsagePriceCount,omitempty"`
+
+ // Active usage based prices.
+ ActiveUsagePrices []Product_Item_Price `json:"activeUsagePrices,omitempty" xmlrpc:"activeUsagePrices,omitempty"`
+
+ // A count of the attribute values for a product item. These are additional properties that give extra information about the product being sold.
+ AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+ // The attribute values for a product item. These are additional properties that give extra information about the product being sold.
+ Attributes []Product_Item_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+ // A count of attributes that govern when an item may no longer be available.
+ AvailabilityAttributeCount *uint `json:"availabilityAttributeCount,omitempty" xmlrpc:"availabilityAttributeCount,omitempty"`
+
+ // Attributes that govern when an item may no longer be available.
+ AvailabilityAttributes []Product_Item_Attribute `json:"availabilityAttributes,omitempty" xmlrpc:"availabilityAttributes,omitempty"`
+
+ // An item's special billing type, if applicable.
+ BillingType *string `json:"billingType,omitempty" xmlrpc:"billingType,omitempty"`
+
+ // An item's included products. Some items have other items included in them that we specifically detail. They are here called Bundled Items. An example is Plesk unlimited. It has a bundled item labeled 'SiteBuilder'. These are the SoftLayer_Product_Item_Bundles objects.
+ Bundle []Product_Item_Bundles `json:"bundle,omitempty" xmlrpc:"bundle,omitempty"`
+
+ // A count of an item's included products. Some items have other items included in them that we specifically detail. They are here called Bundled Items. An example is Plesk unlimited. It has a bundled item labeled 'SiteBuilder'. These are the SoftLayer_Product_Item_Bundles objects.
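+//
+// Illustrative only: given an already-fetched Product_Item named item, its
+// bundled items can be walked like this (every field is a pointer, so nil
+// checks matter):
+//
+//     for _, b := range item.Bundle {
+//         if b.BundleItem != nil && b.BundleItem.Description != nil {
+//             fmt.Println(*b.BundleItem.Description)
+//         }
+//     }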
+ BundleCount *uint `json:"bundleCount,omitempty" xmlrpc:"bundleCount,omitempty"`
+
+ // Some Product Items have capacity information such as RAM, bandwidth, and others. This provides the numerical representation of the capacity given in the description of this product item.
+ Capacity *Float64 `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"`
+
+ // When the product capacity is best described as a range, this holds the ceiling of the range.
+ CapacityMaximum *string `json:"capacityMaximum,omitempty" xmlrpc:"capacityMaximum,omitempty"`
+
+ // When the product capacity is best described as a range, this holds the floor of the range.
+ CapacityMinimum *string `json:"capacityMinimum,omitempty" xmlrpc:"capacityMinimum,omitempty"`
+
+ // This flag indicates that this product is restricted by a capacity on a related product.
+ CapacityRestrictedProductFlag *bool `json:"capacityRestrictedProductFlag,omitempty" xmlrpc:"capacityRestrictedProductFlag,omitempty"`
+
+ // An item's associated item categories.
+ Categories []Product_Item_Category `json:"categories,omitempty" xmlrpc:"categories,omitempty"`
+
+ // A count of an item's associated item categories.
+ CategoryCount *uint `json:"categoryCount,omitempty" xmlrpc:"categoryCount,omitempty"`
+
+ // A count of some product items have configuration templates which can be used during provisioning of that product.
+ ConfigurationTemplateCount *uint `json:"configurationTemplateCount,omitempty" xmlrpc:"configurationTemplateCount,omitempty"`
+
+ // Some product items have configuration templates which can be used during provisioning of that product.
+ ConfigurationTemplates []Configuration_Template `json:"configurationTemplates,omitempty" xmlrpc:"configurationTemplates,omitempty"`
+
+ // An item's conflicts. For example, McAfee LinuxShield cannot be ordered with Windows. It was not meant for that operating system and as such is a conflict.
+ Conflicts []Product_Item_Resource_Conflict `json:"conflicts,omitempty" xmlrpc:"conflicts,omitempty"`
+
+ // This flag indicates that this product is restricted by the number of cores on the compute instance. This is deprecated. Use [[SoftLayer_Product_Item/getCapacityRestrictedProductFlag|getCapacityRestrictedProductFlag]] instead.
+ CoreRestrictedItemFlag *bool `json:"coreRestrictedItemFlag,omitempty" xmlrpc:"coreRestrictedItemFlag,omitempty"`
+
+ // A product's description
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Some product items have a downgrade path. This is the first product item in the downgrade path.
+ DowngradeItem *Product_Item `json:"downgradeItem,omitempty" xmlrpc:"downgradeItem,omitempty"`
+
+ // A count of some product items have a downgrade path. These are those product items.
+ DowngradeItemCount *uint `json:"downgradeItemCount,omitempty" xmlrpc:"downgradeItemCount,omitempty"`
+
+ // Some product items have a downgrade path. These are those product items.
+ DowngradeItems []Product_Item `json:"downgradeItems,omitempty" xmlrpc:"downgradeItems,omitempty"`
+
+ // An item's category conflicts. For example, 10 Gbps redundant network functionality cannot be ordered with a secondary GPU and as such is a conflict.
+ GlobalCategoryConflicts []Product_Item_Resource_Conflict `json:"globalCategoryConflicts,omitempty" xmlrpc:"globalCategoryConflicts,omitempty"`
+
+ // The generic hardware component that this item represents.
+ HardwareGenericComponentModel *Hardware_Component_Model_Generic `json:"hardwareGenericComponentModel,omitempty" xmlrpc:"hardwareGenericComponentModel,omitempty"`
+
+ // no documentation yet
+ HideFromPortalFlag *bool `json:"hideFromPortalFlag,omitempty" xmlrpc:"hideFromPortalFlag,omitempty"`
+
+ // A product's internal identification number
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // An item's inventory status per datacenter.
+ Inventory []Product_Package_Inventory `json:"inventory,omitempty" xmlrpc:"inventory,omitempty"`
+
+ // A count of an item's inventory status per datacenter.
+ InventoryCount *uint `json:"inventoryCount,omitempty" xmlrpc:"inventoryCount,omitempty"`
+
+ // Flag to indicate the server product is engineered for a multi-server solution. (Deprecated)
+ IsEngineeredServerProduct *bool `json:"isEngineeredServerProduct,omitempty" xmlrpc:"isEngineeredServerProduct,omitempty"`
+
+ // An item's primary item category.
+ ItemCategory *Product_Item_Category `json:"itemCategory,omitempty" xmlrpc:"itemCategory,omitempty"`
+
+ // A product's tax category internal identification number
+ ItemTaxCategoryId *int `json:"itemTaxCategoryId,omitempty" xmlrpc:"itemTaxCategoryId,omitempty"`
+
+ // A unique key name for the product.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ LocalDiskFlag *bool `json:"localDiskFlag,omitempty" xmlrpc:"localDiskFlag,omitempty"`
+
+ // An item's location conflicts. For example, Dual Path network functionality cannot be ordered in WDC and as such is a conflict.
+ LocationConflicts []Product_Item_Resource_Conflict `json:"locationConflicts,omitempty" xmlrpc:"locationConflicts,omitempty"`
+
+ // Detailed product description
+ LongDescription *string `json:"longDescription,omitempty" xmlrpc:"longDescription,omitempty"`
+
+ // no documentation yet
+ ObjectStorageClusterGeolocationType *string `json:"objectStorageClusterGeolocationType,omitempty" xmlrpc:"objectStorageClusterGeolocationType,omitempty"`
+
+ // no documentation yet
+ ObjectStorageItemFlag *bool `json:"objectStorageItemFlag,omitempty" xmlrpc:"objectStorageItemFlag,omitempty"`
+
+ // no documentation yet
+ ObjectStorageServiceClass *string `json:"objectStorageServiceClass,omitempty" xmlrpc:"objectStorageServiceClass,omitempty"`
+
+ // A count of a collection of all the SoftLayer_Product_Package(s) in which this item exists.
+ PackageCount *uint `json:"packageCount,omitempty" xmlrpc:"packageCount,omitempty"`
+
+ // A collection of all the SoftLayer_Product_Package(s) in which this item exists.
+ Packages []Product_Package `json:"packages,omitempty" xmlrpc:"packages,omitempty"`
+
+ // The number of cores that a processor has.
+ PhysicalCoreCapacity *string `json:"physicalCoreCapacity,omitempty" xmlrpc:"physicalCoreCapacity,omitempty"`
+
+ // A count of
+ PresaleEventCount *uint `json:"presaleEventCount,omitempty" xmlrpc:"presaleEventCount,omitempty"`
+
+ // no documentation yet
+ PresaleEvents []Sales_Presale_Event `json:"presaleEvents,omitempty" xmlrpc:"presaleEvents,omitempty"`
+
+ // A count of a product item's prices.
+ PriceCount *uint `json:"priceCount,omitempty" xmlrpc:"priceCount,omitempty"`
+
+ // A product item's prices.
+ Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"`
+
+ // If an item must be ordered with another item, it will have a requirement item here.
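+//
+// For example (sketch, using only fields defined on this type): each
+// requirement names the id of another item that must accompany this one on
+// an order.
+//
+//     for _, req := range item.Requirements {
+//         if req.RequiredItemId != nil {
+//             fmt.Printf("also requires item %d\n", *req.RequiredItemId)
+//         }
+//     }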
+ Requirements []Product_Item_Requirement `json:"requirements,omitempty" xmlrpc:"requirements,omitempty"`
+
+ // A count of an item's rules. This includes the requirements and conflicts to resources that an item has.
+ RuleCount *uint `json:"ruleCount,omitempty" xmlrpc:"ruleCount,omitempty"`
+
+ // An item's rules. This includes the requirements and conflicts to resources that an item has.
+ Rules []Product_Item_Rule `json:"rules,omitempty" xmlrpc:"rules,omitempty"`
+
+ // The SoftLayer_Software_Description tied to this item. This will only be populated for software items.
+ SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"`
+
+ // The unique identifier of the SoftLayer_Software_Description tied to this item.
+ SoftwareDescriptionId *int `json:"softwareDescriptionId,omitempty" xmlrpc:"softwareDescriptionId,omitempty"`
+
+ // An item's tax category, if applicable.
+ TaxCategory *Product_Item_Tax_Category `json:"taxCategory,omitempty" xmlrpc:"taxCategory,omitempty"`
+
+ // A count of third-party policy assignments for this product.
+ ThirdPartyPolicyAssignmentCount *uint `json:"thirdPartyPolicyAssignmentCount,omitempty" xmlrpc:"thirdPartyPolicyAssignmentCount,omitempty"`
+
+ // Third-party policy assignments for this product.
+ ThirdPartyPolicyAssignments []Product_Item_Policy_Assignment `json:"thirdPartyPolicyAssignments,omitempty" xmlrpc:"thirdPartyPolicyAssignments,omitempty"`
+
+ // The 3rd party vendor for a support subscription item. (Deprecated)
+ ThirdPartySupportVendor *string `json:"thirdPartySupportVendor,omitempty" xmlrpc:"thirdPartySupportVendor,omitempty"`
+
+ // The total number of physical processing cores (excluding virtual cores / hyperthreads) for this server.
+ TotalPhysicalCoreCapacity *int `json:"totalPhysicalCoreCapacity,omitempty" xmlrpc:"totalPhysicalCoreCapacity,omitempty"`
+
+ // Shows the total number of cores. This is deprecated. Use [[SoftLayer_Product_Item/getCapacity|getCapacity]] for guest_core products and [[SoftLayer_Product_Item/getTotalPhysicalCoreCapacity|getTotalPhysicalCoreCapacity]] for server products.
+ TotalPhysicalCoreCount *int `json:"totalPhysicalCoreCount,omitempty" xmlrpc:"totalPhysicalCoreCount,omitempty"`
+
+ // The total number of processors for this server.
+ TotalProcessorCapacity *int `json:"totalProcessorCapacity,omitempty" xmlrpc:"totalProcessorCapacity,omitempty"`
+
+ // The unit of measurement that a product item is measured in.
+ Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"`
+
+ // Some product items have an upgrade path. This is the next product item in the upgrade path.
+ UpgradeItem *Product_Item `json:"upgradeItem,omitempty" xmlrpc:"upgradeItem,omitempty"`
+
+ // A count of some product items have an upgrade path. These are those upgrade product items.
+ UpgradeItemCount *uint `json:"upgradeItemCount,omitempty" xmlrpc:"upgradeItemCount,omitempty"`
+
+ // A product's upgrade item internal identification number
+ UpgradeItemId *int `json:"upgradeItemId,omitempty" xmlrpc:"upgradeItemId,omitempty"`
+
+ // Some product items have an upgrade path. These are those upgrade product items.
+ UpgradeItems []Product_Item `json:"upgradeItems,omitempty" xmlrpc:"upgradeItems,omitempty"`
+}
+
+// The [[SoftLayer_Product_Item_Attribute]] data type allows us to describe a [[SoftLayer_Product_Item]] by attaching specific attributes, which may dictate how it interacts with other products and services. Most, if not all, of these attributes are geared towards internal usage, so customers should rarely be concerned with them.
+type Product_Item_Attribute struct {
+ Entity
+
+ // This represents the attribute type of this product attribute.
+ AttributeType *Product_Item_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"`
+
+ // This represents the attribute type's key name of this product attribute.
+ AttributeTypeKeyName *string `json:"attributeTypeKeyName,omitempty" xmlrpc:"attributeTypeKeyName,omitempty"`
+
+ // This is the primary key value for the product attribute.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // This represents the product that an attribute is tied to.
+ Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"`
+
+ // This is a foreign key value for the [[SoftLayer_Product_Item_Attribute_Type]].
+ ItemAttributeTypeId *int `json:"itemAttributeTypeId,omitempty" xmlrpc:"itemAttributeTypeId,omitempty"`
+
+ // This is a foreign key value for the [[SoftLayer_Product_Item]].
+ ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"`
+
+ // This is the value for the attribute.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// The [[SoftLayer_Product_Item_Attribute_Type]] data type defines the types of product attributes that are available. This allows for convenient reference to a [[SoftLayer_Product_Item_Attribute|product attribute]] by a unique key name value.
+type Product_Item_Attribute_Type struct {
+ Entity
+
+ // This is the unique identifier of the attribute type.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // This is the user-friendly readable name of the attribute type.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Product_Item_Billing_Type data type models special billing types for non-monthly billed items in the SoftLayer product catalog.
+type Product_Item_Billing_Type struct {
+ Entity
+
+ // A keyword describing a SoftLayer product item billing type.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Product_Item_Bundles data type contains item-to-price cross references. It relates a category, price, and item to a bundle. Match bundle ids to see all items and prices in a particular bundle.
+type Product_Item_Bundles struct {
+ Entity
+
+ // Item in bundle.
+ BundleItem *Product_Item `json:"bundleItem,omitempty" xmlrpc:"bundleItem,omitempty"`
+
+ // Identifier for bundle.
+ BundleItemId *int `json:"bundleItemId,omitempty" xmlrpc:"bundleItemId,omitempty"`
+
+ // Category bundle falls in.
+ Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"`
+
+ // Identifier for record.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // Price of item in bundle
+ ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"`
+
+ // Identifier for price.
+ ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"`
+}
+
+// The SoftLayer_Product_Item_Category data type contains general category information for prices.
+type Product_Item_Category struct {
+ Entity
+
+ // A count of the billing items associated with an account that share a category code with an item category's category code.
+ BillingItemCount *uint `json:"billingItemCount,omitempty" xmlrpc:"billingItemCount,omitempty"`
+
+ // The billing items associated with an account that share a category code with an item category's category code.
+ BillingItems []Billing_Item `json:"billingItems,omitempty" xmlrpc:"billingItems,omitempty"`
+
+ // The code used to identify this category.
+ CategoryCode *string `json:"categoryCode,omitempty" xmlrpc:"categoryCode,omitempty"`
+
+ // This invoice item's "item category group".
+ Group *Product_Item_Category_Group `json:"group,omitempty" xmlrpc:"group,omitempty"`
+
+ // A count of a collection of service offering category groups. Each group contains a collection of items associated with this category.
+ GroupCount *uint `json:"groupCount,omitempty" xmlrpc:"groupCount,omitempty"`
+
+ // A collection of service offering category groups. Each group contains a collection of items associated with this category.
+ Groups []Product_Package_Item_Category_Group `json:"groups,omitempty" xmlrpc:"groups,omitempty"`
+
+ // identifier for category.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The friendly, descriptive name of the category as seen on the order forms and on invoices.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A count of any unique options associated with an item category.
+ OrderOptionCount *uint `json:"orderOptionCount,omitempty" xmlrpc:"orderOptionCount,omitempty"`
+
+ // Any unique options associated with an item category.
+ OrderOptions []Product_Item_Category_Order_Option_Type `json:"orderOptions,omitempty" xmlrpc:"orderOptions,omitempty"`
+
+ // A count of a list of configurations available in this category.
+ PackageConfigurationCount *uint `json:"packageConfigurationCount,omitempty" xmlrpc:"packageConfigurationCount,omitempty"`
+
+ // A list of configurations available in this category.
+ PackageConfigurations []Product_Package_Order_Configuration `json:"packageConfigurations,omitempty" xmlrpc:"packageConfigurations,omitempty"`
+
+ // A count of a list of preset configurations this category is used in.
+ PresetConfigurationCount *uint `json:"presetConfigurationCount,omitempty" xmlrpc:"presetConfigurationCount,omitempty"`
+
+ // A list of preset configurations this category is used in.
+ PresetConfigurations []Product_Package_Preset_Configuration `json:"presetConfigurations,omitempty" xmlrpc:"presetConfigurations,omitempty"`
+
+ // Quantity that can be ordered. If 0, it will inherit the quantity from the server quantity ordered. Otherwise it can be specified with the order separately.
+ QuantityLimit *int `json:"quantityLimit,omitempty" xmlrpc:"quantityLimit,omitempty"`
+
+ // A count of the questions that are associated with an item category.
+ QuestionCount *uint `json:"questionCount,omitempty" xmlrpc:"questionCount,omitempty"`
+
+ // A count of the question references that are associated with an item category.
+ QuestionReferenceCount *uint `json:"questionReferenceCount,omitempty" xmlrpc:"questionReferenceCount,omitempty"`
+
+ // The question references that are associated with an item category.
+ QuestionReferences []Product_Item_Category_Question_Xref `json:"questionReferences,omitempty" xmlrpc:"questionReferences,omitempty"`
+
+ // The questions that are associated with an item category.
+ Questions []Product_Item_Category_Question `json:"questions,omitempty" xmlrpc:"questions,omitempty"`
+
+ // The sort order of the category. It may be used to affect the order in which the category may appear in lists (on order forms and invoices).
+ SortOrder *int `json:"sortOrder,omitempty" xmlrpc:"sortOrder,omitempty"`
+}
+
+// The SoftLayer_Product_Item_Category_Group data type contains general category group information.
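+//
+// Hedged usage sketch: categories (and their groups) usually surface when
+// walking a fetched package's order configuration; pkg below is assumed to
+// be a datatypes.Product_Package retrieved with its configuration relation.
+//
+//     for _, cfg := range pkg.Configuration {
+//         if cfg.ItemCategory != nil && cfg.ItemCategory.CategoryCode != nil {
+//             fmt.Println(*cfg.ItemCategory.CategoryCode)
+//         }
+//     }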
+type Product_Item_Category_Group struct { + Entity + + // identifier for category group. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The friendly, descriptive name of the category group as seen on the order forms and on invoices. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Product_Item_Category_Order_Option_Type data type contains options that can be applied to orders for prices. +type Product_Item_Category_Order_Option_Type struct { + Entity + + // An item category order type's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // An item category order type's unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A simple description for an item category order type. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` + + // An item category order type's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The value of the item category type's option. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Product_Item_Category_Question data type represents a single question to be answered by an end user. The question may or may not be required which can be located by looking at the 'required' property on the item category references. The answerValueExpression property is a regular expression that is used to validate the answer to the question. The description and valueExample properties can be used to get an idea of the type of answer that should be provided. +type Product_Item_Category_Question struct { + Entity + + // The type of answer expected. + AnswerValueExpression *string `json:"answerValueExpression,omitempty" xmlrpc:"answerValueExpression,omitempty"` + + // The description for the question. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The type of field that should be used in an HTML form to accept an answer from an end user. + FieldType *Product_Item_Category_Question_Field_Type `json:"fieldType,omitempty" xmlrpc:"fieldType,omitempty"` + + // The type of field to use. + FieldTypeId *int `json:"fieldTypeId,omitempty" xmlrpc:"fieldTypeId,omitempty"` + + // identifier for category. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the link between an item category and an item category question. + ItemCategoryReferenceCount *uint `json:"itemCategoryReferenceCount,omitempty" xmlrpc:"itemCategoryReferenceCount,omitempty"` + + // The link between an item category and an item category question. + ItemCategoryReferences []Product_Item_Category_Question_Xref `json:"itemCategoryReferences,omitempty" xmlrpc:"itemCategoryReferences,omitempty"` + + // The keyname for the question. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The question for the category. + Question *string `json:"question,omitempty" xmlrpc:"question,omitempty"` + + // An example and/or explanation of what the answer for the question is expected to look like. + ValueExample *string `json:"valueExample,omitempty" xmlrpc:"valueExample,omitempty"` +} + +// The SoftLayer_Product_Item_Category_Question_Field_Type data type represents the recommended type of field that should be rendered on an HTML form. +type Product_Item_Category_Question_Field_Type struct { + Entity + + // Identifier for the question type. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Keyname for the question field type. 
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // Short name for the question field type.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Product_Item_Category_Question_Xref data type represents a link between an item category and an item category question. It also contains a 'required' field that designates if the question is required to be answered for the given item category.
+type Product_Item_Category_Question_Xref struct {
+ Entity
+
+ // Identifier for category question xref record.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The product item category that this reference points to.
+ ItemCategory *Product_Item_Category `json:"itemCategory,omitempty" xmlrpc:"itemCategory,omitempty"`
+
+ // Identifier for item category.
+ ItemCategoryId *int `json:"itemCategoryId,omitempty" xmlrpc:"itemCategoryId,omitempty"`
+
+ // Identifier for the location.
+ LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"`
+
+ // The item category question that this reference points to.
+ Question *Product_Item_Category_Question `json:"question,omitempty" xmlrpc:"question,omitempty"`
+
+ // Identifier for the question.
+ QuestionId *int `json:"questionId,omitempty" xmlrpc:"questionId,omitempty"`
+
+ // Flag to indicate whether an answer is required for the question.
+ Required *bool `json:"required,omitempty" xmlrpc:"required,omitempty"`
+}
+
+// no documentation yet
+type Product_Item_Link_ThePlanet struct {
+ Entity
+
+ // no documentation yet
+ Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"`
+
+ // no documentation yet
+ ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"`
+}
+
+// Represents the assignment of a policy to a product. The existence of a record means that the associated product is subject to the terms defined in the document content of the policy.
+type Product_Item_Policy_Assignment struct {
+ Entity
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The name of the assigned policy.
+ PolicyName *string `json:"policyName,omitempty" xmlrpc:"policyName,omitempty"`
+
+ // The [[SoftLayer_Product_Item]] for this policy assignment.
+ Product *Product_Item `json:"product,omitempty" xmlrpc:"product,omitempty"`
+
+ // no documentation yet
+ ProductId *int `json:"productId,omitempty" xmlrpc:"productId,omitempty"`
+}
+
+// The SoftLayer_Product_Item_Price data type contains general information relating to a single SoftLayer product item price. You can find out what packages each price is in as well as the category under which this price is sold. All prices are returned in floating point values measured in US Dollars ($USD).
+type Product_Item_Price struct {
+ Entity
+
+ // A count of the account that the item price is restricted to.
+ AccountRestrictionCount *uint `json:"accountRestrictionCount,omitempty" xmlrpc:"accountRestrictionCount,omitempty"`
+
+ // The account that the item price is restricted to.
+ AccountRestrictions []Product_Item_Price_Account_Restriction `json:"accountRestrictions,omitempty" xmlrpc:"accountRestrictions,omitempty"`
+
+ // A count of
+ AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+ // no documentation yet
+ Attributes []Product_Item_Price_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+ // Whether the price is for Big Data OS/Journal disks only. (Deprecated)
+ BigDataOsJournalDiskFlag *bool `json:"bigDataOsJournalDiskFlag,omitempty" xmlrpc:"bigDataOsJournalDiskFlag,omitempty"`
+
+ // A count of cross reference for bundles
+ BundleReferenceCount *uint `json:"bundleReferenceCount,omitempty" xmlrpc:"bundleReferenceCount,omitempty"`
+
+ // cross reference for bundles
+ BundleReferences []Product_Item_Bundles `json:"bundleReferences,omitempty" xmlrpc:"bundleReferences,omitempty"`
+
+ // The maximum capacity value for which this price is suitable.
+ CapacityRestrictionMaximum *string `json:"capacityRestrictionMaximum,omitempty" xmlrpc:"capacityRestrictionMaximum,omitempty"`
+
+ // The minimum capacity value for which this price is suitable.
+ CapacityRestrictionMinimum *string `json:"capacityRestrictionMinimum,omitempty" xmlrpc:"capacityRestrictionMinimum,omitempty"`
+
+ // The type of capacity restriction by which this price must abide.
+ CapacityRestrictionType *string `json:"capacityRestrictionType,omitempty" xmlrpc:"capacityRestrictionType,omitempty"`
+
+ // All categories of which this item is a member.
+ Categories []Product_Item_Category `json:"categories,omitempty" xmlrpc:"categories,omitempty"`
+
+ // A count of all categories of which this item is a member.
+ CategoryCount *uint `json:"categoryCount,omitempty" xmlrpc:"categoryCount,omitempty"`
+
+ // This flag is used by the [[SoftLayer_Hardware::getUpgradeItems|getUpgradeItems]] method to indicate if a product price is used for the current billing item.
+ CurrentPriceFlag *bool `json:"currentPriceFlag,omitempty" xmlrpc:"currentPriceFlag,omitempty"`
+
+ // Whether this price defines a software license for its product item.
+ DefinedSoftwareLicenseFlag *bool `json:"definedSoftwareLicenseFlag,omitempty" xmlrpc:"definedSoftwareLicenseFlag,omitempty"`
+
+ // The hourly price for this item, should this item be part of an hourly pricing package.
+ HourlyRecurringFee *Float64 `json:"hourlyRecurringFee,omitempty" xmlrpc:"hourlyRecurringFee,omitempty"`
+
+ // The unique identifier of a Product Item Price.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // An item price's inventory status per datacenter.
+ Inventory []Product_Package_Inventory `json:"inventory,omitempty" xmlrpc:"inventory,omitempty"`
+
+ // A count of an item price's inventory status per datacenter.
+ InventoryCount *uint `json:"inventoryCount,omitempty" xmlrpc:"inventoryCount,omitempty"`
+
+ // The product item a price is tied to.
+ Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"`
+
+ // The unique identifier for a product Item
+ ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"`
+
+ // The labor fee for a product item price.
+ LaborFee *Float64 `json:"laborFee,omitempty" xmlrpc:"laborFee,omitempty"`
+
+ // The id of the [[SoftLayer_Location_Group_Pricing]] that this price is part of. If set to null, the price is considered a standard price, which can be used with any location when ordering.
+ //
+ // During order [[SoftLayer_Product_Order/verifyOrder|verification]] and [[SoftLayer_Product_Order/placeOrder|placement]], if a standard price is used, that price may be replaced with a location based price, which does not have this property set to null. The location based price must be part of a [[SoftLayer_Location_Group_Pricing]] that has the location being ordered in order for this to happen.
+ LocationGroupId *int `json:"locationGroupId,omitempty" xmlrpc:"locationGroupId,omitempty"`
+
+ // On sale flag.
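+//
+// The fee fields on this type are pointers to the package's Float64 alias;
+// a hedged helper (illustrative, not part of the library) for picking an
+// hourly vs. monthly figure might look like:
+//
+//     func fee(p datatypes.Product_Item_Price, hourly bool) float64 {
+//         if hourly && p.HourlyRecurringFee != nil {
+//             return float64(*p.HourlyRecurringFee)
+//         }
+//         if p.RecurringFee != nil {
+//             return float64(*p.RecurringFee)
+//         }
+//         return 0
+//     }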
+ OnSaleFlag *bool `json:"onSaleFlag,omitempty" xmlrpc:"onSaleFlag,omitempty"`
+
+ // The one time fee for a product item price.
+ OneTimeFee *Float64 `json:"oneTimeFee,omitempty" xmlrpc:"oneTimeFee,omitempty"`
+
+ // A price's total tax amount of the one time fees (oneTimeFee, laborFee, and setupFee). This is only populated after the order is verified via SoftLayer_Product_Order::verifyOrder()
+ OneTimeFeeTax *Float64 `json:"oneTimeFeeTax,omitempty" xmlrpc:"oneTimeFeeTax,omitempty"`
+
+ // Order options for the category that this price is associated with.
+ OrderOptions []Product_Item_Category_Order_Option_Type `json:"orderOptions,omitempty" xmlrpc:"orderOptions,omitempty"`
+
+ // A count of
+ OrderPremiumCount *uint `json:"orderPremiumCount,omitempty" xmlrpc:"orderPremiumCount,omitempty"`
+
+ // no documentation yet
+ OrderPremiums []Product_Item_Price_Premium `json:"orderPremiums,omitempty" xmlrpc:"orderPremiums,omitempty"`
+
+ // A count of a price's packages under which this item is sold.
+ PackageCount *uint `json:"packageCount,omitempty" xmlrpc:"packageCount,omitempty"`
+
+ // A count of cross reference for packages
+ PackageReferenceCount *uint `json:"packageReferenceCount,omitempty" xmlrpc:"packageReferenceCount,omitempty"`
+
+ // cross reference for packages
+ PackageReferences []Product_Package_Item_Prices `json:"packageReferences,omitempty" xmlrpc:"packageReferences,omitempty"`
+
+ // A price's packages under which this item is sold.
+ Packages []Product_Package `json:"packages,omitempty" xmlrpc:"packages,omitempty"`
+
+ // A count of a list of preset configurations this price is used in.
+ PresetConfigurationCount *uint `json:"presetConfigurationCount,omitempty" xmlrpc:"presetConfigurationCount,omitempty"`
+
+ // A list of preset configurations this price is used in.
+ PresetConfigurations []Product_Package_Preset_Configuration `json:"presetConfigurations,omitempty" xmlrpc:"presetConfigurations,omitempty"`
+
+ // The pricing location group that this price is applicable for. Prices that have a pricing location group will only be available for ordering with the locations specified on the location group.
+ PricingLocationGroup *Location_Group_Pricing `json:"pricingLocationGroup,omitempty" xmlrpc:"pricingLocationGroup,omitempty"`
+
+ // A recurring fee is a fee that happens every billing period. This fee is represented as a floating point decimal in US dollars ($USD).
+ ProratedRecurringFee *Float64 `json:"proratedRecurringFee,omitempty" xmlrpc:"proratedRecurringFee,omitempty"`
+
+ // A price's tax amount of the recurring fee. This is only populated after the order is verified via SoftLayer_Product_Order::verifyOrder()
+ ProratedRecurringFeeTax *Float64 `json:"proratedRecurringFeeTax,omitempty" xmlrpc:"proratedRecurringFeeTax,omitempty"`
+
+ // no documentation yet
+ Quantity *int `json:"quantity,omitempty" xmlrpc:"quantity,omitempty"`
+
+ // A recurring fee is a fee that happens every billing period. This fee is represented as a floating point decimal in US dollars ($USD).
+ RecurringFee *Float64 `json:"recurringFee,omitempty" xmlrpc:"recurringFee,omitempty"`
+
+ // A price's tax amount of the recurring fee. This is only populated after the order is verified via SoftLayer_Product_Order::verifyOrder()
+ RecurringFeeTax *Float64 `json:"recurringFeeTax,omitempty" xmlrpc:"recurringFeeTax,omitempty"`
+
+ // The number of server cores required to order this item. This is deprecated. 
Use [[SoftLayer_Product_Item_Price/getCapacityRestrictionMinimum|getCapacityRestrictionMinimum]] and [[SoftLayer_Product_Item_Price/getCapacityRestrictionMaximum|getCapacityRestrictionMaximum]] + RequiredCoreCount *int `json:"requiredCoreCount,omitempty" xmlrpc:"requiredCoreCount,omitempty"` + + // The setup fee associated with a product item price. + SetupFee *Float64 `json:"setupFee,omitempty" xmlrpc:"setupFee,omitempty"` + + // Used for ordering items on sales orders. + Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"` + + // The rate for a usage based item + UsageRate *Float64 `json:"usageRate,omitempty" xmlrpc:"usageRate,omitempty"` +} + +// The SoftLayer_Product_Item_Price data type gives more information about the item price restrictions. An item price may be restricted to one or more accounts. If the item price is restricted to an account, only that account will see the restriction details. +type Product_Item_Price_Account_Restriction struct { + Entity + + // The account the item price is restricted to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The account id for the item price account restriction. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The unique identifier for the item price account restriction. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The item price that has the account restriction. + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // The item price id for the item price account restriction. + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` +} + +// no documentation yet +type Product_Item_Price_Attribute struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // no documentation yet + ItemPriceAttributeType *Product_Item_Price_Attribute_Type `json:"itemPriceAttributeType,omitempty" xmlrpc:"itemPriceAttributeType,omitempty"` + + // no documentation yet + ItemPriceAttributeTypeId *int `json:"itemPriceAttributeTypeId,omitempty" xmlrpc:"itemPriceAttributeTypeId,omitempty"` + + // no documentation yet + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` + + // no documentation yet + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Product_Item_Price_Attribute_Type struct { + Entity + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` +} + +// no documentation yet +type Product_Item_Price_Premium struct { + Entity + + // no documentation yet + HourlyModifier *Float64 `json:"hourlyModifier,omitempty" xmlrpc:"hourlyModifier,omitempty"` + + // no documentation yet + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // no documentation yet + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` + + // no documentation yet + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // no documentation yet + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // no documentation yet + MonthlyModifier *Float64 `json:"monthlyModifier,omitempty" xmlrpc:"monthlyModifier,omitempty"` + + // no documentation yet + Package 
*Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // no documentation yet + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` +} + +// The SoftLayer_Product_Item_Requirement data type contains information relating to what requirements, if any, exist for an item. The requiredItemId local property is the item id that is required. +type Product_Item_Requirement struct { + Entity + + // Identifier for this record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Item requirement applies to. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // This is the id of the item affected by the requirement. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // This is a custom message to display to the user when this requirement shortfall arises. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // The product containing the requirement. + Product *Product_Item `json:"product,omitempty" xmlrpc:"product,omitempty"` + + // This is the id of the item required. + RequiredItemId *int `json:"requiredItemId,omitempty" xmlrpc:"requiredItemId,omitempty"` +} + +// no documentation yet +type Product_Item_Resource_Conflict struct { + Entity + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // The unique identifier of the item that contains the conflict. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // An optional conflict message. + Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"` + + // no documentation yet + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The unique identifier of the service offering that is associated with the conflict. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` + + // The unique identifier of the conflicting type. + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` +} + +// no documentation yet +type Product_Item_Resource_Conflict_Item struct { + Product_Item_Resource_Conflict + + // A product item that conflicts with another product item. + Resource *Product_Item `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Product_Item_Resource_Conflict_Item_Category struct { + Product_Item_Resource_Conflict + + // An item category that conflicts with a product item. + Resource *Product_Item_Category `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Product_Item_Resource_Conflict_Location struct { + Product_Item_Resource_Conflict + + // A location that conflicts with a product item. + Resource *Location `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The item rule data type represents a rule that must be followed when the item assigned to the rule is ordered. The type and operation applied to the resources of the rule will affect how the rule is checked during ordering. +type Product_Item_Rule struct { + Entity + + // The product item that a rule applies to. 
+ Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"`
+
+ // A count of
+ ItemCategoryResourceCount *uint `json:"itemCategoryResourceCount,omitempty" xmlrpc:"itemCategoryResourceCount,omitempty"`
+
+ // no documentation yet
+ ItemCategoryResources []Product_Item_Rule_Resource_Item_Category `json:"itemCategoryResources,omitempty" xmlrpc:"itemCategoryResources,omitempty"`
+
+ // The unique identifier of the item that the rule applies to.
+ ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"`
+
+ // A count of
+ ItemResourceCount *uint `json:"itemResourceCount,omitempty" xmlrpc:"itemResourceCount,omitempty"`
+
+ // no documentation yet
+ ItemResources []Product_Item_Rule_Resource_Item `json:"itemResources,omitempty" xmlrpc:"itemResources,omitempty"`
+
+ // A count of
+ LocationResourceCount *uint `json:"locationResourceCount,omitempty" xmlrpc:"locationResourceCount,omitempty"`
+
+ // no documentation yet
+ LocationResources []Product_Item_Rule_Resource_Location `json:"locationResources,omitempty" xmlrpc:"locationResources,omitempty"`
+
+ // An optional message shown when the rule is found to be invalid when ordering.
+ Message *string `json:"message,omitempty" xmlrpc:"message,omitempty"`
+
+ // no documentation yet
+ Operation *string `json:"operation,omitempty" xmlrpc:"operation,omitempty"`
+
+ // The package that a rule is applicable to when ordering. If no package exists, the rule applies to any package.
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // The unique identifier of the service offering that is associated with the rule.
+ PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+
+ // A count of
+ PermissionResourceCount *uint `json:"permissionResourceCount,omitempty" xmlrpc:"permissionResourceCount,omitempty"`
+
+ // no documentation yet
+ PermissionResources []Product_Item_Rule_Resource_Permission `json:"permissionResources,omitempty" xmlrpc:"permissionResources,omitempty"`
+
+ // A count of resources for this rule that are validated when ordering.
+ ResourceCount *uint `json:"resourceCount,omitempty" xmlrpc:"resourceCount,omitempty"`
+
+ // Resources for this rule that are validated when ordering.
+ Resources []Product_Item_Rule_Resource `json:"resources,omitempty" xmlrpc:"resources,omitempty"`
+
+ // The type of the rule. The type affects how the rule is validated when ordering.
+ Type *Product_Item_Rule_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // The unique identifier of the type of resource rule.
+ TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+}
+
+// The item rule resource data type represents a resource that is part of an item rule. The item rule resource is used when its item rule is checked on an order.
+type Product_Item_Rule_Resource struct {
+ Entity
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The unique identifier of the resource.
+ ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"`
+
+ // no documentation yet
+ Rule *Product_Item_Rule `json:"rule,omitempty" xmlrpc:"rule,omitempty"`
+
+ // The unique identifier of the rule this resource is included in.
+ RuleId *int `json:"ruleId,omitempty" xmlrpc:"ruleId,omitempty"`
+}
+
+// no documentation yet
+type Product_Item_Rule_Resource_Item struct {
+ Product_Item_Rule_Resource
+
+ // A product item that the associated rule applies to.
+ Resource *Product_Item `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Product_Item_Rule_Resource_Item_Category struct { + Product_Item_Rule_Resource + + // An item category that the associated rule applies to. + Resource *Product_Item_Category `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Product_Item_Rule_Resource_Location struct { + Product_Item_Rule_Resource + + // A location that the associated rule applies to. + Resource *Location `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Product_Item_Rule_Resource_Permission struct { + Product_Item_Rule_Resource + + // A user permission that the associated rule applies to. + Resource *User_Customer_CustomerPermission_Permission `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// The item rule type data type represents the type of an item rule. +type Product_Item_Rule_Type struct { + Entity + + // The identifier for the item rule type. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// The SoftLayer_Product_Item_Tax_Category data type contains the tax categories that are associated with products. +type Product_Item_Tax_Category struct { + Entity + + // An internal identifier for each tax category. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of + ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"` + + // no documentation yet + Items []Product_Item `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // The key name of the tax category. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of the tax category. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The status of the tax category. + StatusFlag *int `json:"statusFlag,omitempty" xmlrpc:"statusFlag,omitempty"` +} + +// no documentation yet +type Product_Order struct { + Entity +} + +// The SoftLayer_Product_Package data type contains information about packages from which orders can be generated. Packages contain general information regarding what is in them, where they are currently sold, availability, and pricing. +type Product_Package struct { + Entity + + // The results from this call are similar to [[SoftLayer_Product_Package/getCategories|getCategories]], but these ONLY include account-restricted prices. Not all accounts have restricted pricing. + AccountRestrictedCategories []Product_Item_Category `json:"accountRestrictedCategories,omitempty" xmlrpc:"accountRestrictedCategories,omitempty"` + + // A count of the results from this call are similar to [[SoftLayer_Product_Package/getCategories|getCategories]], but these ONLY include account-restricted prices. Not all accounts have restricted pricing. + AccountRestrictedCategoryCount *uint `json:"accountRestrictedCategoryCount,omitempty" xmlrpc:"accountRestrictedCategoryCount,omitempty"` + + // The flag to indicate if there are any restricted prices in a package for the currently-active account. + AccountRestrictedPricesFlag *bool `json:"accountRestrictedPricesFlag,omitempty" xmlrpc:"accountRestrictedPricesFlag,omitempty"` + + // A count of the available preset configurations for this package. + ActivePresetCount *uint `json:"activePresetCount,omitempty" xmlrpc:"activePresetCount,omitempty"` + + // The available preset configurations for this package. 
+ ActivePresets []Product_Package_Preset `json:"activePresets,omitempty" xmlrpc:"activePresets,omitempty"`
+
+ // A count of a collection of valid RAM items available for purchase in this package.
+ ActiveRamItemCount *uint `json:"activeRamItemCount,omitempty" xmlrpc:"activeRamItemCount,omitempty"`
+
+ // A collection of valid RAM items available for purchase in this package.
+ ActiveRamItems []Product_Item `json:"activeRamItems,omitempty" xmlrpc:"activeRamItems,omitempty"`
+
+ // A count of a collection of valid server items available for purchase in this package.
+ ActiveServerItemCount *uint `json:"activeServerItemCount,omitempty" xmlrpc:"activeServerItemCount,omitempty"`
+
+ // A collection of valid server items available for purchase in this package.
+ ActiveServerItems []Product_Item `json:"activeServerItems,omitempty" xmlrpc:"activeServerItems,omitempty"`
+
+ // A count of a collection of valid software items available for purchase in this package.
+ ActiveSoftwareItemCount *uint `json:"activeSoftwareItemCount,omitempty" xmlrpc:"activeSoftwareItemCount,omitempty"`
+
+ // A collection of valid software items available for purchase in this package.
+ ActiveSoftwareItems []Product_Item `json:"activeSoftwareItems,omitempty" xmlrpc:"activeSoftwareItems,omitempty"`
+
+ // A count of a collection of [[SoftLayer_Product_Item_Price]] objects for pay-as-you-go usage.
+ ActiveUsagePriceCount *uint `json:"activeUsagePriceCount,omitempty" xmlrpc:"activeUsagePriceCount,omitempty"`
+
+ // A collection of [[SoftLayer_Product_Item_Price]] objects for pay-as-you-go usage.
+ ActiveUsagePrices []Product_Item_Price `json:"activeUsagePrices,omitempty" xmlrpc:"activeUsagePrices,omitempty"`
+
+ // This flag indicates that the package is an additional service.
+ AdditionalServiceFlag *bool `json:"additionalServiceFlag,omitempty" xmlrpc:"additionalServiceFlag,omitempty"`
+
+ // A count of
+ AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+ // no documentation yet
+ Attributes []Product_Package_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+ // A count of a collection of valid locations for this package. (Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]])
+ AvailableLocationCount *uint `json:"availableLocationCount,omitempty" xmlrpc:"availableLocationCount,omitempty"`
+
+ // A collection of valid locations for this package. (Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]])
+ AvailableLocations []Product_Package_Locations `json:"availableLocations,omitempty" xmlrpc:"availableLocations,omitempty"`
+
+ // The maximum number of available disk storage units associated with the servers in a package.
+ AvailableStorageUnits *uint `json:"availableStorageUnits,omitempty" xmlrpc:"availableStorageUnits,omitempty"`
+
+ // This is a collection of categories ([[SoftLayer_Product_Item_Category]]) associated with a package which can be used for ordering. These categories have several objects prepopulated which are useful when determining the available products for purchase. The categories contain groups ([[SoftLayer_Product_Package_Item_Category_Group]]) that organize the products and prices by similar features. For example, operating systems will be grouped by their manufacturer and virtual server disks will be grouped by their disk type (SAN vs. local). Each group will contain prices ([[SoftLayer_Product_Item_Price]]) which you can use to determine the cost of each product. Each price has a product ([[SoftLayer_Product_Item]]) which provides the name and other useful information about the server, service or software you may purchase.
+ Categories []Product_Item_Category `json:"categories,omitempty" xmlrpc:"categories,omitempty"`
+
+ // The item categories associated with a package, including information detailing which item categories are required as part of a SoftLayer product order.
+ Configuration []Product_Package_Order_Configuration `json:"configuration,omitempty" xmlrpc:"configuration,omitempty"`
+
+ // A count of the item categories associated with a package, including information detailing which item categories are required as part of a SoftLayer product order.
+ ConfigurationCount *uint `json:"configurationCount,omitempty" xmlrpc:"configurationCount,omitempty"`
+
+ // A count of a collection of valid RAM items available for purchase in this package.
+ DefaultRamItemCount *uint `json:"defaultRamItemCount,omitempty" xmlrpc:"defaultRamItemCount,omitempty"`
+
+ // A collection of valid RAM items available for purchase in this package.
+ DefaultRamItems []Product_Item `json:"defaultRamItems,omitempty" xmlrpc:"defaultRamItems,omitempty"`
+
+ // A count of the package that represents a multi-server solution. (Deprecated)
+ DeploymentCount *uint `json:"deploymentCount,omitempty" xmlrpc:"deploymentCount,omitempty"`
+
+ // The node type for a package in a solution deployment.
+ DeploymentNodeType *string `json:"deploymentNodeType,omitempty" xmlrpc:"deploymentNodeType,omitempty"`
+
+ // A count of the packages that are allowed in a multi-server solution. (Deprecated)
+ DeploymentPackageCount *uint `json:"deploymentPackageCount,omitempty" xmlrpc:"deploymentPackageCount,omitempty"`
+
+ // The packages that are allowed in a multi-server solution. (Deprecated)
+ DeploymentPackages []Product_Package `json:"deploymentPackages,omitempty" xmlrpc:"deploymentPackages,omitempty"`
+
+ // The solution deployment type.
+ DeploymentType *string `json:"deploymentType,omitempty" xmlrpc:"deploymentType,omitempty"`
+
+ // The package that represents a multi-server solution. (Deprecated)
+ Deployments []Product_Package `json:"deployments,omitempty" xmlrpc:"deployments,omitempty"`
+
+ // A generic description of the processor type and count. This includes HTML, so you may want to strip these tags if you plan to use it.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // This flag indicates the package does not allow custom disk partitions.
+ DisallowCustomDiskPartitions *bool `json:"disallowCustomDiskPartitions,omitempty" xmlrpc:"disallowCustomDiskPartitions,omitempty"`
+
+ // The SoftLayer ordering process is optionally step-based. This returns the first SoftLayer_Product_Package_Order_Step in the step-based order process.
+ FirstOrderStep *Product_Package_Order_Step `json:"firstOrderStep,omitempty" xmlrpc:"firstOrderStep,omitempty"`
+
+ // This is only needed for step-based order verification. We use this for the order forms, but it is not required. This step is the first SoftLayer_Product_Package_Step for this package. Use this for filtering which item categories are returned as a part of SoftLayer_Product_Package_Order_Configuration.
+ FirstOrderStepId *int `json:"firstOrderStepId,omitempty" xmlrpc:"firstOrderStepId,omitempty"`
+
+ // Whether the package is a specialized network gateway appliance package.
+ GatewayApplianceFlag *bool `json:"gatewayApplianceFlag,omitempty" xmlrpc:"gatewayApplianceFlag,omitempty"` + + // This flag indicates that the package supports GPUs. + GpuFlag *bool `json:"gpuFlag,omitempty" xmlrpc:"gpuFlag,omitempty"` + + // Determines whether the package contains prices that can be ordered hourly. + HourlyBillingAvailableFlag *bool `json:"hourlyBillingAvailableFlag,omitempty" xmlrpc:"hourlyBillingAvailableFlag,omitempty"` + + // A package's internal identifier. Everything regarding a SoftLayer_Product_Package is tied back to this id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IsActive *int `json:"isActive,omitempty" xmlrpc:"isActive,omitempty"` + + // The item-item conflicts associated with a package. + ItemConflicts []Product_Item_Resource_Conflict `json:"itemConflicts,omitempty" xmlrpc:"itemConflicts,omitempty"` + + // A count of a collection of valid items available for purchase in this package. + ItemCount *uint `json:"itemCount,omitempty" xmlrpc:"itemCount,omitempty"` + + // The item-location conflicts associated with a package. + ItemLocationConflicts []Product_Item_Resource_Conflict `json:"itemLocationConflicts,omitempty" xmlrpc:"itemLocationConflicts,omitempty"` + + // A count of a collection of SoftLayer_Product_Item_Prices that are valid for this package. + ItemPriceCount *uint `json:"itemPriceCount,omitempty" xmlrpc:"itemPriceCount,omitempty"` + + // A count of cross reference for item prices + ItemPriceReferenceCount *uint `json:"itemPriceReferenceCount,omitempty" xmlrpc:"itemPriceReferenceCount,omitempty"` + + // cross reference for item prices + ItemPriceReferences []Product_Package_Item_Prices `json:"itemPriceReferences,omitempty" xmlrpc:"itemPriceReferences,omitempty"` + + // A collection of SoftLayer_Product_Item_Prices that are valid for this package. + ItemPrices []Product_Item_Price `json:"itemPrices,omitempty" xmlrpc:"itemPrices,omitempty"` + + // A collection of valid items available for purchase in this package. + Items []Product_Item `json:"items,omitempty" xmlrpc:"items,omitempty"` + + // A unique key name for the package. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of a collection of valid locations for this package. (Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]]) + LocationCount *uint `json:"locationCount,omitempty" xmlrpc:"locationCount,omitempty"` + + // A collection of valid locations for this package. (Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]]) + Locations []Location `json:"locations,omitempty" xmlrpc:"locations,omitempty"` + + // The lowest server [[SoftLayer_Product_Item_Price]] related to this package. + LowestServerPrice *Product_Item_Price `json:"lowestServerPrice,omitempty" xmlrpc:"lowestServerPrice,omitempty"` + + // The maximum available network speed associated with the package. + MaximumPortSpeed *uint `json:"maximumPortSpeed,omitempty" xmlrpc:"maximumPortSpeed,omitempty"` + + // The minimum available network speed associated with the package. + MinimumPortSpeed *uint `json:"minimumPortSpeed,omitempty" xmlrpc:"minimumPortSpeed,omitempty"` + + // This flag indicates that this is a MongoDB engineered package. (Deprecated) + MongoDbEngineeredFlag *bool `json:"mongoDbEngineeredFlag,omitempty" xmlrpc:"mongoDbEngineeredFlag,omitempty"` + + // The description of the package. For server packages, this is usually a detailed description of processor type and count. 
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the premium price modifiers associated with the [[SoftLayer_Product_Item_Price]] and [[SoftLayer_Location]] objects in a package. + OrderPremiumCount *uint `json:"orderPremiumCount,omitempty" xmlrpc:"orderPremiumCount,omitempty"` + + // The premium price modifiers associated with the [[SoftLayer_Product_Item_Price]] and [[SoftLayer_Location]] objects in a package. + OrderPremiums []Product_Item_Price_Premium `json:"orderPremiums,omitempty" xmlrpc:"orderPremiums,omitempty"` + + // This flag indicates the package is pre-configured. (Deprecated) + PreconfiguredFlag *bool `json:"preconfiguredFlag,omitempty" xmlrpc:"preconfiguredFlag,omitempty"` + + // Whether the package requires the user to define a preset configuration. + PresetConfigurationRequiredFlag *bool `json:"presetConfigurationRequiredFlag,omitempty" xmlrpc:"presetConfigurationRequiredFlag,omitempty"` + + // Whether the package prevents the user from specifying a Vlan. + PreventVlanSelectionFlag *bool `json:"preventVlanSelectionFlag,omitempty" xmlrpc:"preventVlanSelectionFlag,omitempty"` + + // This flag indicates the package is for a private hosted cloud deployment. (Deprecated) + PrivateHostedCloudPackageFlag *bool `json:"privateHostedCloudPackageFlag,omitempty" xmlrpc:"privateHostedCloudPackageFlag,omitempty"` + + // The server role of the private hosted cloud deployment. (Deprecated) + PrivateHostedCloudPackageType *string `json:"privateHostedCloudPackageType,omitempty" xmlrpc:"privateHostedCloudPackageType,omitempty"` + + // Whether the package only has access to the private network. + PrivateNetworkOnlyFlag *bool `json:"privateNetworkOnlyFlag,omitempty" xmlrpc:"privateNetworkOnlyFlag,omitempty"` + + // Whether the package is a specialized mass storage QuantaStor package. + QuantaStorPackageFlag *bool `json:"quantaStorPackageFlag,omitempty" xmlrpc:"quantaStorPackageFlag,omitempty"` + + // This flag indicates the package does not allow different disks with RAID. + RaidDiskRestrictionFlag *bool `json:"raidDiskRestrictionFlag,omitempty" xmlrpc:"raidDiskRestrictionFlag,omitempty"` + + // This flag determines if the package contains a redundant power supply product. + RedundantPowerFlag *bool `json:"redundantPowerFlag,omitempty" xmlrpc:"redundantPowerFlag,omitempty"` + + // A count of the regional locations that a package is available in. + RegionCount *uint `json:"regionCount,omitempty" xmlrpc:"regionCount,omitempty"` + + // The regional locations that a package is available in. + Regions []Location_Region `json:"regions,omitempty" xmlrpc:"regions,omitempty"` + + // The resource group template that describes a multi-server solution. (Deprecated) + ResourceGroupTemplate *Resource_Group_Template `json:"resourceGroupTemplate,omitempty" xmlrpc:"resourceGroupTemplate,omitempty"` + + // This currently contains no information but is here for future use. + SubDescription *string `json:"subDescription,omitempty" xmlrpc:"subDescription,omitempty"` + + // The top level category code for this service offering. + TopLevelItemCategoryCode *string `json:"topLevelItemCategoryCode,omitempty" xmlrpc:"topLevelItemCategoryCode,omitempty"` + + // The type of service offering. This property can be used to help filter packages. + Type *Product_Package_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The server unit size this package will match to. 
+ UnitSize *int `json:"unitSize,omitempty" xmlrpc:"unitSize,omitempty"`
+}
+
+// no documentation yet
+type Product_Package_Attribute struct {
+ Entity
+
+ // no documentation yet
+ AttributeType *Product_Package_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"`
+
+ // no documentation yet
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // no documentation yet
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// no documentation yet
+type Product_Package_Attribute_Type struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// SoftLayer keeps near real-time track of the number of items available in its product catalog inventory. The SoftLayer_Product_Package_Inventory data type models one of these inventory records. SoftLayer tracks inventory per product package and item per datacenter. This type is useful if you need to purchase specific servers in a specific location, and wish to check their availability before ordering.
+//
+// The data from this type is used primarily on the SoftLayer outlet website.
+type Product_Package_Inventory struct {
+ Entity
+
+ // The number of units available for purchase in SoftLayer's inventory for a single item in a single datacenter.
+ AvailableInventoryCount *int `json:"availableInventoryCount,omitempty" xmlrpc:"availableInventoryCount,omitempty"`
+
+ // The product package item that is associated with an inventory record.
+ Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"`
+
+ // The unique identifier of the product item that an inventory record is associated with.
+ ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"`
+
+ // The datacenter that an inventory record is located in.
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // The unique identifier of the datacenter that an inventory record is located in.
+ LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"`
+
+ // The date that an inventory record was last updated.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // Whether an inventory record is marked as "overstock". Overstock records appear at the top portion of the SoftLayer outlet website.
+ OverstockFlag *int `json:"overstockFlag,omitempty" xmlrpc:"overstockFlag,omitempty"`
+
+ // The product package that is associated with an inventory record.
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // The unique identifier of the product package that an inventory record is associated with.
+ PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+}
+
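+// NOTE (editorial sketch, not generated code): the documentation on
+// SoftLayer_Product_Package_Inventory above suggests checking availability
+// before ordering. This is a minimal, nil-safe sketch of that check; the
+// helper name is illustrative and not part of the SoftLayer API.
+func availableInventoryInLocation(records []Product_Package_Inventory, locationId int) []Product_Package_Inventory {
+	var available []Product_Package_Inventory
+	for _, r := range records {
+		// Every field is a pointer because the API omits unset values,
+		// so guard against nil before dereferencing.
+		if r.LocationId == nil || *r.LocationId != locationId {
+			continue
+		}
+		if r.AvailableInventoryCount != nil && *r.AvailableInventoryCount > 0 {
+			available = append(available, r)
+		}
+	}
+	return available
+}
+
+// This class is used to organize categories for a service offering. A service offering (usually) contains multiple categories (e.g., server, os, disk0, ram). This class allows us to organize the prices into related item category groups.
+type Product_Package_Item_Category_Group struct {
+ Entity
+
+ // no documentation yet
+ Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"`
+
+ // The item category id associated with this group.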
+ ItemCategoryId *int `json:"itemCategoryId,omitempty" xmlrpc:"itemCategoryId,omitempty"`
+
+ // no documentation yet
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // The service offering id associated with this group.
+ PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+
+ // A count of
+ PriceCount *uint `json:"priceCount,omitempty" xmlrpc:"priceCount,omitempty"`
+
+ // no documentation yet
+ Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"`
+
+ // The sort value for this group.
+ Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"`
+
+ // An optional title associated with this group. E.g., for operating systems, this will be the manufacturer.
+ Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+}
+
+// The SoftLayer_Product_Package_Item_Prices data type contains price-to-package cross references. It relates a category, price, and item to a bundle. Match bundle ids to see all items and prices in a particular bundle.
+type Product_Package_Item_Prices struct {
+ Entity
+
+ // The unique identifier for SoftLayer_Product_Package_Item_Price. This is only needed as a reference. The important data is the itemPriceId property.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The item price to which this object belongs. The item price has details regarding cost for the item it belongs to.
+ ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"`
+
+ // The SoftLayer_Product_Item_Price id. This value is to be used when placing orders. To get more information about this item price, go from the item price to the item description.
+ ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"`
+
+ // The package to which this object belongs.
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // The Package ID to which this price reference belongs.
+ PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+}
+
+// This data type is a cross-reference between the SoftLayer_Product_Package and the SoftLayer_Product_Item(s) that belong in the SoftLayer_Product_Package.
+type Product_Package_Items struct {
+ Entity
+
+ // The unique identifier for this object. It is not used anywhere but in this object.
+ Id *string `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The item to which this object belongs.
+ Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"`
+
+ // The SoftLayer_Product_Item id to which this instance of the object belongs.
+ ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"`
+
+ // The package to which this object belongs.
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // The SoftLayer_Product_Package id to which this instance of the object belongs.
+ PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+}
+
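+// NOTE (editorial sketch, not generated code): the documentation on
+// SoftLayer_Product_Package.categories describes a category -> group ->
+// price -> product hierarchy. This sketch walks the
+// Product_Package_Item_Category_Group type declared above, assuming the
+// Item property defined on Product_Item_Price and the Description property
+// on Product_Item elsewhere in this package. The function name is
+// illustrative only.
+func groupProductDescriptions(groups []Product_Package_Item_Category_Group) map[string][]string {
+	products := map[string][]string{}
+	for _, g := range groups {
+		// The optional group title (e.g., an operating system manufacturer).
+		title := ""
+		if g.Title != nil {
+			title = *g.Title
+		}
+		for _, price := range g.Prices {
+			if price.Item != nil && price.Item.Description != nil {
+				products[title] = append(products[title], *price.Item.Description)
+			}
+		}
+	}
+	return products
+}
+
+// Most packages are available in many locations. This object describes that availability for each package.
+type Product_Package_Locations struct {
+ Entity
+
+ // This describes the availability of the package tied to this location.
+ DeliveryTimeInformation *string `json:"deliveryTimeInformation,omitempty" xmlrpc:"deliveryTimeInformation,omitempty"`
+
+ // A simple flag which describes whether or not this location is available for this package.
+ IsAvailable *int `json:"isAvailable,omitempty" xmlrpc:"isAvailable,omitempty"`
+
+ // The location to which this object belongs.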
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // The location id tied to this object.
+ LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"`
+
+ // The package to which this object belongs.
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // The SoftLayer_Product_Package ID tied to this object.
+ PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+}
+
+// This datatype describes the item categories that are required for each package to be ordered. For instance, for package 2, there will be many required categories. When submitting an order for a server, there must be at most 1 price for each category whose "isRequired" is set. Examples of required categories: - server - ram - bandwidth - disk0
+//
+// There are others, but these are the main ones. For each required category, a SoftLayer_Product_Item_Price must be chosen that is valid for the package.
+//
+//
+type Product_Package_Order_Configuration struct {
+ Entity
+
+ // The error message displayed if the submitted order does not contain this item category, if it is required.
+ ErrorMessage *string `json:"errorMessage,omitempty" xmlrpc:"errorMessage,omitempty"`
+
+ // The unique identifier for this object.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // This is a flag which tells SoftLayer_Product_Order::verifyOrder() whether or not this category is required. If this is set, then the order submitted must contain a SoftLayer_Product_Item_Price with this category as part of the order.
+ IsRequired *int `json:"isRequired,omitempty" xmlrpc:"isRequired,omitempty"`
+
+ // The item category for this configuration instance.
+ ItemCategory *Product_Item_Category `json:"itemCategory,omitempty" xmlrpc:"itemCategory,omitempty"`
+
+ // The SoftLayer_Product_Item_Category.
+ ItemCategoryId *int `json:"itemCategoryId,omitempty" xmlrpc:"itemCategoryId,omitempty"`
+
+ // The order step ID for this particular option in the package.
+ OrderStepId *int `json:"orderStepId,omitempty" xmlrpc:"orderStepId,omitempty"`
+
+ // The package to which this instance belongs.
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // The PackageId tied to this instance.
+ PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+
+ // This is an integer used to show the order in which each item category should be displayed. This is merely the suggested order.
+ Sort *int `json:"sort,omitempty" xmlrpc:"sort,omitempty"`
+
+ // The step to which this instance belongs.
+ Step *Product_Package_Order_Step `json:"step,omitempty" xmlrpc:"step,omitempty"`
+}
+
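+// NOTE (editorial sketch, not generated code): per the documentation on
+// SoftLayer_Product_Package_Order_Configuration above, an order must supply
+// a price for every item category whose "isRequired" flag is set. This
+// sketch collects the ids of those required categories; the helper name is
+// illustrative only.
+func requiredItemCategoryIds(configs []Product_Package_Order_Configuration) []int {
+	var required []int
+	for _, c := range configs {
+		// IsRequired is an int flag; a non-zero value marks the category
+		// as required for the order.
+		if c.IsRequired != nil && *c.IsRequired != 0 && c.ItemCategoryId != nil {
+			required = append(required, *c.ItemCategoryId)
+		}
+	}
+	return required
+}
+
+// Each package has at least 1 step to the ordering process. For server orders, there are many. Each step has certain item categories which are displayed. This type describes the steps for each package.
+type Product_Package_Order_Step struct {
+ Entity
+
+ // The unique identifier for this object. It is not used anywhere but in this object.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of the next steps in the ordering process for the package tied to this object, including this step.
+ InclusivePreviousStepCount *uint `json:"inclusivePreviousStepCount,omitempty" xmlrpc:"inclusivePreviousStepCount,omitempty"`
+
+ // The next steps in the ordering process for the package tied to this object, including this step.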
+ InclusivePreviousSteps []Product_Package_Order_Step_Next `json:"inclusivePreviousSteps,omitempty" xmlrpc:"inclusivePreviousSteps,omitempty"`
+
+ // A count of the next steps in the ordering process for the package tied to this object.
+ NextStepCount *uint `json:"nextStepCount,omitempty" xmlrpc:"nextStepCount,omitempty"`
+
+ // The next steps in the ordering process for the package tied to this object.
+ NextSteps []Product_Package_Order_Step_Next `json:"nextSteps,omitempty" xmlrpc:"nextSteps,omitempty"`
+
+ // A count of the previous steps in the ordering process for the package tied to this object.
+ PreviousStepCount *uint `json:"previousStepCount,omitempty" xmlrpc:"previousStepCount,omitempty"`
+
+ // The previous steps in the ordering process for the package tied to this object.
+ PreviousSteps []Product_Package_Order_Step_Next `json:"previousSteps,omitempty" xmlrpc:"previousSteps,omitempty"`
+
+ // The number of the step in the order process for this package. These are sequential and only needed for step-based ordering.
+ Step *string `json:"step,omitempty" xmlrpc:"step,omitempty"`
+}
+
+// This datatype simply describes which steps are next in line for ordering.
+type Product_Package_Order_Step_Next struct {
+ Entity
+
+ // The unique identifier for this object.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The unique identifier for SoftLayer_Product_Package_Order_Step for the next step in the process.
+ NextOrderStepId *int `json:"nextOrderStepId,omitempty" xmlrpc:"nextOrderStepId,omitempty"`
+
+ // The unique identifier for SoftLayer_Product_Package_Order_Step for the current step.
+ OrderStepId *int `json:"orderStepId,omitempty" xmlrpc:"orderStepId,omitempty"`
+
+ // The SoftLayer_Product_Package_Order_Step to which this object belongs.
+ Step *Product_Package_Order_Step `json:"step,omitempty" xmlrpc:"step,omitempty"`
+}
+
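+// NOTE (editorial sketch, not generated code): the step types above form a
+// linked ordering process, with SoftLayer_Product_Package_Order_Step_Next
+// acting as the cross reference to the following steps. This sketch collects
+// the ids of the steps that may follow a given step; the helper name is
+// illustrative only.
+func nextOrderStepIds(step *Product_Package_Order_Step) []int {
+	if step == nil {
+		return nil
+	}
+	var ids []int
+	for _, next := range step.NextSteps {
+		if next.NextOrderStepId != nil {
+			ids = append(ids, *next.NextOrderStepId)
+		}
+	}
+	return ids
+}
+
+// Package presets are used to simplify ordering by eliminating the need for price ids when submitting orders.
+//
+// Orders submitted with a preset id defined will use the prices included in the package preset. Prices submitted on an order with a preset id will replace the prices included in the package preset for that price's category. If the package preset has a fixed configuration flag (fixedConfigurationFlag) set then the prices included in the preset configuration cannot be replaced by prices submitted on the order. The only exception to the fixed configuration flag would be if a price submitted on the order is an account-restricted price for the same product item.
+type Product_Package_Preset struct {
+ Entity
+
+ // no documentation yet
+ AvailableStorageUnits *uint `json:"availableStorageUnits,omitempty" xmlrpc:"availableStorageUnits,omitempty"`
+
+ // The item categories that are included in this package preset configuration.
+ Categories []Product_Item_Category `json:"categories,omitempty" xmlrpc:"categories,omitempty"`
+
+ // A count of the item categories that are included in this package preset configuration.
+ CategoryCount *uint `json:"categoryCount,omitempty" xmlrpc:"categoryCount,omitempty"`
+
+ // The preset configuration (category and price).
+ Configuration []Product_Package_Preset_Configuration `json:"configuration,omitempty" xmlrpc:"configuration,omitempty"`
+
+ // A count of the preset configuration (category and price).
+ ConfigurationCount *uint `json:"configurationCount,omitempty" xmlrpc:"configurationCount,omitempty"`
+
+ // A description of the package preset.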
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // A package preset with this flag set will not allow the prices defined in the preset configuration to be overridden during order placement.
+ FixedConfigurationFlag *bool `json:"fixedConfigurationFlag,omitempty" xmlrpc:"fixedConfigurationFlag,omitempty"`
+
+ // A preset's internal identifier. Everything regarding a SoftLayer_Product_Package_Preset is tied back to this id.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The status of the package preset.
+ IsActive *string `json:"isActive,omitempty" xmlrpc:"isActive,omitempty"`
+
+ // The key name of the package preset. For the base configuration of a package the preset key name is "DEFAULT".
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // The lowest server prices related to this package preset.
+ LowestPresetServerPrice *Product_Item_Price `json:"lowestPresetServerPrice,omitempty" xmlrpc:"lowestPresetServerPrice,omitempty"`
+
+ // The name of the package preset.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The package this preset belongs to.
+ Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"`
+
+ // The item categories associated with a package preset, including information detailing which item categories are required as part of a SoftLayer product order.
+ PackageConfiguration []Product_Package_Order_Configuration `json:"packageConfiguration,omitempty" xmlrpc:"packageConfiguration,omitempty"`
+
+ // A count of the item categories associated with a package preset, including information detailing which item categories are required as part of a SoftLayer product order.
+ PackageConfigurationCount *uint `json:"packageConfigurationCount,omitempty" xmlrpc:"packageConfigurationCount,omitempty"`
+
+ // The package id for the package this preset belongs to.
+ PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"`
+
+ // A count of the item prices that are included in this package preset configuration.
+ PriceCount *uint `json:"priceCount,omitempty" xmlrpc:"priceCount,omitempty"`
+
+ // The item prices that are included in this package preset configuration.
+ Prices []Product_Item_Price `json:"prices,omitempty" xmlrpc:"prices,omitempty"`
+
+ // A count of the storage group template arrays describing how all disks in this preset will be configured.
+ StorageGroupTemplateArrayCount *uint `json:"storageGroupTemplateArrayCount,omitempty" xmlrpc:"storageGroupTemplateArrayCount,omitempty"`
+
+ // Describes how all disks in this preset will be configured.
+ StorageGroupTemplateArrays []Configuration_Storage_Group_Template_Group `json:"storageGroupTemplateArrays,omitempty" xmlrpc:"storageGroupTemplateArrays,omitempty"`
+
+ // The starting hourly price for this configuration. Additional options not defined in the preset may increase the cost.
+ TotalMinimumHourlyFee *Float64 `json:"totalMinimumHourlyFee,omitempty" xmlrpc:"totalMinimumHourlyFee,omitempty"`
+
+ // The starting monthly price for this configuration. Additional options not defined in the preset may increase the cost.
+ TotalMinimumRecurringFee *Float64 `json:"totalMinimumRecurringFee,omitempty" xmlrpc:"totalMinimumRecurringFee,omitempty"`
+}
+
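+// NOTE (editorial sketch, not generated code): the preset documentation
+// above says order prices replace preset prices for the same category unless
+// fixedConfigurationFlag is set. This sketch models that rule (ignoring the
+// account-restricted-price exception). The categoryID callback is an
+// assumption used to keep the sketch self-contained, since resolving a
+// price's category is left to the caller; the helper name is illustrative
+// only.
+func effectivePresetPrices(preset *Product_Package_Preset, orderPrices []Product_Item_Price, categoryID func(Product_Item_Price) int) []Product_Item_Price {
+	if preset == nil {
+		return orderPrices
+	}
+	if preset.FixedConfigurationFlag != nil && *preset.FixedConfigurationFlag {
+		// A fixed configuration keeps the preset prices as-is.
+		return preset.Prices
+	}
+	// Record which categories the order supplies its own prices for.
+	replaced := map[int]bool{}
+	for _, p := range orderPrices {
+		replaced[categoryID(p)] = true
+	}
+	// Keep every preset price whose category was not overridden.
+	merged := append([]Product_Item_Price{}, orderPrices...)
+	for _, p := range preset.Prices {
+		if !replaced[categoryID(p)] {
+			merged = append(merged, p)
+		}
+	}
+	return merged
+}
+
+// Package preset attributes contain supplementary information for a package preset.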
+type Product_Package_Preset_Attribute struct {
+ Entity
+
+ // no documentation yet
+ AttributeType *Product_Package_Preset_Attribute_Type `json:"attributeType,omitempty" xmlrpc:"attributeType,omitempty"`
+
+ // The internal identifier of the type of attribute that a package preset attribute belongs to.
+ AttributeTypeId *int `json:"attributeTypeId,omitempty" xmlrpc:"attributeTypeId,omitempty"`
+
+ // A package preset attribute's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ Preset *Product_Package_Preset `json:"preset,omitempty" xmlrpc:"preset,omitempty"`
+
+ // The internal identifier of the package preset an attribute belongs to.
+ PresetId *int `json:"presetId,omitempty" xmlrpc:"presetId,omitempty"`
+
+ // A package preset's attribute value.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// SoftLayer_Product_Package_Preset_Attribute_Type models the type of attribute that can be assigned to a package preset.
+type Product_Package_Preset_Attribute_Type struct {
+ Entity
+
+ // A brief description of a package preset attribute type.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // A package preset attribute type's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A package preset attribute type's key name. This is typically a shorter version of an attribute type's name.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // A package preset attribute type's name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Product_Package_Preset_Configuration struct {
+ Entity
+
+ // no documentation yet
+ Category *Product_Item_Category `json:"category,omitempty" xmlrpc:"category,omitempty"`
+
+ // no documentation yet
+ PackagePreset *Product_Package_Preset `json:"packagePreset,omitempty" xmlrpc:"packagePreset,omitempty"`
+
+ // no documentation yet
+ Price *Product_Item_Price `json:"price,omitempty" xmlrpc:"price,omitempty"`
+}
+
+// The SoftLayer_Product_Package_Server data type contains summarized information for bare metal servers regarding pricing, processor stats, and feature sets.
+type Product_Package_Server struct {
+ Entity
+
+ // no documentation yet
+ Catalog *Product_Catalog `json:"catalog,omitempty" xmlrpc:"catalog,omitempty"`
+
+ // The unique identifier of a [[SoftLayer_Product_Catalog]].
+ CatalogId *int `json:"catalogId,omitempty" xmlrpc:"catalogId,omitempty"`
+
+ // Comma-separated list of datacenter names this server is available in.
+ Datacenters *string `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"`
+
+ // The minimum amount of RAM the server is configured with.
+ DefaultRamCapacity *Float64 `json:"defaultRamCapacity,omitempty" xmlrpc:"defaultRamCapacity,omitempty"`
+
+ // Flag to indicate if the server configuration supports dual path network routing.
+ DualPathNetworkFlag *bool `json:"dualPathNetworkFlag,omitempty" xmlrpc:"dualPathNetworkFlag,omitempty"`
+
+ // no documentation yet
+ FlexCoreServerFlag *bool `json:"flexCoreServerFlag,omitempty" xmlrpc:"flexCoreServerFlag,omitempty"`
+
+ // Indicates whether or not the server contains a GPU.
+ GpuFlag *bool `json:"gpuFlag,omitempty" xmlrpc:"gpuFlag,omitempty"`
+
+ // Flag to determine if a server is available for hourly billing.
+ HourlyBillingFlag *bool `json:"hourlyBillingFlag,omitempty" xmlrpc:"hourlyBillingFlag,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Package_Server]]. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Item]]. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // no documentation yet + ItemPrice *Product_Item_Price `json:"itemPrice,omitempty" xmlrpc:"itemPrice,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Item_Price]]. + ItemPriceId *int `json:"itemPriceId,omitempty" xmlrpc:"itemPriceId,omitempty"` + + // The maximum number of hard drives the server can support. + MaximumDriveCount *int `json:"maximumDriveCount,omitempty" xmlrpc:"maximumDriveCount,omitempty"` + + // The maximum available network speed for the server. + MaximumPortSpeed *Float64 `json:"maximumPortSpeed,omitempty" xmlrpc:"maximumPortSpeed,omitempty"` + + // The maximum amount of RAM the server can support. + MaximumRamCapacity *Float64 `json:"maximumRamCapacity,omitempty" xmlrpc:"maximumRamCapacity,omitempty"` + + // The minimum available network speed for the server. + MinimumPortSpeed *Float64 `json:"minimumPortSpeed,omitempty" xmlrpc:"minimumPortSpeed,omitempty"` + + // no documentation yet + NetworkGatewayApplianceRoleFlag *bool `json:"networkGatewayApplianceRoleFlag,omitempty" xmlrpc:"networkGatewayApplianceRoleFlag,omitempty"` + + // Indicates whether or not the server is being sold as part of an outlet package. + OutletFlag *bool `json:"outletFlag,omitempty" xmlrpc:"outletFlag,omitempty"` + + // no documentation yet + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Package]]. + PackageId *int `json:"packageId,omitempty" xmlrpc:"packageId,omitempty"` + + // The type of service offering/package. + PackageType *string `json:"packageType,omitempty" xmlrpc:"packageType,omitempty"` + + // Flag to indicate if the server is an IBM Power server. + PowerServerFlag *bool `json:"powerServerFlag,omitempty" xmlrpc:"powerServerFlag,omitempty"` + + // no documentation yet + Preset *Product_Package_Preset `json:"preset,omitempty" xmlrpc:"preset,omitempty"` + + // The unique identifier of a [[SoftLayer_Product_Package_Preset]]. + PresetId *int `json:"presetId,omitempty" xmlrpc:"presetId,omitempty"` + + // Indicates whether or not the server can only be configured with a private network. + PrivateNetworkOnlyFlag *bool `json:"privateNetworkOnlyFlag,omitempty" xmlrpc:"privateNetworkOnlyFlag,omitempty"` + + // The processor's bus speed. + ProcessorBusSpeed *string `json:"processorBusSpeed,omitempty" xmlrpc:"processorBusSpeed,omitempty"` + + // The amount of cache the processor has. + ProcessorCache *string `json:"processorCache,omitempty" xmlrpc:"processorCache,omitempty"` + + // The number of cores in each processor. + ProcessorCores *int `json:"processorCores,omitempty" xmlrpc:"processorCores,omitempty"` + + // The number of processors the server has. + ProcessorCount *int `json:"processorCount,omitempty" xmlrpc:"processorCount,omitempty"` + + // The manufacturer of the server's processor. + ProcessorManufacturer *string `json:"processorManufacturer,omitempty" xmlrpc:"processorManufacturer,omitempty"` + + // The model of the server's processor. 
+ ProcessorModel *string `json:"processorModel,omitempty" xmlrpc:"processorModel,omitempty"`
+
+ // The name of the server's processor.
+ ProcessorName *string `json:"processorName,omitempty" xmlrpc:"processorName,omitempty"`
+
+ // The processor speed.
+ ProcessorSpeed *string `json:"processorSpeed,omitempty" xmlrpc:"processorSpeed,omitempty"`
+
+ // The name of the server product.
+ ProductName *string `json:"productName,omitempty" xmlrpc:"productName,omitempty"`
+
+ // Indicates whether or not the server has the capability to support a redundant power supply.
+ RedundantPowerFlag *bool `json:"redundantPowerFlag,omitempty" xmlrpc:"redundantPowerFlag,omitempty"`
+
+ // Flag to indicate if the server is SAP certified.
+ SapCertifiedServerFlag *bool `json:"sapCertifiedServerFlag,omitempty" xmlrpc:"sapCertifiedServerFlag,omitempty"`
+
+ // The hourly starting price for the server. This includes a sum of all the minimum required items, including RAM and hard drives. Not all servers are available hourly.
+ StartingHourlyPrice *Float64 `json:"startingHourlyPrice,omitempty" xmlrpc:"startingHourlyPrice,omitempty"`
+
+ // The monthly starting price for the server. This includes a sum of all the minimum required items, including RAM and hard drives.
+ StartingMonthlyPrice *Float64 `json:"startingMonthlyPrice,omitempty" xmlrpc:"startingMonthlyPrice,omitempty"`
+
+ // The total number of processor cores available for the server.
+ TotalCoreCount *int `json:"totalCoreCount,omitempty" xmlrpc:"totalCoreCount,omitempty"`
+
+ // Flag to indicate if the server configuration supports TXT/TPM.
+ TxtTpmFlag *bool `json:"txtTpmFlag,omitempty" xmlrpc:"txtTpmFlag,omitempty"`
+
+ // The size of the server.
+ UnitSize *int `json:"unitSize,omitempty" xmlrpc:"unitSize,omitempty"`
+}
+
+// The [[SoftLayer_Product_Package_Server_Option]] data type contains various data points associated with package servers that can be used in selection criteria.
+type Product_Package_Server_Option struct {
+ Entity
+
+ // The unique identifier of a Catalog.
+ CatalogId *int `json:"catalogId,omitempty" xmlrpc:"catalogId,omitempty"`
+
+ // A description of the option.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The unique identifier of a Package Server Option.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The type of option.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // The value of the option.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
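+// NOTE (editorial sketch, not generated code): the documentation on the
+// SoftLayer_Product_Package_Type declared just below says the type can be
+// used to filter packages by key names such as VIRTUAL_SERVER_INSTANCE,
+// BARE_METAL_CORE, or BARE_METAL_CPU. This sketch filters on the Type
+// property declared earlier on Product_Package; the helper name is
+// illustrative only.
+func packagesOfType(pkgs []Product_Package, typeKeyName string) []Product_Package {
+	var matched []Product_Package
+	for _, p := range pkgs {
+		if p.Type != nil && p.Type.KeyName != nil && *p.Type.KeyName == typeKeyName {
+			matched = append(matched, p)
+		}
+	}
+	return matched
+}
+
+// The [[SoftLayer_Product_Package_Type]] object indicates the type for a service offering (package). The type can be used to filter packages. For example, if you are looking for the package representing virtual servers, you can filter on the type's key name of '''VIRTUAL_SERVER_INSTANCE'''. For bare metal servers by core or CPU, filter on '''BARE_METAL_CORE''' or '''BARE_METAL_CPU''', respectively.
+type Product_Package_Type struct {
+ Entity
+
+ // The package type's unique identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The unique key name of the package type. Use this value when filtering.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // The name of the package type.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A count of all the packages associated with the given package type.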
+ PackageCount *uint `json:"packageCount,omitempty" xmlrpc:"packageCount,omitempty"`
+
+ // All the packages associated with the given package type.
+ Packages []Product_Package `json:"packages,omitempty" xmlrpc:"packages,omitempty"`
+}
+
+// The SoftLayer_Product_Upgrade_Request data type contains general information relating to a hardware, virtual server, or service upgrade. It also relates a [[SoftLayer_Billing_Order]] to a [[SoftLayer_Ticket]].
+type Product_Upgrade_Request struct {
+ Entity
+
+ // The account that an order belongs to
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The unique internal id of a SoftLayer account
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // Indicates that the upgrade request has completed or has been cancelled.
+ CompletedFlag *bool `json:"completedFlag,omitempty" xmlrpc:"completedFlag,omitempty"`
+
+ // The date an upgrade request was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The unique internal id of the last modified user
+ EmployeeId *int `json:"employeeId,omitempty" xmlrpc:"employeeId,omitempty"`
+
+ // The unique internal id of the virtual server that an upgrade will be done for
+ GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"`
+
+ // The unique internal id of the hardware that an upgrade will be done for
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+ // An upgrade request's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // This is the invoice associated with the upgrade request. For hourly servers or services, an invoice will not be available.
+ Invoice *Billing_Invoice `json:"invoice,omitempty" xmlrpc:"invoice,omitempty"`
+
+ // The time that the system admin starts working on the order item. This is used for upgrade orders.
+ MaintenanceStartTimeUtc *Time `json:"maintenanceStartTimeUtc,omitempty" xmlrpc:"maintenanceStartTimeUtc,omitempty"`
+
+ // The date an upgrade request was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // An order record associated with the upgrade request
+ Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"`
+
+ // The unique internal id of the order that an upgrade request is related to
+ OrderId *int `json:"orderId,omitempty" xmlrpc:"orderId,omitempty"`
+
+ // The total amount of fees
+ OrderTotal *Float64 `json:"orderTotal,omitempty" xmlrpc:"orderTotal,omitempty"`
+
+ // The prorated total amount of recurring fees
+ ProratedTotal *Float64 `json:"proratedTotal,omitempty" xmlrpc:"proratedTotal,omitempty"`
+
+ // A server object associated with the upgrade request if any.
+ Server *Hardware `json:"server,omitempty" xmlrpc:"server,omitempty"`
+
+ // The current status of the upgrade request.
+ Status *Product_Upgrade_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // The unique internal id of an upgrade status
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+ // The ticket that is used to coordinate the upgrade process.
+ Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+ // The unique internal id of the ticket related to an upgrade request
+ TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+
+ // The user that placed the order.
+ User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+ // The unique internal id of the customer who placed the order
+ UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+
+ // A virtual server object associated with the upgrade request if any.
+ VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"`
+}
+
+// The SoftLayer_Product_Upgrade_Request_Status data type contains detailed information relating to a hardware or software upgrade request.
+type Product_Upgrade_Request_Status struct {
+ Entity
+
+ // The detailed description of an upgrade request status.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // An internal identifier of an upgrade request status.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The name of an upgrade request status.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The status code of an upgrade request status.
+ StatusCode *string `json:"statusCode,omitempty" xmlrpc:"statusCode,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/provisioning.go b/vendor/github.com/softlayer/softlayer-go/datatypes/provisioning.go
new file mode 100644
index 0000000000..e79fac2291
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/datatypes/provisioning.go
@@ -0,0 +1,303 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// The SoftLayer_Provisioning_Hook contains all the information needed to add a hook into a server or virtual server provision and OS reload.
+type Provisioning_Hook struct {
+ Entity
+
+ // no documentation yet
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The ID of the account the script belongs to.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ HookType *Provisioning_Hook_Type `json:"hookType,omitempty" xmlrpc:"hookType,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The name of the hook.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The ID of the type of hook the script is identified as. Currently only CUSTOMER_PROVIDED_HOOK has been implemented.
+ TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+ // The endpoint that the script will be downloaded from (USERNAME AND PASSWORD SHOULD BE INCLUDED HERE). If the endpoint is HTTP, the script will only be downloaded. If the endpoint is HTTPS, the script will be downloaded and executed.
+ Uri *string `json:"uri,omitempty" xmlrpc:"uri,omitempty"`
+}
+
+// no documentation yet
+type Provisioning_Hook_Type struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Provisioning_Maintenance_Classification represents a maintenance type for the specific hardware maintenance desired.
+type Provisioning_Maintenance_Classification struct {
+ Entity
+
+ // The id of the maintenance classification.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ ItemCategories []Provisioning_Maintenance_Classification_Item_Category `json:"itemCategories,omitempty" xmlrpc:"itemCategories,omitempty"`
+
+ // A count of
+ ItemCategoryCount *uint `json:"itemCategoryCount,omitempty" xmlrpc:"itemCategoryCount,omitempty"`
+
+ // The number of slots required for the maintenance classification.
+ Slots *int `json:"slots,omitempty" xmlrpc:"slots,omitempty"`
+
+ // The type or name of the maintenance classification.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// no documentation yet
+type Provisioning_Maintenance_Classification_Item_Category struct {
+ Entity
+
+ // no documentation yet
+ ItemCategoryId *int `json:"itemCategoryId,omitempty" xmlrpc:"itemCategoryId,omitempty"`
+
+ // no documentation yet
+ MaintenanceClassification *Provisioning_Maintenance_Classification `json:"maintenanceClassification,omitempty" xmlrpc:"maintenanceClassification,omitempty"`
+
+ // no documentation yet
+ MaintenanceClassificationId *int `json:"maintenanceClassificationId,omitempty" xmlrpc:"maintenanceClassificationId,omitempty"`
+}
+
+// The SoftLayer_Provisioning_Maintenance_Slots represents the available slots for a given maintenance window at a SoftLayer data center.
+type Provisioning_Maintenance_Slots struct {
+ Entity
+
+ // The available slots for a maintenance window.
+ AvailableSlots *int `json:"availableSlots,omitempty" xmlrpc:"availableSlots,omitempty"`
+}
+
+// no documentation yet
+type Provisioning_Maintenance_Ticket struct {
+ Entity
+
+ // no documentation yet
+ AvailableSlots *Provisioning_Maintenance_Slots `json:"availableSlots,omitempty" xmlrpc:"availableSlots,omitempty"`
+
+ // no documentation yet
+ MaintClassId *int `json:"maintClassId,omitempty" xmlrpc:"maintClassId,omitempty"`
+
+ // no documentation yet
+ MaintWindowId *int `json:"maintWindowId,omitempty" xmlrpc:"maintWindowId,omitempty"`
+
+ // no documentation yet
+ MaintenanceClass *Provisioning_Maintenance_Classification `json:"maintenanceClass,omitempty" xmlrpc:"maintenanceClass,omitempty"`
+
+ // no documentation yet
+ MaintenanceDate *Time `json:"maintenanceDate,omitempty" xmlrpc:"maintenanceDate,omitempty"`
+
+ // no documentation yet
+ Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+ // no documentation yet
+ TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+}
+
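+// NOTE (editorial sketch, not generated code): per the documentation on
+// SoftLayer_Provisioning_Hook.uri earlier in this file, an HTTPS endpoint is
+// downloaded and executed while an HTTP endpoint is only downloaded. This
+// sketch captures that check; it assumes a lower-case scheme and the helper
+// name is illustrative only.
+func hookWillExecute(hook *Provisioning_Hook) bool {
+	if hook == nil || hook.Uri == nil {
+		return false
+	}
+	// Manual prefix check keeps the sketch free of imports.
+	const scheme = "https://"
+	uri := *hook.Uri
+	return len(uri) >= len(scheme) && uri[:len(scheme)] == scheme
+}
+
+// The SoftLayer_Provisioning_Maintenance_Window represents a time window during which SoftLayer performs hardware or software maintenance and upgrades.
+type Provisioning_Maintenance_Window struct {
+ Entity
+
+ // The date and time a maintenance window is scheduled to begin.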
+ BeginDate *Time `json:"beginDate,omitempty" xmlrpc:"beginDate,omitempty"`
+
+ // An ISO-8601 numeric representation of the day of the week on which a maintenance window is performed. 1: Monday, 7: Sunday
+ DayOfWeek *int `json:"dayOfWeek,omitempty" xmlrpc:"dayOfWeek,omitempty"`
+
+ // The date and time a maintenance window is scheduled to end.
+ EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+ // Id of the maintenance window
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // An internal identifier of the location (data center) record that a maintenance window takes place in.
+ LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"`
+
+ // An internal identifier of the datacenter timezone.
+ PortalTzId *int `json:"portalTzId,omitempty" xmlrpc:"portalTzId,omitempty"`
+}
+
+// The SoftLayer_Provisioning_Version1_Transaction data type contains general information relating to a single SoftLayer hardware transaction.
+//
+// SoftLayer customers are unable to change their hardware transactions.
+type Provisioning_Version1_Transaction struct {
+ Entity
+
+ // The account that a transaction belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The date a transaction was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The number of seconds that have elapsed since the transaction was last modified.
+ ElapsedSeconds *int `json:"elapsedSeconds,omitempty" xmlrpc:"elapsedSeconds,omitempty"`
+
+ // The guest record for this transaction.
+ Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"`
+
+ // A transaction's associated guest identification number.
+ GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"`
+
+ // The hardware object for this transaction.
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // A transaction's associated hardware identification number.
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+ // A transaction's identifying number.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ Loopback []Provisioning_Version1_Transaction `json:"loopback,omitempty" xmlrpc:"loopback,omitempty"`
+
+ // A count of
+ LoopbackCount *uint `json:"loopbackCount,omitempty" xmlrpc:"loopbackCount,omitempty"`
+
+ // The date a transaction was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A count of
+ PendingTransactionCount *uint `json:"pendingTransactionCount,omitempty" xmlrpc:"pendingTransactionCount,omitempty"`
+
+ // no documentation yet
+ PendingTransactions []Provisioning_Version1_Transaction `json:"pendingTransactions,omitempty" xmlrpc:"pendingTransactions,omitempty"`
+
+ // The date the transaction status was last modified.
+ StatusChangeDate *Time `json:"statusChangeDate,omitempty" xmlrpc:"statusChangeDate,omitempty"`
+
+ // no documentation yet
+ TicketScheduledActionReference []Ticket_Attachment `json:"ticketScheduledActionReference,omitempty" xmlrpc:"ticketScheduledActionReference,omitempty"`
+
+ // A count of
+ TicketScheduledActionReferenceCount *uint `json:"ticketScheduledActionReferenceCount,omitempty" xmlrpc:"ticketScheduledActionReferenceCount,omitempty"`
+
+ // A transaction's group. This group object determines what type of service is being done on the hardware.
+ TransactionGroup *Provisioning_Version1_Transaction_Group `json:"transactionGroup,omitempty" xmlrpc:"transactionGroup,omitempty"`
+
+ // A transaction's status. This status object determines its state within the transaction group.
+ TransactionStatus *Provisioning_Version1_Transaction_Status `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"`
+}
+
+// The SoftLayer_Provisioning_Version1_Transaction_Group data type contains general information relating to a single SoftLayer hardware transaction group.
+//
+// SoftLayer customers are unable to change their hardware transactions or the hardware transaction group.
+type Provisioning_Version1_Transaction_Group struct {
+ Entity
+
+ // Average time, in minutes, for this type of transaction to complete. Please note that this is only an estimate.
+ AverageTimeToComplete *Float64 `json:"averageTimeToComplete,omitempty" xmlrpc:"averageTimeToComplete,omitempty"`
+
+ // A transaction group's name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Provisioning_Version1_Transaction_History struct {
+ Entity
+
+ // The finish date of a transaction history record.
+ FinishDate *Time `json:"finishDate,omitempty" xmlrpc:"finishDate,omitempty"`
+
+ // The guest from where transaction history originates.
+ Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"`
+
+ // The guest ID associated with a transaction history.
+ GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"`
+
+ // The hardware from where transaction history originates.
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // The hardware ID associated with a transaction history.
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+ // The host ID associated with a transaction history.
+ HostId *int `json:"hostId,omitempty" xmlrpc:"hostId,omitempty"`
+
+ // The ID associated with a transaction history.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The start date of a transaction history record.
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+ // The transaction from where transaction history originates.
+ Transaction *Provisioning_Version1_Transaction `json:"transaction,omitempty" xmlrpc:"transaction,omitempty"`
+
+ // The transaction ID associated with a transaction history.
+ TransactionId *int `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"`
+
+ // The transaction status of a transaction history.
+ TransactionStatus *Provisioning_Version1_Transaction_Status `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"`
+
+ // The transaction status ID associated with a transaction history.
+ TransactionStatusId *int `json:"transactionStatusId,omitempty" xmlrpc:"transactionStatusId,omitempty"`
+}
+
+// The SoftLayer_Provisioning_Version1_Transaction_Status data type contains general information relating to a single SoftLayer hardware transaction status.
+//
+// SoftLayer customers are unable to change their hardware transaction status.
+type Provisioning_Version1_Transaction_Status struct {
+ Entity
+
+ // Hardware transaction status average duration.
+ AverageDuration *Float64 `json:"averageDuration,omitempty" xmlrpc:"averageDuration,omitempty"`
+
+ // Transaction status friendly name.
+ FriendlyName *string `json:"friendlyName,omitempty" xmlrpc:"friendlyName,omitempty"`
+
+ // Transaction status name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of + NonCompletedTransactionCount *uint `json:"nonCompletedTransactionCount,omitempty" xmlrpc:"nonCompletedTransactionCount,omitempty"` + + // no documentation yet + NonCompletedTransactions []Provisioning_Version1_Transaction `json:"nonCompletedTransactions,omitempty" xmlrpc:"nonCompletedTransactions,omitempty"` +} + +// no documentation yet +type Provisioning_Version1_Transaction_SubnetMigration struct { + Provisioning_Version1_Transaction +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/resource.go b/vendor/github.com/softlayer/softlayer-go/datatypes/resource.go new file mode 100644 index 0000000000..f2a293ad26 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/resource.go @@ -0,0 +1,413 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Resource_Configuration struct { + Entity +} + +// no documentation yet +type Resource_Group struct { + Entity + + // A count of a resource group's associated group ancestors. + AncestorGroupCount *uint `json:"ancestorGroupCount,omitempty" xmlrpc:"ancestorGroupCount,omitempty"` + + // A resource group's associated group ancestors. + AncestorGroups []Resource_Group `json:"ancestorGroups,omitempty" xmlrpc:"ancestorGroups,omitempty"` + + // A count of a resource group's associated attributes. + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // A resource group's associated attributes. + Attributes []Resource_Group_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // A resource group's creation date. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A resource group's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A count of a resource group's associated hardware members. + HardwareMemberCount *uint `json:"hardwareMemberCount,omitempty" xmlrpc:"hardwareMemberCount,omitempty"` + + // A resource group's associated hardware members. + HardwareMembers []Resource_Group_Member `json:"hardwareMembers,omitempty" xmlrpc:"hardwareMembers,omitempty"` + + // A resource group's ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A resource group's keyname. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of a resource group's associated members. + MemberCount *uint `json:"memberCount,omitempty" xmlrpc:"memberCount,omitempty"` + + // A resource group's associated members. + Members []Resource_Group_Member `json:"members,omitempty" xmlrpc:"members,omitempty"` + + // A resource group's name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A resource group's associated root resource group. 
+ RootResourceGroup *Resource_Group `json:"rootResourceGroup,omitempty" xmlrpc:"rootResourceGroup,omitempty"`
+
+ // no documentation yet
+ RootResourceGroupId *int `json:"rootResourceGroupId,omitempty" xmlrpc:"rootResourceGroupId,omitempty"`
+
+ // A count of a resource group's associated subnet members.
+ SubnetMemberCount *uint `json:"subnetMemberCount,omitempty" xmlrpc:"subnetMemberCount,omitempty"`
+
+ // A resource group's associated subnet members.
+ SubnetMembers []Resource_Group_Member `json:"subnetMembers,omitempty" xmlrpc:"subnetMembers,omitempty"`
+
+ // A resource group's associated template.
+ Template *Resource_Group_Template `json:"template,omitempty" xmlrpc:"template,omitempty"`
+
+ // A resource group's template ID.
+ TemplateId *int `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"`
+
+ // A count of a resource group's associated VLAN members.
+ VlanMemberCount *uint `json:"vlanMemberCount,omitempty" xmlrpc:"vlanMemberCount,omitempty"`
+
+ // A resource group's associated VLAN members.
+ VlanMembers []Resource_Group_Member `json:"vlanMembers,omitempty" xmlrpc:"vlanMembers,omitempty"`
+}
+
+// no documentation yet
+type Resource_Group_Attribute struct {
+ Entity
+
+ // A resource group attribute's creation date.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A resource group attribute's resource group.
+ Group *Resource_Group `json:"group,omitempty" xmlrpc:"group,omitempty"`
+
+ // A resource group attribute's ID.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A resource group attribute's type.
+ Type *Resource_Group_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // A resource group attribute's value.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// no documentation yet
+type Resource_Group_Attribute_Type struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Resource_Group_Descendant_Reference data type simplifies the link between one SoftLayer_Resource_Group_Member object and all of its parents.
+//
+//
+type Resource_Group_Descendant_Reference struct {
+ Entity
+
+ // no documentation yet
+ Group *Resource_Group `json:"group,omitempty" xmlrpc:"group,omitempty"`
+
+ // no documentation yet
+ GroupMember *Resource_Group_Member `json:"groupMember,omitempty" xmlrpc:"groupMember,omitempty"`
+}
+
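+// NOTE (editorial sketch, not generated code): a resource group's
+// ancestorGroups property, declared above, lists every parent group. This
+// sketch collects their key names to form a readable path for the group;
+// the helper name is illustrative only.
+func ancestorKeyNames(group *Resource_Group) []string {
+	if group == nil {
+		return nil
+	}
+	var names []string
+	for _, ancestor := range group.AncestorGroups {
+		if ancestor.KeyName != nil {
+			names = append(names, *ancestor.KeyName)
+		}
+	}
+	return names
+}
+
+// no documentation yet
+type Resource_Group_Member struct {
+ Entity
+
+ // A count of a resource group member's associated attributes.
+ AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+ // A resource group member's associated attributes.
+ Attributes []Resource_Group_Member_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+ // A resource group member's creation date.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A count of a resource group member's associated member descendants.
+ DescendantMemberCount *uint `json:"descendantMemberCount,omitempty" xmlrpc:"descendantMemberCount,omitempty"`
+
+ // A resource group member's associated member descendants.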
+ DescendantMembers []Resource_Group_Member `json:"descendantMembers,omitempty" xmlrpc:"descendantMembers,omitempty"` + + // A resource group member's resource group. + Group *Resource_Group `json:"group,omitempty" xmlrpc:"group,omitempty"` + + // A resource group member's ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of a resource group member's associated roles. + RoleCount *uint `json:"roleCount,omitempty" xmlrpc:"roleCount,omitempty"` + + // A resource group member's associated roles. + Roles []Resource_Group_Role `json:"roles,omitempty" xmlrpc:"roles,omitempty"` + + // A resource group member's status. + Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A resource group member's type. + Type *Resource_Group_Member_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Attribute struct { + Entity + + // A resource group member attribute's creation date. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A resource group member attribute's ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A resource group member attribute's resource group member. + Member *Resource_Group_Member `json:"member,omitempty" xmlrpc:"member,omitempty"` + + // A resource group member attribute's type. + Type *Resource_Group_Member_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // A resource group member attribute's value. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Attribute_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_CloudStack_Version3_Cluster struct { + Resource_Group_Member + + // A resource group member's associated cluster. + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_CloudStack_Version3_Pod struct { + Resource_Group_Member + + // A resource group member's associated pod. + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_CloudStack_Version3_Zone struct { + Resource_Group_Member + + // A resource group member's associated zone. + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Hardware struct { + Resource_Group_Member + + // A resource group member's associated hardware. + Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"` + + // A resource group hardware member's associated server arbiter-only state. + ServerArbiterOnly *Resource_Group_Member_Attribute `json:"serverArbiterOnly,omitempty" xmlrpc:"serverArbiterOnly,omitempty"` + + // A resource group hardware member's associated server hidden state. + ServerHidden *Resource_Group_Member_Attribute `json:"serverHidden,omitempty" xmlrpc:"serverHidden,omitempty"` + + // A resource group hardware member's associated server priority. 
+ ServerPriority *Resource_Group_Member_Attribute `json:"serverPriority,omitempty" xmlrpc:"serverPriority,omitempty"` + + // A resource group hardware member's associated server slave delay (in seconds). + ServerSlaveDelay *Resource_Group_Member_Attribute `json:"serverSlaveDelay,omitempty" xmlrpc:"serverSlaveDelay,omitempty"` + + // A resource group hardware member's associated server tags (in JSON format). + ServerTags *Resource_Group_Member_Attribute `json:"serverTags,omitempty" xmlrpc:"serverTags,omitempty"` + + // A resource group hardware member's associated server vote count. + ServerVotes *Resource_Group_Member_Attribute `json:"serverVotes,omitempty" xmlrpc:"serverVotes,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Network_Storage struct { + Resource_Group_Member + + // A resource group member's associated network storage. + Resource *Network_Storage `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Network_Subnet struct { + Resource_Group_Member + + // A resource group member's associated network subnet. + Resource *Network_Subnet `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Network_Vlan struct { + Resource_Group_Member + + // A resource group member's associated network VLAN. + Resource *Network_Vlan `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Resource_Group struct { + Resource_Group_Member + + // A resource group member's associated resource group. + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Role_Link struct { + Entity + + // A resource group member's ID. + GroupMemberId *int `json:"groupMemberId,omitempty" xmlrpc:"groupMemberId,omitempty"` + + // A resource group's template role ID. + GroupTemplateRoleId *int `json:"groupTemplateRoleId,omitempty" xmlrpc:"groupTemplateRoleId,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Software_Component_Password struct { + Resource_Group_Member + + // A resource group member's associated software component password. + Resource *Software_Component_Password `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Type struct { + Entity + + // A resource group member's type description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A resource group member's type keyname. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// no documentation yet +type Resource_Group_Member_Virtual_Host_Pool struct { + Resource_Group_Member +} + +// no documentation yet +type Resource_Group_Role struct { + Entity + + // A resource group role's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // A resource group role's ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A resource group role's keyname. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of a resource group role's member links. + MemberLinkCount *uint `json:"memberLinkCount,omitempty" xmlrpc:"memberLinkCount,omitempty"` + + // A resource group role's member links.
+ MemberLinks []Resource_Group_Member_Role_Link `json:"memberLinks,omitempty" xmlrpc:"memberLinks,omitempty"` +} + +// no documentation yet +type Resource_Group_Template struct { + Entity + + // no documentation yet + Children []Resource_Group_Template `json:"children,omitempty" xmlrpc:"children,omitempty"` + + // A count of a resource group template's children. + ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"` + + // A resource group template's description. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A resource group template's keyname. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A count of a resource group template's members. + MemberCount *uint `json:"memberCount,omitempty" xmlrpc:"memberCount,omitempty"` + + // no documentation yet + Members []Resource_Group_Template_Member `json:"members,omitempty" xmlrpc:"members,omitempty"` + + // no documentation yet + Package *Product_Package `json:"package,omitempty" xmlrpc:"package,omitempty"` +} + +// no documentation yet +type Resource_Group_Template_Member struct { + Entity + + // no documentation yet + MaxQuantity *int `json:"maxQuantity,omitempty" xmlrpc:"maxQuantity,omitempty"` + + // no documentation yet + MinQuantity *int `json:"minQuantity,omitempty" xmlrpc:"minQuantity,omitempty"` + + // no documentation yet + Role *Resource_Group_Role `json:"role,omitempty" xmlrpc:"role,omitempty"` + + // no documentation yet + RoleId *int `json:"roleId,omitempty" xmlrpc:"roleId,omitempty"` + + // no documentation yet + Template *Resource_Group_Template `json:"template,omitempty" xmlrpc:"template,omitempty"` + + // no documentation yet + TemplateId *int `json:"templateId,omitempty" xmlrpc:"templateId,omitempty"` +} + +// no documentation yet +type Resource_Metadata struct { + Entity +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/sales.go b/vendor/github.com/softlayer/softlayer-go/datatypes/sales.go new file mode 100644 index 0000000000..bf667aab49 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/sales.go @@ -0,0 +1,62 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The presale event data types indicate the information regarding an individual presale event. The '''locationId''' will indicate the datacenter associated with the presale event. The '''itemId''' will indicate the product item associated with a particular presale event - however, these are rarer. The '''startDate''' and '''endDate''' will provide information regarding when the presale event is available for use. At the end of the presale event, the server or services purchased will be available once approved and provisioned. +type Sales_Presale_Event struct { + Entity + + // A flag to indicate that the presale event is currently active.
A presale event is active if the current time is between the start and end dates. + ActiveFlag *bool `json:"activeFlag,omitempty" xmlrpc:"activeFlag,omitempty"` + + // Description of the presale event. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // End date of the presale event. Orders can be approved and provisioned after this date. + EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"` + + // A flag to indicate that the presale event is expired. A presale event is expired if the current time is after the end date. + ExpiredFlag *bool `json:"expiredFlag,omitempty" xmlrpc:"expiredFlag,omitempty"` + + // Presale event unique identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The [[SoftLayer_Product_Item]] associated with the presale event. + Item *Product_Item `json:"item,omitempty" xmlrpc:"item,omitempty"` + + // [[SoftLayer_Product_Item]] id associated with the presale event. + ItemId *int `json:"itemId,omitempty" xmlrpc:"itemId,omitempty"` + + // The [[SoftLayer_Location]] associated with the presale event. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // [[SoftLayer_Location]] id for the presale event. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // A count of the orders ([[SoftLayer_Billing_Order]]) associated with this presale event that were created for the customer's account. + OrderCount *uint `json:"orderCount,omitempty" xmlrpc:"orderCount,omitempty"` + + // The orders ([[SoftLayer_Billing_Order]]) associated with this presale event that were created for the customer's account. + Orders []Billing_Order `json:"orders,omitempty" xmlrpc:"orders,omitempty"` + + // Start date of the presale event. Orders cannot be approved before this date. + StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/scale.go b/vendor/github.com/softlayer/softlayer-go/datatypes/scale.go new file mode 100644 index 0000000000..4ecc2a0f1d --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/scale.go @@ -0,0 +1,559 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Scale_Asset struct { + Entity + + // When this asset was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // An asset's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The group this asset belongs to. 
+ ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this asset belongs to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` +} + +// no documentation yet +type Scale_Asset_Hardware struct { + Scale_Asset + + // The hardware for this asset. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // The identifier of the hardware for this asset. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` +} + +// no documentation yet +type Scale_Asset_Virtual_Guest struct { + Scale_Asset + + // The guest for this asset. + VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` + + // The identifier of the guest for this asset. + VirtualGuestId *int `json:"virtualGuestId,omitempty" xmlrpc:"virtualGuestId,omitempty"` +} + +// no documentation yet +type Scale_Group struct { + Entity + + // The account for this scaling group. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The identifier of the account assigned to this group. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // If this is true, this group will scale down members in a way to preserve the balance across VLANs. If there is ambiguity about which member to use to maintain balance, the terminationPolicy is used to resolve it. This is false by default and can only be set to true if there are multiple VLANs that are being balanced across. + BalancedTerminationFlag *bool `json:"balancedTerminationFlag,omitempty" xmlrpc:"balancedTerminationFlag,omitempty"` + + // The number of seconds this group will wait after lastActionDate before performing another action. Be advised, this can be overridden per policy. While strongly discouraged, a value of 0 effectively disables cooldown. + Cooldown *int `json:"cooldown,omitempty" xmlrpc:"cooldown,omitempty"` + + // When this group was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // This value is only available on the template for creating and editing a group. It will be null when retrieved. When this value is provided on create or edit, guests will be scaled up or down to meet this number. This number must be in the range provided by minimumMemberCount and maximumMemberCount. This value can only be present during create or edit when this group is active. Note, guests that are created as a result of this value can possibly be removed after cooldown by a policy. + DesiredMemberCount *int `json:"desiredMemberCount,omitempty" xmlrpc:"desiredMemberCount,omitempty"` + + // A group's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date of the last action on this group or its create date + LastActionDate *Time `json:"lastActionDate,omitempty" xmlrpc:"lastActionDate,omitempty"` + + // A count of collection of load balancers for this auto scale group. + LoadBalancerCount *uint `json:"loadBalancerCount,omitempty" xmlrpc:"loadBalancerCount,omitempty"` + + // Collection of load balancers for this auto scale group. + LoadBalancers []Scale_LoadBalancer `json:"loadBalancers,omitempty" xmlrpc:"loadBalancers,omitempty"` + + // A count of collection of log entries for this group. + LogCount *uint `json:"logCount,omitempty" xmlrpc:"logCount,omitempty"` + + // Collection of log entries for this group. 
+ Logs []Scale_Group_Log `json:"logs,omitempty" xmlrpc:"logs,omitempty"` + + // The greatest number of virtual guest members that are allowed on this group. Any attempts to add a guest member will fail if it will result in the total guest member count of this group to be above this number. If this number is edited and is less than the current guest member count, guests will be removed to at least be no greater than this number. + MaximumMemberCount *int `json:"maximumMemberCount,omitempty" xmlrpc:"maximumMemberCount,omitempty"` + + // The fewest number of virtual guest members that are allowed on this group. Any attempts to remove a guest member will fail if it will result in the total guest member count of this group to be below this number. If this number is edited and is larger than the current guest member count, guests will be added to at least reach this number. + MinimumMemberCount *int `json:"minimumMemberCount,omitempty" xmlrpc:"minimumMemberCount,omitempty"` + + // When this group was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of this scale group. It must be unique on the account. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of collection of VLANs for this auto scale group. VLANs are optional. This can contain a public or private VLAN or both. When a single VLAN for a public/private type is given it can be a non-purchased VLAN only if the minimumMemberCount on the group is >= 1. This can also contain any number of public/private purchased VLANs and members are staggered across them when scaled up. + NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"` + + // Collection of VLANs for this auto scale group. VLANs are optional. This can contain a public or private VLAN or both. When a single VLAN for a public/private type is given it can be a non-purchased VLAN only if the minimumMemberCount on the group is >= 1. This can also contain any number of public/private purchased VLANs and members are staggered across them when scaled up. + NetworkVlans []Scale_Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"` + + // Collection of policies for this group. This can be empty. + Policies []Scale_Policy `json:"policies,omitempty" xmlrpc:"policies,omitempty"` + + // A count of collection of policies for this group. This can be empty. + PolicyCount *uint `json:"policyCount,omitempty" xmlrpc:"policyCount,omitempty"` + + // The regional group for this scale group. + RegionalGroup *Location_Group_Regional `json:"regionalGroup,omitempty" xmlrpc:"regionalGroup,omitempty"` + + // The identifier of the regional group this scaling group is assigned to. + RegionalGroupId *int `json:"regionalGroupId,omitempty" xmlrpc:"regionalGroupId,omitempty"` + + // The status for this scale group. + Status *Scale_Group_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // If true, this group is suspended. + SuspendedFlag *bool `json:"suspendedFlag,omitempty" xmlrpc:"suspendedFlag,omitempty"` + + // The termination policy for this scaling group. + TerminationPolicy *Scale_Termination_Policy `json:"terminationPolicy,omitempty" xmlrpc:"terminationPolicy,omitempty"` + + // The termination policy for the group. This determines which member to choose to delete when scaling downwards. 
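+ // + // Illustrative note (an assumption about the service, not generated documentation): termination policies are identified by key names such as OLDEST, NEWEST, or CLOSEST_TO_NEXT_CHARGE, and the policy only matters when more than one member is equally eligible for removal.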
+ TerminationPolicyId *int `json:"terminationPolicyId,omitempty" xmlrpc:"terminationPolicyId,omitempty"` + + // A count of collection of guests that have been pinned to this group. Guest assets are only used for certain trigger checks such as resource watches. They do not count towards the auto scaling guest counts of this group in any way and are never automatically added or removed. + VirtualGuestAssetCount *uint `json:"virtualGuestAssetCount,omitempty" xmlrpc:"virtualGuestAssetCount,omitempty"` + + // Collection of guests that have been pinned to this group. Guest assets are only used for certain trigger checks such as resource watches. They do not count towards the auto scaling guest counts of this group in any way and are never automatically added or removed. + VirtualGuestAssets []Scale_Asset_Virtual_Guest `json:"virtualGuestAssets,omitempty" xmlrpc:"virtualGuestAssets,omitempty"` + + // A count of collection of guests that have been scaled with the group. When this group is active, the count of guests here is guaranteed to be between minimumMemberCount and maximumMemberCount inclusively. + VirtualGuestMemberCount *uint `json:"virtualGuestMemberCount,omitempty" xmlrpc:"virtualGuestMemberCount,omitempty"` + + // This is the template to create guest members with. This is the same template accepted by the createObject call on SoftLayer_Virtual_Guest with some caveats. The hostname provided will have an arbitrary value appended to it for each guest created. Also, hourlyBillingFlag cannot be false, and if the datacenter is provided it must be in the region of this group. Finally, VLANs cannot be provided for the template; it will use VLANs provided to this group instead. + // + // Note, if this template is edited on an existing group the previous template values are not kept and are not considered during termination. This means a group's guest members could effectively be a hybrid of multiple templates because this value was changed after some guest members were created but before others were created. + VirtualGuestMemberTemplate *Virtual_Guest `json:"virtualGuestMemberTemplate,omitempty" xmlrpc:"virtualGuestMemberTemplate,omitempty"` + + // Collection of guests that have been scaled with the group. When this group is active, the count of guests here is guaranteed to be between minimumMemberCount and maximumMemberCount inclusively. + VirtualGuestMembers []Scale_Member_Virtual_Guest `json:"virtualGuestMembers,omitempty" xmlrpc:"virtualGuestMembers,omitempty"` +} + +// no documentation yet +type Scale_Group_Log struct { + Entity + + // When this event occurred. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A textual description of what happened during this action. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // This log's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The group this log refers to. + ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this log refers to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` +} + +// no documentation yet +type Scale_Group_Status struct { + Entity + + // A status's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A status's programmatic name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A status's human-friendly name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Scale_LoadBalancer struct { + Entity + + // The percentage of connections allocated to this virtual server. + AllocationPercent *int `json:"allocationPercent,omitempty" xmlrpc:"allocationPercent,omitempty"` + + // When this load balancer configuration was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // The health check for this configuration. + HealthCheck *Network_Application_Delivery_Controller_LoadBalancer_Health_Check `json:"healthCheck,omitempty" xmlrpc:"healthCheck,omitempty"` + + // The identifier for the health check of this load balancer configuration + HealthCheckId *int `json:"healthCheckId,omitempty" xmlrpc:"healthCheckId,omitempty"` + + // The load balancer configuration's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // When this load balancer configuration was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The port for this load balancer configuration. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // The routing method. + RoutingMethod *Network_Application_Delivery_Controller_LoadBalancer_Routing_Method `json:"routingMethod,omitempty" xmlrpc:"routingMethod,omitempty"` + + // The routing type. + RoutingType *Network_Application_Delivery_Controller_LoadBalancer_Routing_Type `json:"routingType,omitempty" xmlrpc:"routingType,omitempty"` + + // The group this load balancer configuration is for. + ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this load balancer configuration applies to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` + + // The ID of the virtual IP address. + VirtualIpAddressId *int `json:"virtualIpAddressId,omitempty" xmlrpc:"virtualIpAddressId,omitempty"` + + // The virtual server for this configuration. + VirtualServer *Network_Application_Delivery_Controller_LoadBalancer_VirtualServer `json:"virtualServer,omitempty" xmlrpc:"virtualServer,omitempty"` + + // The identifier of the virtual server this load balancer configuration uses. + VirtualServerId *int `json:"virtualServerId,omitempty" xmlrpc:"virtualServerId,omitempty"` + + // The port on the virtual server. + VirtualServerPort *int `json:"virtualServerPort,omitempty" xmlrpc:"virtualServerPort,omitempty"` +} + +// no documentation yet +type Scale_Member struct { + Entity + + // When this member was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A member's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The group this member belongs to. + ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this member belongs to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` +} + +// no documentation yet +type Scale_Member_Virtual_Guest struct { + Scale_Member + + // The guest for this member. 
+ VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` + + // The identifier of the guest for this member. + VirtualGuestId *int `json:"virtualGuestId,omitempty" xmlrpc:"virtualGuestId,omitempty"` +} + +// no documentation yet +type Scale_Network_Vlan struct { + Entity + + // When this network VLAN reference was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // The network VLAN reference's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The network VLAN to scale with. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // The identifier for the VLAN to scale with. + NetworkVlanId *int `json:"networkVlanId,omitempty" xmlrpc:"networkVlanId,omitempty"` + + // The group this network VLAN is for. + ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this network VLAN reference applies to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` +} + +// no documentation yet +type Scale_Policy struct { + Entity + + // A count of the actions to perform upon any trigger hit. Currently this must be a single value. + ActionCount *uint `json:"actionCount,omitempty" xmlrpc:"actionCount,omitempty"` + + // The actions to perform upon any trigger hit. Currently this must be a single value. + Actions []Scale_Policy_Action `json:"actions,omitempty" xmlrpc:"actions,omitempty"` + + // The number of seconds this policy will wait after lastActionDate on group before performing another action. If not present, the group's cooldown value is used. + Cooldown *int `json:"cooldown,omitempty" xmlrpc:"cooldown,omitempty"` + + // When this policy was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // A policy's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // When this policy was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of this policy. It must be unique within the group. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the one-time triggers to check for this group. + OneTimeTriggerCount *uint `json:"oneTimeTriggerCount,omitempty" xmlrpc:"oneTimeTriggerCount,omitempty"` + + // The one-time triggers to check for this group. + OneTimeTriggers []Scale_Policy_Trigger_OneTime `json:"oneTimeTriggers,omitempty" xmlrpc:"oneTimeTriggers,omitempty"` + + // A count of the repeating triggers to check for this group. + RepeatingTriggerCount *uint `json:"repeatingTriggerCount,omitempty" xmlrpc:"repeatingTriggerCount,omitempty"` + + // The repeating triggers to check for this group. + RepeatingTriggers []Scale_Policy_Trigger_Repeating `json:"repeatingTriggers,omitempty" xmlrpc:"repeatingTriggers,omitempty"` + + // A count of the resource-use triggers to check for this group. 
+ ResourceUseTriggerCount *uint `json:"resourceUseTriggerCount,omitempty" xmlrpc:"resourceUseTriggerCount,omitempty"` + + // The resource-use triggers to check for this group. + ResourceUseTriggers []Scale_Policy_Trigger_ResourceUse `json:"resourceUseTriggers,omitempty" xmlrpc:"resourceUseTriggers,omitempty"` + + // A count of the scale actions to perform upon any trigger hit. Currently this must be a single value. + ScaleActionCount *uint `json:"scaleActionCount,omitempty" xmlrpc:"scaleActionCount,omitempty"` + + // The scale actions to perform upon any trigger hit. Currently this must be a single value. + ScaleActions []Scale_Policy_Action_Scale `json:"scaleActions,omitempty" xmlrpc:"scaleActions,omitempty"` + + // The group this policy is on. + ScaleGroup *Scale_Group `json:"scaleGroup,omitempty" xmlrpc:"scaleGroup,omitempty"` + + // The identifier of the group this policy belongs to. + ScaleGroupId *int `json:"scaleGroupId,omitempty" xmlrpc:"scaleGroupId,omitempty"` + + // A count of the triggers to check for this group. + TriggerCount *uint `json:"triggerCount,omitempty" xmlrpc:"triggerCount,omitempty"` + + // The triggers to check for this group. + Triggers []Scale_Policy_Trigger `json:"triggers,omitempty" xmlrpc:"triggers,omitempty"` +} + +// no documentation yet +type Scale_Policy_Action struct { + Entity + + // When this action was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // An action's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // When this action was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The policy this action is on. + ScalePolicy *Scale_Policy `json:"scalePolicy,omitempty" xmlrpc:"scalePolicy,omitempty"` + + // The policy this action is on. + ScalePolicyId *int `json:"scalePolicyId,omitempty" xmlrpc:"scalePolicyId,omitempty"` + + // The type of action. + Type *Scale_Policy_Action_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The identifier of this action's type. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// no documentation yet +type Scale_Policy_Action_Scale struct { + Scale_Policy_Action + + // The number to scale by. This number has different meanings based on type. + Amount *int `json:"amount,omitempty" xmlrpc:"amount,omitempty"` + + // The type of scale to perform. Possible values: + // + // + // * ABSOLUTE - Force the group to be set at a specific number of group members. This may include scaling up or + // down or not at all. If the amount is outside of the min/max range of the group, an error occurs. + // * PERCENT - Scale the group up or down based on the positive or negative percentage given in amount. The + // number is a percent of the current group member count. Any extra percent after the decimal point is always ignored. If the resulting amount is zero, -1 or 1 is used depending upon whether the percentage was negative or positive respectively. + // * RELATIVE - Scale the group up or down by the positive or negative value given in amount.
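+ // + // Worked example (illustrative only, not part of the generated documentation): for a group currently at 10 members, PERCENT with an amount of -50 removes 5 members (50 percent of 10), RELATIVE with an amount of 3 grows the group to 13 members, and ABSOLUTE with an amount of 8 forces exactly 8 members, failing if 8 lies outside the group's minimum/maximum range.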
+ ScaleType *string `json:"scaleType,omitempty" xmlrpc:"scaleType,omitempty"` +} + +// no documentation yet +type Scale_Policy_Action_Type struct { + Entity + + // This type's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // An action type's programmatic name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // An action type's human-friendly name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger struct { + Entity + + // When this trigger was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // A trigger's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // When this trigger was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The policy this trigger is on. + ScalePolicy *Scale_Policy `json:"scalePolicy,omitempty" xmlrpc:"scalePolicy,omitempty"` + + // The policy this trigger is on. + ScalePolicyId *int `json:"scalePolicyId,omitempty" xmlrpc:"scalePolicyId,omitempty"` + + // The type of trigger. + Type *Scale_Policy_Trigger_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The type of trigger this is. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger_OneTime struct { + Scale_Policy_Trigger + + // The date to execute the policy. + Date *Time `json:"date,omitempty" xmlrpc:"date,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger_Repeating struct { + Scale_Policy_Trigger + + // The cron-formatted schedule. This is run in the UTC timezone. + Schedule *string `json:"schedule,omitempty" xmlrpc:"schedule,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger_ResourceUse struct { + Scale_Policy_Trigger + + // A count of the resource watches for this trigger. + WatchCount *uint `json:"watchCount,omitempty" xmlrpc:"watchCount,omitempty"` + + // The resource watches for this trigger. + Watches []Scale_Policy_Trigger_ResourceUse_Watch `json:"watches,omitempty" xmlrpc:"watches,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger_ResourceUse_Watch struct { + Entity + + // The algorithm to use when aggregating and comparing. Currently, the only value that is accepted is EWMA (Exponential Weighted Moving Average). EWMA is the default value if no value is given. + Algorithm *string `json:"algorithm,omitempty" xmlrpc:"algorithm,omitempty"` + + // When this watch was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // When set and true any edit that happens on this object, be it calling edit on this directly or setting as a child while editing a parent object, will end up being a deletion. + DeleteFlag *bool `json:"deleteFlag,omitempty" xmlrpc:"deleteFlag,omitempty"` + + // A watch's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The metric to watch. Possible values: + // + // + // * host.cpu.percent - On a scale of 0 to 100, the percent CPU a guest is using. 
+ // * host.network.backend.in and host.network.frontend.in - The network bytes-per-second incoming on the interface + // of either the frontend or backend network. + // * host.network.backend.out and host.network.frontend.out - The network bytes-per-second outgoing on the interface + // of either the frontend or backend network. + Metric *string `json:"metric,omitempty" xmlrpc:"metric,omitempty"` + + // When this watch was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The operator to use for comparison. The only two valid values are ">" and "<". + Operator *string `json:"operator,omitempty" xmlrpc:"operator,omitempty"` + + // The number of seconds the values are aggregated for when compared to value. If values are not retrieved steadily and consecutively for the length of this period, nothing is compared. + Period *int `json:"period,omitempty" xmlrpc:"period,omitempty"` + + // The trigger this watch is on. + ScalePolicyTrigger *Scale_Policy_Trigger_ResourceUse `json:"scalePolicyTrigger,omitempty" xmlrpc:"scalePolicyTrigger,omitempty"` + + // The trigger this watch is on. + ScalePolicyTriggerId *int `json:"scalePolicyTriggerId,omitempty" xmlrpc:"scalePolicyTriggerId,omitempty"` + + // The value to compare against. Although the value is a string, validation will be done on the value for restrictions (such as numeric-only) based on the metric. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// no documentation yet +type Scale_Policy_Trigger_Type struct { + Entity + + // A trigger type's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A trigger type's programmatic name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A trigger type's human-friendly name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// no documentation yet +type Scale_Termination_Policy struct { + Entity + + // A termination policy's internal identifier. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A termination policy's programmatic name. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A termination policy's human-friendly name. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/search.go b/vendor/github.com/softlayer/softlayer-go/datatypes/search.go new file mode 100644 index 0000000000..5afb44aacd --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/search.go @@ -0,0 +1,26 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Search struct { + Entity +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/security.go b/vendor/github.com/softlayer/softlayer-go/datatypes/security.go new file mode 100644 index 0000000000..d77cdaafeb --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/security.go @@ -0,0 +1,283 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// no documentation yet +type Security_Certificate struct { + Entity + + // The number of services currently associated with the certificate. + AssociatedServiceCount *int `json:"associatedServiceCount,omitempty" xmlrpc:"associatedServiceCount,omitempty"` + + // The certificate provided publicly to clients requesting identity credentials. This certificate is usually signed by a source trusted by the client or a signature chain can be established between this certificate and the trusted certificate. + // + // This property may only be modified when no services are associated. See associatedServiceCount. + Certificate *string `json:"certificate,omitempty" xmlrpc:"certificate,omitempty"` + + // The signing request used to request a certificate authority generate a signed certificate. + // + // This property may only be modified when no services are associated. See associatedServiceCount. + CertificateSigningRequest *string `json:"certificateSigningRequest,omitempty" xmlrpc:"certificateSigningRequest,omitempty"` + + // The common name (usually a domain name) encoded within the certificate. + // + // This property is read only. Changes made will be silently ignored. + CommonName *string `json:"commonName,omitempty" xmlrpc:"commonName,omitempty"` + + // The date the certificate _record_ was created. The contents of the certificate may have changed since the record was created, so this does not represent anything about the certificate itself. + // + // This property is read only. Changes made will be silently ignored. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The ID of the certificate record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The intermediate certificate authority's certificate that completes the certificate chain for the issued certificate. Required when clients will only trust the root certificate. + // + // This property may only be modified when no services are associated. See associatedServiceCount. + IntermediateCertificate *string `json:"intermediateCertificate,omitempty" xmlrpc:"intermediateCertificate,omitempty"` + + // The size (number of bits) of the public key represented by the certificate. + KeySize *int `json:"keySize,omitempty" xmlrpc:"keySize,omitempty"` + + // A count of the load balancers virtual IP addresses currently associated with the certificate.
+ LoadBalancerVirtualIpAddressCount *uint `json:"loadBalancerVirtualIpAddressCount,omitempty" xmlrpc:"loadBalancerVirtualIpAddressCount,omitempty"` + + // The load balancers virtual IP addresses currently associated with the certificate. + LoadBalancerVirtualIpAddresses []Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress `json:"loadBalancerVirtualIpAddresses,omitempty" xmlrpc:"loadBalancerVirtualIpAddresses,omitempty"` + + // The date the certificate _record_ was last modified. The contents of the certificate may have changed since the record was created, so this does not represent anything about the certificate itself. + // + // This property is read only. Changes made will be silently ignored. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A note to help describe the certificate. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The organizational name encoded in the certificate. + // + // This property is read only. Changes made will be silently ignored. + OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"` + + // The private key in the key/certificate pair. + // + // This property may only be modified when no services are associated. See associatedServiceCount. + PrivateKey *string `json:"privateKey,omitempty" xmlrpc:"privateKey,omitempty"` + + // The UTC timestamp representing the beginning of the certificate's validity period. + // + // This property is read only. Changes made will be silently ignored. + ValidityBegin *Time `json:"validityBegin,omitempty" xmlrpc:"validityBegin,omitempty"` + + // The number of days remaining in the validity period for the certificate. + // + // This property is read only. Changes made will be silently ignored. + ValidityDays *int `json:"validityDays,omitempty" xmlrpc:"validityDays,omitempty"` + + // The UTC timestamp representing the end of the certificate's validity period. + // + // This property is read only. Changes made will be silently ignored. + ValidityEnd *Time `json:"validityEnd,omitempty" xmlrpc:"validityEnd,omitempty"` +} + +// no documentation yet +type Security_Certificate_Entry struct { + Entity + + // The ID of the certificate record. + CertificateId *int `json:"certificateId,omitempty" xmlrpc:"certificateId,omitempty"` + + // The common name (usually a domain name) encoded within the certificate. + CommonName *string `json:"commonName,omitempty" xmlrpc:"commonName,omitempty"` + + // The size (number of bits) of the public key represented by the certificate. + KeySize *int `json:"keySize,omitempty" xmlrpc:"keySize,omitempty"` + + // The organizational name encoded in the certificate. + OrganizationName *string `json:"organizationName,omitempty" xmlrpc:"organizationName,omitempty"` + + // The UTC timestamp representing the beginning of the certificate's validity period. + ValidityBegin *Time `json:"validityBegin,omitempty" xmlrpc:"validityBegin,omitempty"` + + // The number of days remaining in the validity period for the certificate. + ValidityDays *int `json:"validityDays,omitempty" xmlrpc:"validityDays,omitempty"` + + // The UTC timestamp representing the end of the certificate's validity period. + ValidityEnd *Time `json:"validityEnd,omitempty" xmlrpc:"validityEnd,omitempty"` +} + +// The SoftLayer_Security_Certificate_Request data type is used to harness your SSL certificate order to a Certificate Authority. This contains data that is required by a Certificate Authority to place an SSL certificate order.
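+// +// A minimal population sketch (an illustrative assumption, including the chosen fields, the sl pointer-helper package from softlayer-go, and the hypothetical csrPem PEM-encoded CSR string; not generated documentation): +// +// req := datatypes.Security_Certificate_Request{ +// CommonName: sl.String("www.example.com"), +// ApproverEmailAddress: sl.String("admin@example.com"), +// CertificateSigningRequest: sl.String(csrPem), +// }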
+type Security_Certificate_Request struct { + Entity + + // The account to which an SSL certificate request belongs. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // This is a reference to your SoftLayer account. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The email address of a person who will approve your SSL certificate order. This is usually an email address of your domain administrator. + ApproverEmailAddress *string `json:"approverEmailAddress,omitempty" xmlrpc:"approverEmailAddress,omitempty"` + + // The Certificate Authority name + CertificateAuthorityName *string `json:"certificateAuthorityName,omitempty" xmlrpc:"certificateAuthorityName,omitempty"` + + // A Certificate Signing Request (CSR) string + CertificateSigningRequest *string `json:"certificateSigningRequest,omitempty" xmlrpc:"certificateSigningRequest,omitempty"` + + // A domain name of an SSL certificate request + CommonName *string `json:"commonName,omitempty" xmlrpc:"commonName,omitempty"` + + // The date an SSL certificate request was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The date your SSL certificate went into effect + EffectiveDate *Time `json:"effectiveDate,omitempty" xmlrpc:"effectiveDate,omitempty"` + + // The expiration date of your SSL certificate + ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"` + + // The internal identifier of an SSL certificate request + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date an SSL certificate request was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The order contains the information related to an SSL certificate request. + Order *Billing_Order `json:"order,omitempty" xmlrpc:"order,omitempty"` + + // The associated order item for this SSL certificate request. + OrderItem *Billing_Order_Item `json:"orderItem,omitempty" xmlrpc:"orderItem,omitempty"` + + // The status of an SSL certificate request. + Status *Security_Certificate_Request_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // A status id reflecting the state of an SSL certificate request + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The technical contact email address. + TechnicalContactEmailAddress *string `json:"technicalContactEmailAddress,omitempty" xmlrpc:"technicalContactEmailAddress,omitempty"` +} + +// Represents a server type that can be specified when ordering an SSL certificate. +type Security_Certificate_Request_ServerType struct { + Entity + + // The description of the certificate server type. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The internal identifier of the certificate server type. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The name of the certificate server type. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The value of the certificate server type. + Value *int `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// Represents the status of an SSL certificate request.
+type Security_Certificate_Request_Status struct { + Entity + + // The description of an SSL certificate request status + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // The internal identifier of an SSL certificate request status + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The status name + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// SoftLayer_Security_Directory_Service_Host_Xref_Hardware extends the [[SoftLayer_Security_Directory_Service_Host_Xref]] data type to include hardware specific properties. +type Security_Directory_Service_Host_Xref_Hardware struct { + Entity + + // The hardware object. + Host *Hardware `json:"host,omitempty" xmlrpc:"host,omitempty"` +} + +// Encryption algorithm intended for use in SSL/TLS communications +type Security_SecureTransportCipher struct { + Entity + + // Unique identifier for the encryption algorithm + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} + +// Protocol intended for use in secure communications +type Security_SecureTransportProtocol struct { + Entity + + // Unique identifier for the protocol + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // List of the supported encryption ciphers + SupportedSecureTransportCiphers []Security_SecureTransportCipher `json:"supportedSecureTransportCiphers,omitempty" xmlrpc:"supportedSecureTransportCiphers,omitempty"` +} + +// no documentation yet +type Security_Ssh_Key struct { + Entity + + // no documentation yet + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // A count of the image template groups that are linked to an SSH key. + BlockDeviceTemplateGroupCount *uint `json:"blockDeviceTemplateGroupCount,omitempty" xmlrpc:"blockDeviceTemplateGroupCount,omitempty"` + + // The image template groups that are linked to an SSH key. + BlockDeviceTemplateGroups []Virtual_Guest_Block_Device_Template_Group `json:"blockDeviceTemplateGroups,omitempty" xmlrpc:"blockDeviceTemplateGroups,omitempty"` + + // The date an SSH key was added. + // + // This property is read only. Changes made will be silently ignored. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A short sequence of bytes used to authenticate or look up a longer SSH key. This will automatically be generated upon adding or modifying the SSH key. + // + // This property is read only. Changes made will be silently ignored. + Fingerprint *string `json:"fingerprint,omitempty" xmlrpc:"fingerprint,omitempty"` + + // The ID of the SSH key record. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The SSH key. + Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"` + + // A descriptive name used to identify an SSH key. + Label *string `json:"label,omitempty" xmlrpc:"label,omitempty"` + + // The date an SSH key was last modified. + // + // This property is read only. Changes made will be silently ignored. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A small note about an SSH key to use at your discretion. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // A count of the OS root users that are linked to an SSH key. + SoftwarePasswordCount *uint `json:"softwarePasswordCount,omitempty" xmlrpc:"softwarePasswordCount,omitempty"` + + // The OS root users that are linked to an SSH key.
+ SoftwarePasswords []Software_Component_Password `json:"softwarePasswords,omitempty" xmlrpc:"softwarePasswords,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/service.go b/vendor/github.com/softlayer/softlayer-go/datatypes/service.go new file mode 100644 index 0000000000..c73b1f48bd --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/service.go @@ -0,0 +1,55 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Service_External_Resource is a placeholder that references a service being provided outside of the standard SoftLayer system. +type Service_External_Resource struct { + Entity + + // The customer account that is consuming the service. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The customer account that is consuming the related service. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The unique identifier in the service provider's system. + ExternalIdentifier *string `json:"externalIdentifier,omitempty" xmlrpc:"externalIdentifier,omitempty"` + + // An external resource's unique identifier in the SoftLayer system. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` +} + +// no documentation yet +type Service_Provider struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/softlayer.go b/vendor/github.com/softlayer/softlayer-go/datatypes/softlayer.go new file mode 100644 index 0000000000..a1f8f0b8db --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/softlayer.go @@ -0,0 +1,104 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package datatypes + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Void is a dummy type for identifying void return values from methods +type Void int + +// Time type overrides the default json marshaler with the SoftLayer custom format +type Time struct { + time.Time +} + +func (r Time) String() string { + return r.Time.Format(time.RFC3339) +} + +// MarshalJSON returns the json encoding of the datatypes.Time receiver. This +// override is necessary to ensure datetimes are formatted in the way SoftLayer +// expects - that is, using the RFC3339 format, without nanoseconds. +func (r Time) MarshalJSON() ([]byte, error) { + return []byte(`"` + r.String() + `"`), nil +} + +// MarshalText returns a text encoding of the datatypes.Time receiver. This +// is mainly provided to complete what might be expected of a type that +// implements the Marshaler interface. +func (r Time) MarshalText() ([]byte, error) { + return []byte(r.String()), nil +} + +// FIXME: Need to have special unmarshaling of some values defined as float type +// in the metadata that actually come down as strings in the api. +// e.g. SoftLayer_Product_Item.capacity +// Float64 is a float type that deals with some of the oddities when +// unmarshalling from the SLAPI +// +// Code borrowed from https://github.com/sudorandom/softlayer-go/blob/master/slapi/types/float.go +type Float64 float64 + +// UnmarshalJSON satisfies the json.Unmarshaler interface +func (f *Float64) UnmarshalJSON(data []byte) error { + + // Attempt parsing the float normally + v, err := strconv.ParseFloat(string(data), 64) + + // Attempt parsing the float as a string + if err != nil { + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return fmt.Errorf("malformed data") + } + + v, err = strconv.ParseFloat(string(data[1:len(data)-1]), 64) + if err != nil { + return err + } + } + *f = Float64(v) + return nil +} + +// Used to set the appropriate complexType field in the passed product order. +// Employs reflection to determine the type of the passed value and use it +// to derive the complexType to send to SoftLayer. +func SetComplexType(v interface{}) error { + orderDataPtr := reflect.ValueOf(v) + if orderDataPtr.Type().Name() != "" { + return errors.New("Did not pass a pointer to a product order.") + } + + orderDataValue := reflect.Indirect(reflect.ValueOf(v)) + orderDataType := orderDataValue.Type().Name() + if !strings.HasPrefix(orderDataType, "Container_Product_Order") { + return fmt.Errorf("Did not pass a pointer to a product order: %s", orderDataType) + } + + complexTypeField := orderDataValue.FieldByName("ComplexType") + complexType := "SoftLayer_" + orderDataType + complexTypeField.Set(reflect.ValueOf(&complexType)) + + return nil +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/software.go b/vendor/github.com/softlayer/softlayer-go/datatypes/software.go new file mode 100644 index 0000000000..6c6541f3f6 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/software.go @@ -0,0 +1,768 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// SoftLayer_Software_AccountLicense is a class that represents software licenses that are tied only to a customer's account and not to any particular hardware, IP address, etc. +type Software_AccountLicense struct { + Entity + + // The customer account this Account License belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The ID of the SoftLayer Account to which this Account License belongs. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The billing item for a software account license. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // Some Account Licenses have capacity information such as CPU specified in the units key. This provides the numerical representation of the capacity of the units. + Capacity *string `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"` + + // The License Key for this specific Account License. + Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"` + + // The SoftLayer_Software_Description that this account license is for. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // The unit of measurement that an account license has the capacity of. + Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"` +} + +// A SoftLayer_Software_Component ties the installation of a specific piece of software onto a specific piece of hardware. +// +// SoftLayer_Software_Component works with SoftLayer_Software_License and SoftLayer_Software_Description to tie this all together. +// +//
+// * SoftLayer_Software_Component is the installation of a specific piece of software onto a specific piece of hardware in accordance with a software license.
+//   * SoftLayer_Software_License dictates when and how a specific piece of software may be installed onto a piece of hardware.
+//     * SoftLayer_Software_Description describes a specific piece of software which can be installed onto hardware in accordance with its license agreement.
+type Software_Component struct { + Entity + + // The average amount of time that a software component takes to install. + AverageInstallationDuration *uint `json:"averageInstallationDuration,omitempty" xmlrpc:"averageInstallationDuration,omitempty"` + + // The billing item for a software component. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The hardware this Software Component is installed upon. + Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"` + + // Hardware Identification Number for the server this Software Component is installed upon. + HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"` + + // An ID number identifying this Software Component (Software Installation) + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The manufacturer code that is needed to activate a license. + ManufacturerActivationCode *string `json:"manufacturerActivationCode,omitempty" xmlrpc:"manufacturerActivationCode,omitempty"` + + // A license key for this specific installation of software, if it is needed. + ManufacturerLicenseInstance *string `json:"manufacturerLicenseInstance,omitempty" xmlrpc:"manufacturerLicenseInstance,omitempty"` + + // A count of username/password pairs used for access to this Software Installation. + PasswordCount *uint `json:"passwordCount,omitempty" xmlrpc:"passwordCount,omitempty"` + + // History Records for Software Passwords. + PasswordHistory []Software_Component_Password_History `json:"passwordHistory,omitempty" xmlrpc:"passwordHistory,omitempty"` + + // A count of history Records for Software Passwords. + PasswordHistoryCount *uint `json:"passwordHistoryCount,omitempty" xmlrpc:"passwordHistoryCount,omitempty"` + + // Username/password pairs used for access to this Software Installation. + Passwords []Software_Component_Password `json:"passwords,omitempty" xmlrpc:"passwords,omitempty"` + + // The Software Description of this Software Component. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // The License this Software Component uses. + SoftwareLicense *Software_License `json:"softwareLicense,omitempty" xmlrpc:"softwareLicense,omitempty"` + + // The virtual guest this software component is installed upon. + VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"` +} + +// This object specifies a specific type of Software Component: An analytics instance. Analytics installations have specific default ports and patterns for usernames and passwords. Defaults are initiated by this object. +type Software_Component_Analytics struct { + Software_Component +} + +// This object specifies a specific Software Component: An Urchin instance. Urchin installations have a specific default port (9999) and a pattern for usernames and passwords. Defaults are initiated by this object. +type Software_Component_Analytics_Urchin struct { + Software_Component_Analytics +} + +// This object specifies a specific type of Software Component: An Anti-virus/spyware instance. Anti-virus/spyware installations have specific properties and methods such as SoftLayer_Software_Component_AntivirusSpyware::updateAntivirusSpywarePolicy. Defaults are initiated by this object. +type Software_Component_AntivirusSpyware struct { + Software_Component +} + +// The SoftLayer_Software_Component_AntivirusSpyware_Mcafee represents a single anti-virus/spyware software component. 
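The subtype structs above carry no fields of their own; they only embed their parent type, so Go promotes every Software_Component field up the chain. A minimal sketch of that promotion, assuming the vendored import path from this diff and using made-up ID values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

// intPtr is a local helper; the generated structs use pointer fields
// tagged omitempty, so only explicitly set fields are serialized.
func intPtr(i int) *int { return &i }

func main() {
	// Software_Component_Analytics_Urchin embeds Software_Component_Analytics,
	// which embeds Software_Component, so fields like Id and HardwareId are
	// promoted all the way up and can be set directly on the subtype.
	urchin := datatypes.Software_Component_Analytics_Urchin{}
	urchin.Id = intPtr(42)         // hypothetical component ID
	urchin.HardwareId = intPtr(99) // hypothetical server ID

	out, err := json.Marshal(urchin)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // e.g. {"hardwareId":99,"id":42}
}
```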
+type Software_Component_AntivirusSpyware_Mcafee struct { + Software_Component_AntivirusSpyware +} + +// The SoftLayer_Software_Component_AntivirusSpyware_Mcafee_Epo_Version36 data type represents a single McAfee Secure anti-virus/spyware software component that uses the ePolicy Orchestrator version 3.6 backend. +type Software_Component_AntivirusSpyware_Mcafee_Epo_Version36 struct { + Software_Component_AntivirusSpyware_Mcafee + + // The virus scan agent details. + AgentDetails *McAfee_Epolicy_Orchestrator_Version36_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // The current anti-virus policy. + CurrentAntivirusPolicy *int `json:"currentAntivirusPolicy,omitempty" xmlrpc:"currentAntivirusPolicy,omitempty"` + + // The virus definition file version. + DataFileVersion *McAfee_Epolicy_Orchestrator_Version36_Product_Properties `json:"dataFileVersion,omitempty" xmlrpc:"dataFileVersion,omitempty"` + + // The version of ePolicy Orchestrator that the anti-virus/spyware client communicates with. + EpoVersion *string `json:"epoVersion,omitempty" xmlrpc:"epoVersion,omitempty"` + + // A count of the latest access protection events. + LatestAccessProtectionEventCount *uint `json:"latestAccessProtectionEventCount,omitempty" xmlrpc:"latestAccessProtectionEventCount,omitempty"` + + // The latest access protection events. + LatestAccessProtectionEvents []McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event_AccessProtection `json:"latestAccessProtectionEvents,omitempty" xmlrpc:"latestAccessProtectionEvents,omitempty"` + + // A count of the latest anti-virus events. + LatestAntivirusEventCount *uint `json:"latestAntivirusEventCount,omitempty" xmlrpc:"latestAntivirusEventCount,omitempty"` + + // The latest anti-virus events. + LatestAntivirusEvents []McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event `json:"latestAntivirusEvents,omitempty" xmlrpc:"latestAntivirusEvents,omitempty"` + + // A count of the latest spyware events. + LatestSpywareEventCount *uint `json:"latestSpywareEventCount,omitempty" xmlrpc:"latestSpywareEventCount,omitempty"` + + // The latest spyware events. + LatestSpywareEvents []McAfee_Epolicy_Orchestrator_Version36_Antivirus_Event `json:"latestSpywareEvents,omitempty" xmlrpc:"latestSpywareEvents,omitempty"` + + // The current transaction status of a server. + TransactionStatus *string `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"` +} + +// The SoftLayer_Software_Component_AntivirusSpyware_Mcafee_Epo_Version45 data type represents a single McAfee Secure anti-virus/spyware software component that uses the ePolicy Orchestrator version 4.5 backend. +type Software_Component_AntivirusSpyware_Mcafee_Epo_Version45 struct { + Software_Component_AntivirusSpyware_Mcafee + + // The virus scan agent details. + AgentDetails *McAfee_Epolicy_Orchestrator_Version45_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // The current anti-virus policy. + CurrentAntivirusPolicy *int `json:"currentAntivirusPolicy,omitempty" xmlrpc:"currentAntivirusPolicy,omitempty"` + + // The virus definition file version. + DataFileVersion *McAfee_Epolicy_Orchestrator_Version45_Product_Properties `json:"dataFileVersion,omitempty" xmlrpc:"dataFileVersion,omitempty"` + + // The version of ePolicy Orchestrator that the anti-virus/spyware client communicates with. + EpoVersion *string `json:"epoVersion,omitempty" xmlrpc:"epoVersion,omitempty"` + + // A count of the latest access protection events. 
+ LatestAccessProtectionEventCount *uint `json:"latestAccessProtectionEventCount,omitempty" xmlrpc:"latestAccessProtectionEventCount,omitempty"` + + // The latest access protection events. + LatestAccessProtectionEvents []McAfee_Epolicy_Orchestrator_Version45_Event `json:"latestAccessProtectionEvents,omitempty" xmlrpc:"latestAccessProtectionEvents,omitempty"` + + // A count of the latest anti-virus events. + LatestAntivirusEventCount *uint `json:"latestAntivirusEventCount,omitempty" xmlrpc:"latestAntivirusEventCount,omitempty"` + + // The latest anti-virus events. + LatestAntivirusEvents []McAfee_Epolicy_Orchestrator_Version45_Event `json:"latestAntivirusEvents,omitempty" xmlrpc:"latestAntivirusEvents,omitempty"` + + // A count of the latest spyware events + LatestSpywareEventCount *uint `json:"latestSpywareEventCount,omitempty" xmlrpc:"latestSpywareEventCount,omitempty"` + + // The latest spyware events + LatestSpywareEvents []McAfee_Epolicy_Orchestrator_Version45_Event `json:"latestSpywareEvents,omitempty" xmlrpc:"latestSpywareEvents,omitempty"` + + // The current transaction status of a server. + TransactionStatus *string `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"` +} + +// This object specifies a specific type of Software Component: A control panel instance. Control panel installations have specific default ports and patterns for usernames and passwords. Defaults are initiated by this object. +type Software_Component_ControlPanel struct { + Software_Component +} + +// This object specifies a specific Software Component: A cPanel instance. cPanel installations have a specific default port (2086) and a pattern for usernames and passwords. Defaults are initiated by this object. +type Software_Component_ControlPanel_Cpanel struct { + Software_Component +} + +// This object specifies a specific type of control panel Software Component: An Idera instance. +type Software_Component_ControlPanel_Idera struct { + Software_Component +} + +// This object specifies a specific type of Software Component: An Idera Server Backup instance. +type Software_Component_ControlPanel_Idera_ServerBackup struct { + Software_Component_ControlPanel_Idera +} + +// This object is a parent class for Microsoft Products, like Web Matrix +type Software_Component_ControlPanel_Microsoft struct { + Software_Component +} + +// This object specifies a specific Software Component: A WebPlatform instance. WebPlatform installations have a specific xml config with usernames and passwords. Defaults are initiated by this object. +type Software_Component_ControlPanel_Microsoft_WebPlatform struct { + Software_Component_ControlPanel_Microsoft +} + +// This object is a parent class for SWSoft Products, like Plesk +type Software_Component_ControlPanel_Parallels struct { + Software_Component +} + +// This object specifies a specific Software Component: A Plesk instance produced by SWSoft. SWSoft Plesk installations have a specific default port (8443) and a pattern for usernames and passwords. Defaults are initiated by this object. +type Software_Component_ControlPanel_Parallels_Plesk struct { + Software_Component_ControlPanel_Parallels +} + +// This object specifies a specific type of control panel Software Component: An R1soft instance. +type Software_Component_ControlPanel_R1soft struct { + Software_Component +} + +// This object specifies a specific type of Software Component: An R1soft continuous data protection instance. 
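Several of the doc comments above pin a default port to a component type (Urchin 9999, cPanel 2086, Plesk 8443). A small lookup table is one way calling code might consolidate them; the sketch below is built only from those comments, and the authoritative (possibly customized) port for a given installation is the Port field on its Software_Component_Password entries, defined later in this file:

```go
package main

import "fmt"

// documentedDefaultPorts maps a few of the component class names in this
// file to the default application ports their doc comments mention.
// Illustrative only; real installations may use other ports.
var documentedDefaultPorts = map[string]int{
	"SoftLayer_Software_Component_Analytics_Urchin":             9999,
	"SoftLayer_Software_Component_ControlPanel_Cpanel":          2086,
	"SoftLayer_Software_Component_ControlPanel_Parallels_Plesk": 8443,
}

func main() {
	if port, ok := documentedDefaultPorts["SoftLayer_Software_Component_ControlPanel_Cpanel"]; ok {
		fmt.Println(port) // 2086
	}
}
```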
+type Software_Component_ControlPanel_R1soft_Cdp struct { + Software_Component_ControlPanel_R1soft +} + +// This object specifies a specific type of Software Component: An R1Soft Server Backup instance. +type Software_Component_ControlPanel_R1soft_ServerBackup struct { + Software_Component_ControlPanel_R1soft +} + +// This object is a parent class for SWSoft Products, like Plesk +type Software_Component_ControlPanel_Swsoft struct { + Software_Component +} + +// This object specifies a specific Software Component: A Helm instance produced by Webhost Automation. Webhost Automation's Helm installations have a specific default port (8086) and a pattern for usernames and passwords. Defaults are initiated by this object. +type Software_Component_ControlPanel_WebhostAutomation struct { + Software_Component +} + +// This object specifies a specific type of Software Component: A Host Intrusion Protection System instance. +type Software_Component_HostIps struct { + Software_Component +} + +// The SoftLayer_Software_Component_HostIps_Mcafee represents a single host IPS software component. +type Software_Component_HostIps_Mcafee struct { + Software_Component_HostIps +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version36_Hips data type represents a single McAfee Secure Host IPS software component that uses the ePolicy Orchestrator version 3.6 backend. +type Software_Component_HostIps_Mcafee_Epo_Version36_Hips struct { + Software_Component_HostIps_Mcafee + + // The host IPS agent details. + AgentDetails *McAfee_Epolicy_Orchestrator_Version36_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // A count of the names of the possible policy options for the application mode setting. + ApplicationModePolicyNameCount *uint `json:"applicationModePolicyNameCount,omitempty" xmlrpc:"applicationModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the application mode setting. + ApplicationModePolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"applicationModePolicyNames,omitempty" xmlrpc:"applicationModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the application rule set setting. + ApplicationRuleSetPolicyNameCount *uint `json:"applicationRuleSetPolicyNameCount,omitempty" xmlrpc:"applicationRuleSetPolicyNameCount,omitempty"` + + // The names of the possible policy options for the application rule set setting. + ApplicationRuleSetPolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"applicationRuleSetPolicyNames,omitempty" xmlrpc:"applicationRuleSetPolicyNames,omitempty"` + + // A count of the names of the possible options for the enforcement policy setting. + EnforcementPolicyNameCount *uint `json:"enforcementPolicyNameCount,omitempty" xmlrpc:"enforcementPolicyNameCount,omitempty"` + + // The names of the possible options for the enforcement policy setting. + EnforcementPolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"enforcementPolicyNames,omitempty" xmlrpc:"enforcementPolicyNames,omitempty"` + + // The version of ePolicy Orchestrator that the host IPS client communicates with. + EpoVersion *string `json:"epoVersion,omitempty" xmlrpc:"epoVersion,omitempty"` + + // A count of the names of the possible policy options for the firewall mode setting. 
+ FirewallModePolicyNameCount *uint `json:"firewallModePolicyNameCount,omitempty" xmlrpc:"firewallModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the firewall mode setting. + FirewallModePolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"firewallModePolicyNames,omitempty" xmlrpc:"firewallModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the firewall rule set setting. + FirewallRuleSetPolicyNameCount *uint `json:"firewallRuleSetPolicyNameCount,omitempty" xmlrpc:"firewallRuleSetPolicyNameCount,omitempty"` + + // The names of the possible policy options for the firewall rule set setting. + FirewallRuleSetPolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"firewallRuleSetPolicyNames,omitempty" xmlrpc:"firewallRuleSetPolicyNames,omitempty"` + + // A count of the names of the possible policy options for the host IPS mode setting. + IpsModePolicyNameCount *uint `json:"ipsModePolicyNameCount,omitempty" xmlrpc:"ipsModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the host IPS mode setting. + IpsModePolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"ipsModePolicyNames,omitempty" xmlrpc:"ipsModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the host IPS protection setting. + IpsProtectionPolicyNameCount *uint `json:"ipsProtectionPolicyNameCount,omitempty" xmlrpc:"ipsProtectionPolicyNameCount,omitempty"` + + // The names of the possible policy options for the host IPS protection setting. + IpsProtectionPolicyNames []McAfee_Epolicy_Orchestrator_Version36_Policy_Object `json:"ipsProtectionPolicyNames,omitempty" xmlrpc:"ipsProtectionPolicyNames,omitempty"` + + // The current transaction status of a server. + TransactionStatus *string `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"` +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version36_Hips_Version6 data type represents a single McAfee Secure Host IPS software component for version 6 of the Host IPS client and uses the ePolicy Orchestrator version 3.6 backend. +type Software_Component_HostIps_Mcafee_Epo_Version36_Hips_Version6 struct { + Software_Component_HostIps_Mcafee_Epo_Version36_Hips + + // A count of the blocked application events for this software component. + BlockedApplicationEventCount *uint `json:"blockedApplicationEventCount,omitempty" xmlrpc:"blockedApplicationEventCount,omitempty"` + + // The blocked application events for this software component. + BlockedApplicationEvents []McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_BlockedApplicationEvent `json:"blockedApplicationEvents,omitempty" xmlrpc:"blockedApplicationEvents,omitempty"` + + // A count of the host IPS events for this software component. + IpsEventCount *uint `json:"ipsEventCount,omitempty" xmlrpc:"ipsEventCount,omitempty"` + + // The host IPS events for this software component. + IpsEvents []McAfee_Epolicy_Orchestrator_Version36_Hips_Version6_IPSEvent `json:"ipsEvents,omitempty" xmlrpc:"ipsEvents,omitempty"` +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version36_Hips_Version7 data type represents a single McAfee Secure Host IPS software component for version 7 of the Host IPS client and uses the ePolicy Orchestrator version 3.6 backend. 
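Throughout these generated types, every relational slice is paired with a pointer-typed "...Count" property, and either half may be absent depending on what the caller fetched, so a nil-safe accessor is useful. A minimal sketch for one such pairing on the Version6 type defined just above (the same shape works for any count/slice pair):

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

// ipsEventTotal prefers the API-reported IpsEventCount when it was
// fetched and falls back to the length of the locally loaded IpsEvents
// slice. All fields on these generated structs are pointers or slices,
// so both may legitimately be absent.
func ipsEventTotal(h datatypes.Software_Component_HostIps_Mcafee_Epo_Version36_Hips_Version6) uint {
	if h.IpsEventCount != nil {
		return *h.IpsEventCount
	}
	return uint(len(h.IpsEvents))
}

func main() {
	var h datatypes.Software_Component_HostIps_Mcafee_Epo_Version36_Hips_Version6
	fmt.Println(ipsEventTotal(h)) // 0: nothing fetched yet
}
```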
+type Software_Component_HostIps_Mcafee_Epo_Version36_Hips_Version7 struct { + Software_Component_HostIps_Mcafee_Epo_Version36_Hips + + // A count of the blocked application events for this software component. + BlockedApplicationEventCount *uint `json:"blockedApplicationEventCount,omitempty" xmlrpc:"blockedApplicationEventCount,omitempty"` + + // The blocked application events for this software component. + BlockedApplicationEvents []McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_BlockedApplicationEvent `json:"blockedApplicationEvents,omitempty" xmlrpc:"blockedApplicationEvents,omitempty"` + + // A count of the host IPS events for this software component. + IpsEventCount *uint `json:"ipsEventCount,omitempty" xmlrpc:"ipsEventCount,omitempty"` + + // The host IPS events for this software component. + IpsEvents []McAfee_Epolicy_Orchestrator_Version36_Hips_Version7_IPSEvent `json:"ipsEvents,omitempty" xmlrpc:"ipsEvents,omitempty"` +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version45_Hips data type represents a single McAfee Secure Host IPS software component that uses the ePolicy Orchestrator version 4.5 backend. +type Software_Component_HostIps_Mcafee_Epo_Version45_Hips struct { + Software_Component_HostIps_Mcafee + + // The host IPS agent details. + AgentDetails *McAfee_Epolicy_Orchestrator_Version45_Agent_Details `json:"agentDetails,omitempty" xmlrpc:"agentDetails,omitempty"` + + // A count of the names of the possible policy options for the application mode setting. + ApplicationModePolicyNameCount *uint `json:"applicationModePolicyNameCount,omitempty" xmlrpc:"applicationModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the application mode setting. + ApplicationModePolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"applicationModePolicyNames,omitempty" xmlrpc:"applicationModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the application rule set setting. + ApplicationRuleSetPolicyNameCount *uint `json:"applicationRuleSetPolicyNameCount,omitempty" xmlrpc:"applicationRuleSetPolicyNameCount,omitempty"` + + // The names of the possible policy options for the application rule set setting. + ApplicationRuleSetPolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"applicationRuleSetPolicyNames,omitempty" xmlrpc:"applicationRuleSetPolicyNames,omitempty"` + + // A count of the blocked application events for this software component. + BlockedApplicationEventCount *uint `json:"blockedApplicationEventCount,omitempty" xmlrpc:"blockedApplicationEventCount,omitempty"` + + // The blocked application events for this software component. + BlockedApplicationEvents []McAfee_Epolicy_Orchestrator_Version45_Event `json:"blockedApplicationEvents,omitempty" xmlrpc:"blockedApplicationEvents,omitempty"` + + // A count of the names of the possible options for the enforcement policy setting. + EnforcementPolicyNameCount *uint `json:"enforcementPolicyNameCount,omitempty" xmlrpc:"enforcementPolicyNameCount,omitempty"` + + // The names of the possible options for the enforcement policy setting. + EnforcementPolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"enforcementPolicyNames,omitempty" xmlrpc:"enforcementPolicyNames,omitempty"` + + // The version of ePolicy Orchestrator that the host IPS client communicates with. 
+ EpoVersion *string `json:"epoVersion,omitempty" xmlrpc:"epoVersion,omitempty"` + + // A count of the names of the possible policy options for the firewall mode setting. + FirewallModePolicyNameCount *uint `json:"firewallModePolicyNameCount,omitempty" xmlrpc:"firewallModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the firewall mode setting. + FirewallModePolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"firewallModePolicyNames,omitempty" xmlrpc:"firewallModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the firewall rule set setting. + FirewallRuleSetPolicyNameCount *uint `json:"firewallRuleSetPolicyNameCount,omitempty" xmlrpc:"firewallRuleSetPolicyNameCount,omitempty"` + + // The names of the possible policy options for the firewall rule set setting. + FirewallRuleSetPolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"firewallRuleSetPolicyNames,omitempty" xmlrpc:"firewallRuleSetPolicyNames,omitempty"` + + // A count of the host IPS events for this software component. + IpsEventCount *uint `json:"ipsEventCount,omitempty" xmlrpc:"ipsEventCount,omitempty"` + + // The host IPS events for this software component. + IpsEvents []McAfee_Epolicy_Orchestrator_Version45_Event `json:"ipsEvents,omitempty" xmlrpc:"ipsEvents,omitempty"` + + // A count of the names of the possible policy options for the host IPS mode setting. + IpsModePolicyNameCount *uint `json:"ipsModePolicyNameCount,omitempty" xmlrpc:"ipsModePolicyNameCount,omitempty"` + + // The names of the possible policy options for the host IPS mode setting. + IpsModePolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"ipsModePolicyNames,omitempty" xmlrpc:"ipsModePolicyNames,omitempty"` + + // A count of the names of the possible policy options for the host IPS protection setting. + IpsProtectionPolicyNameCount *uint `json:"ipsProtectionPolicyNameCount,omitempty" xmlrpc:"ipsProtectionPolicyNameCount,omitempty"` + + // The names of the possible policy options for the host IPS protection setting. + IpsProtectionPolicyNames []McAfee_Epolicy_Orchestrator_Version45_Policy_Object `json:"ipsProtectionPolicyNames,omitempty" xmlrpc:"ipsProtectionPolicyNames,omitempty"` + + // The current transaction status of a server. + TransactionStatus *string `json:"transactionStatus,omitempty" xmlrpc:"transactionStatus,omitempty"` +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version45_Hips_Version7 data type represents a single McAfee Secure Host IPS software component for version 7 of the Host IPS client and uses the ePolicy Orchestrator version 4.5 backend. +type Software_Component_HostIps_Mcafee_Epo_Version45_Hips_Version7 struct { + Software_Component_HostIps_Mcafee_Epo_Version45_Hips +} + +// The SoftLayer_Software_Component_HostIps_Mcafee_Epo_Version45_Hips_Version8 data type represents a single McAfee Secure Host IPS software component for version 8 of the Host IPS client and uses the ePolicy Orchestrator version 4.5 backend. +type Software_Component_HostIps_Mcafee_Epo_Version45_Hips_Version8 struct { + Software_Component_HostIps_Mcafee_Epo_Version45_Hips +} + +// SoftLayer_Software_Component_OperatingSystem extends the [[SoftLayer_Software_Component]] data type to include operating system specific properties. +type Software_Component_OperatingSystem struct { + Software_Component + + // The date in which the license for this software expires. 
+ LicenseExpirationDate *Time `json:"licenseExpirationDate,omitempty" xmlrpc:"licenseExpirationDate,omitempty"` + + // A count of an operating system's associated [[SoftLayer_Hardware_Component_Partition_Template|Partition Templates]] that can be used to configure a hardware drive. + PartitionTemplateCount *uint `json:"partitionTemplateCount,omitempty" xmlrpc:"partitionTemplateCount,omitempty"` + + // An operating system's associated [[SoftLayer_Hardware_Component_Partition_Template|Partition Templates]] that can be used to configure a hardware drive. + PartitionTemplates []Hardware_Component_Partition_Template `json:"partitionTemplates,omitempty" xmlrpc:"partitionTemplates,omitempty"` + + // An operating system's associated [[SoftLayer_Provisioning_Version1_Transaction_Group|Transaction Group]]. A transaction group is a list of operations that will occur during the installation of an operating system. + ReloadTransactionGroup *Provisioning_Version1_Transaction_Group `json:"reloadTransactionGroup,omitempty" xmlrpc:"reloadTransactionGroup,omitempty"` +} + +// This object specifies a specific type of Software Component: A package instance. +type Software_Component_Package struct { + Software_Component +} + +// This object specifies a specific type of Software Component: A package management instance. +type Software_Component_Package_Management struct { + Software_Component_Package +} + +// This object specifies a specific type of Software Component: A Ksplice instance. +type Software_Component_Package_Management_Ksplice struct { + Software_Component_Package_Management +} + +// This SoftLayer_Software_Component_Password data type contains a password for a specific software component instance. +type Software_Component_Password struct { + Entity + + // The date this username/password pair was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // An id number for this specific username/password pair. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The date of the last modification to this username/password pair. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A note string stored for this username/password pair. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The password part of the username/password pair. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // The application access port for the Software Component. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // The SoftLayer_Software_Component instance that this username/password pair is valid for. + Software *Software_Component `json:"software,omitempty" xmlrpc:"software,omitempty"` + + // An id number for the software component this username/password pair is valid for. + SoftwareId *int `json:"softwareId,omitempty" xmlrpc:"softwareId,omitempty"` + + // A count of SSH keys to be installed on the server during provisioning or an OS reload. + SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"` + + // SSH keys to be installed on the server during provisioning or an OS reload. + SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"` + + // The username part of the username/password pair. 
+ Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// This object allows you to find the history of password changes for a specific SoftLayer_Software_Component +type Software_Component_Password_History struct { + Entity + + // The date this username/password pair was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A note string stored for this username/password pair. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The password part of this specific password history instance. + Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"` + + // An installed and licensed instance of a piece of software + SoftwareComponent *Software_Component `json:"softwareComponent,omitempty" xmlrpc:"softwareComponent,omitempty"` + + // The id number for the Software Component this username/password pair is for. + SoftwareComponentId *int `json:"softwareComponentId,omitempty" xmlrpc:"softwareComponentId,omitempty"` + + // The username part of this specific password history instance. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// This object specifies a specific type of Software Component: A security instance. Security installations have custom configurations for password requirements. +type Software_Component_Security struct { + Software_Component +} + +// This object specifies a specific Software Component: A SafeNet instance. SafeNet installations have custom configurations for password requirements. +type Software_Component_Security_SafeNet struct { + Software_Component_Security +} + +// This class holds a description for a specific installation of a Software Component. +// +// SoftLayer_Software_Licenses tie a Software Component (a specific installation on a piece of hardware) to its description. +// +// The "Manufacturer" and "Name" properties of a SoftLayer_Software_Description are used by the framework to factory specific objects, objects that may have special methods for that specific piece of software, or objects that contain application specific data, such as default ports. For example, if you create a SoftLayer_Software_Component whose SoftLayer_Software_License points to the SoftLayer_Software_Description for "Swsoft" "Plesk", you'll actually get a SoftLayer_Software_Component_Swsoft_Plesk object. +type Software_Description struct { + Entity + + // A count of + AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"` + + // no documentation yet + Attributes []Software_Description_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"` + + // The average amount of time that a software description takes to install. + AverageInstallationDuration *int `json:"averageInstallationDuration,omitempty" xmlrpc:"averageInstallationDuration,omitempty"` + + // A count of a list of the software descriptions that are compatible with this software description. + CompatibleSoftwareDescriptionCount *uint `json:"compatibleSoftwareDescriptionCount,omitempty" xmlrpc:"compatibleSoftwareDescriptionCount,omitempty"` + + // A list of the software descriptions that are compatible with this software description. + CompatibleSoftwareDescriptions []Software_Description `json:"compatibleSoftwareDescriptions,omitempty" xmlrpc:"compatibleSoftwareDescriptions,omitempty"` + + // This is set to '1' if this Software Description describes a Control Panel. 
+ ControlPanel *int `json:"controlPanel,omitempty" xmlrpc:"controlPanel,omitempty"` + + // A count of the feature attributes of a software description. + FeatureCount *uint `json:"featureCount,omitempty" xmlrpc:"featureCount,omitempty"` + + // The feature attributes of a software description. + Features []Software_Description_Feature `json:"features,omitempty" xmlrpc:"features,omitempty"` + + // An ID number to identify this Software Description. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The latest version of a software description. + LatestVersion []Software_Description `json:"latestVersion,omitempty" xmlrpc:"latestVersion,omitempty"` + + // A count of the latest version of a software description. + LatestVersionCount *uint `json:"latestVersionCount,omitempty" xmlrpc:"latestVersionCount,omitempty"` + + // The unit of measurement (day, month, or year) for license registration. Used in conjunction with licenseTermValue to determine overall license registration length of a new license. + LicenseTermUnit *string `json:"licenseTermUnit,omitempty" xmlrpc:"licenseTermUnit,omitempty"` + + // The number of units (licenseTermUnit) a new license is valid for at the time of registration. + LicenseTermValue *int `json:"licenseTermValue,omitempty" xmlrpc:"licenseTermValue,omitempty"` + + // The manufacturer, name and version of a piece of software. + LongDescription *string `json:"longDescription,omitempty" xmlrpc:"longDescription,omitempty"` + + // The name of the manufacturer for this specific piece of software. This name is used by SoftLayer_Software_Component to tailor make (factory) specific types of Software Components that know details like default ports. + Manufacturer *string `json:"manufacturer,omitempty" xmlrpc:"manufacturer,omitempty"` + + // The name of this specific piece of software. This name is used by SoftLayer_Software_Component to tailor make (factory) specific types of Software Components that know details like default ports. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // This is set to '1' if this Software Description describes an Operating System. + OperatingSystem *int `json:"operatingSystem,omitempty" xmlrpc:"operatingSystem,omitempty"` + + // A count of the various product items to which this software description is linked. + ProductItemCount *uint `json:"productItemCount,omitempty" xmlrpc:"productItemCount,omitempty"` + + // The various product items to which this software description is linked. + ProductItems []Product_Item `json:"productItems,omitempty" xmlrpc:"productItems,omitempty"` + + // This details the provisioning transaction group for this software. This is only valid for Operating System software. + ProvisionTransactionGroup *Provisioning_Version1_Transaction_Group `json:"provisionTransactionGroup,omitempty" xmlrpc:"provisionTransactionGroup,omitempty"` + + // A reference code is structured as three tokens separated by underscores. The first token represents the product, the second is the version of the product, and the third is whether the software is 32 or 64bit. + ReferenceCode *string `json:"referenceCode,omitempty" xmlrpc:"referenceCode,omitempty"` + + // The transaction group that a software description belongs to. A transaction group is a sequence of transactions that must be performed in a specific order for the installation of software. 
+ ReloadTransactionGroup *Provisioning_Version1_Transaction_Group `json:"reloadTransactionGroup,omitempty" xmlrpc:"reloadTransactionGroup,omitempty"` + + // The default user created for a given software description. + RequiredUser *string `json:"requiredUser,omitempty" xmlrpc:"requiredUser,omitempty"` + + // A count of software Licenses that govern this Software Description. + SoftwareLicenseCount *uint `json:"softwareLicenseCount,omitempty" xmlrpc:"softwareLicenseCount,omitempty"` + + // Software Licenses that govern this Software Description. + SoftwareLicenses []Software_License `json:"softwareLicenses,omitempty" xmlrpc:"softwareLicenses,omitempty"` + + // A suggestion for an upgrade path from this Software Description + UpgradeSoftwareDescription *Software_Description `json:"upgradeSoftwareDescription,omitempty" xmlrpc:"upgradeSoftwareDescription,omitempty"` + + // Contains the ID of the suggested upgrade from this Software_Description to a more powerful software installation. + UpgradeSoftwareDescriptionId *int `json:"upgradeSoftwareDescriptionId,omitempty" xmlrpc:"upgradeSoftwareDescriptionId,omitempty"` + + // A suggestion for an upgrade path from this Software Description (Deprecated - Use upgradeSoftwareDescription) + UpgradeSwDesc *Software_Description `json:"upgradeSwDesc,omitempty" xmlrpc:"upgradeSwDesc,omitempty"` + + // Contains the ID of the suggested upgrade from this Software_Description to a more powerful software installation. (Deprecated - Use upgradeSoftwareDescriptionId) + UpgradeSwDescId *int `json:"upgradeSwDescId,omitempty" xmlrpc:"upgradeSwDescId,omitempty"` + + // A count of + ValidFilesystemTypeCount *uint `json:"validFilesystemTypeCount,omitempty" xmlrpc:"validFilesystemTypeCount,omitempty"` + + // no documentation yet + ValidFilesystemTypes []Configuration_Storage_Filesystem_Type `json:"validFilesystemTypes,omitempty" xmlrpc:"validFilesystemTypes,omitempty"` + + // The version of this specific piece of software. + Version *string `json:"version,omitempty" xmlrpc:"version,omitempty"` + + // This is set to '1' if this Software Description can be licensed to a Virtual Machine (an IP address). + VirtualLicense *int `json:"virtualLicense,omitempty" xmlrpc:"virtualLicense,omitempty"` + + // This is set to '1' if this Software Description is a platform for hosting virtual servers. + VirtualizationPlatform *int `json:"virtualizationPlatform,omitempty" xmlrpc:"virtualizationPlatform,omitempty"` +} + +// The SoftLayer_Software_Description_Attribute data type represents an attribute associated with this software description. +type Software_Description_Attribute struct { + Entity + + // no documentation yet + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // no documentation yet + Type *Software_Description_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The value that was assigned to this attribute. + Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"` +} + +// The SoftLayer_Software_Description_Attribute_Type data type represents the type of an attribute. +type Software_Description_Attribute_Type struct { + Entity + + // The keyname for this attribute type. + Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"` +} + +// The SoftLayer_Software_Description_Feature data type represents a single software description feature. A feature may show up on more than one software description and cannot be created, modified, or removed. 
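The referenceCode comment above documents a three-token underscore format (product, version, and 32/64-bit). That shape lends itself to a simple split; a hedged sketch, using "UBUNTU_16_64" as a made-up but representative code:

```go
package main

import (
	"fmt"
	"strings"
)

// splitReferenceCode breaks a Software_Description reference code into
// the three documented tokens: product, product version, and whether
// the build is 32- or 64-bit. It assumes the documented shape holds;
// codes that do not split into exactly three tokens are rejected.
func splitReferenceCode(code string) (product, version, bits string, err error) {
	parts := strings.Split(code, "_")
	if len(parts) != 3 {
		return "", "", "", fmt.Errorf("unexpected referenceCode %q", code)
	}
	return parts[0], parts[1], parts[2], nil
}

func main() {
	// "UBUNTU_16_64" is a representative example, not a verified code.
	p, v, b, err := splitReferenceCode("UBUNTU_16_64")
	if err != nil {
		panic(err)
	}
	fmt.Println(p, v, b) // UBUNTU 16 64
}
```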
+type Software_Description_Feature struct { + Entity + + // The unique identifier for a software description feature. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A unique name used to reference this software description feature. + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of a software description feature. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The vendor that a software description feature belongs to. + Vendor *string `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"` +} + +// This class represents a software description's required user +type Software_Description_RequiredUser struct { + Entity + + // If the default password is set, the user will be created with that password; otherwise a random password is generated. + DefaultPassword *string `json:"defaultPassword,omitempty" xmlrpc:"defaultPassword,omitempty"` + + // If this software has a required user (such as "root") this string contains its name. + Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"` +} + +// This class describes a specific type of license, like a Microsoft Windows Site License, a GPL license, or a license of another type. +type Software_License struct { + Entity + + // The account that owns this specific License instance. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // An ID number for this specific License type. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The account that owns this specific License instance. + Owner *Account `json:"owner,omitempty" xmlrpc:"owner,omitempty"` + + // A Description of the software that this license instance is valid for. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // The ID number of a Software Description that this specific license is valid for. + SoftwareDescriptionId *int `json:"softwareDescriptionId,omitempty" xmlrpc:"softwareDescriptionId,omitempty"` +} + +// SoftLayer_Software_VirtualLicense is the application class that handles a special type of Software License. Most software licenses are licensed to a specific hardware ID; virtual licenses are designed for virtual machines and therefore are assigned to an IP Address. Not all software packages can be "virtual licensed". +type Software_VirtualLicense struct { + Entity + + // The customer account this Virtual License belongs to. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // The ID of the SoftLayer Account to which this Virtual License belongs. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // The billing item for a software virtual license. + BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"` + + // The hardware record to which the software virtual license is assigned. + HostHardware *Hardware_Server `json:"hostHardware,omitempty" xmlrpc:"hostHardware,omitempty"` + + // The ID of the SoftLayer Hardware Server record to which this Virtual License belongs. + HostHardwareId *int `json:"hostHardwareId,omitempty" xmlrpc:"hostHardwareId,omitempty"` + + // An ID number for this Virtual License instance. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The specific IP address this Virtual License belongs to. + IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"` + + // The IP Address record associated with a virtual license. 
+ IpAddressRecord *Network_Subnet_IpAddress `json:"ipAddressRecord,omitempty" xmlrpc:"ipAddressRecord,omitempty"` + + // The License Key for this specific Virtual License. + Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"` + + // A "notes" string attached to this specific Virtual License. + Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"` + + // The SoftLayer_Software_Description that this virtual license is for. + SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"` + + // The Software Description ID this Virtual License is for. + SoftwareDescriptionId *int `json:"softwareDescriptionId,omitempty" xmlrpc:"softwareDescriptionId,omitempty"` + + // The subnet this Virtual License's IP address belongs to. + Subnet *Network_Subnet `json:"subnet,omitempty" xmlrpc:"subnet,omitempty"` + + // The ID of the SoftLayer Network Subnet this Virtual License belongs to. + SubnetId *int `json:"subnetId,omitempty" xmlrpc:"subnetId,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/survey.go b/vendor/github.com/softlayer/softlayer-go/datatypes/survey.go new file mode 100644 index 0000000000..243c10073e --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/survey.go @@ -0,0 +1,150 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Survey data type contains general information relating to a single SoftLayer survey. +type Survey struct { + Entity + + // A flag indicating if a survey can be taken. + Active *int `json:"active,omitempty" xmlrpc:"active,omitempty"` + + // The date that a survey originally started. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // A survey's id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A survey's name or title. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of the questions for a survey. + QuestionCount *uint `json:"questionCount,omitempty" xmlrpc:"questionCount,omitempty"` + + // The questions for a survey. + Questions []Survey_Question `json:"questions,omitempty" xmlrpc:"questions,omitempty"` + + // The status of the survey + Status *Survey_Status `json:"status,omitempty" xmlrpc:"status,omitempty"` + + // The status id of the survey. + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // The type of survey + Type *Survey_Type `json:"type,omitempty" xmlrpc:"type,omitempty"` + + // The type id of the survey. + TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"` +} + +// The SoftLayer_Survey_Answer data type contains general information relating to a single SoftLayer survey answer. +type Survey_Answer struct { + Entity + + // A survey answer's answer that a user can respond to. 
+ Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // A value indicating the order in which a survey answer will be displayed to a user. + AnswerOrder *int `json:"answerOrder,omitempty" xmlrpc:"answerOrder,omitempty"` + + // A survey answer's Id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The survey question that this answer belongs to. + SurveyQuestion *Survey_Question `json:"surveyQuestion,omitempty" xmlrpc:"surveyQuestion,omitempty"` + + // A survey answer's associated [[SoftLayer_Survey_Question|Survey Question]] Id. + SurveyQuestionId *int `json:"surveyQuestionId,omitempty" xmlrpc:"surveyQuestionId,omitempty"` +} + +// The SoftLayer_Survey_Question data type contains general information relating to a single SoftLayer survey question. +type Survey_Question struct { + Entity + + // A count of the possible answers for a survey question. + AnswerCount *uint `json:"answerCount,omitempty" xmlrpc:"answerCount,omitempty"` + + // The possible answers for a survey question. + Answers []Survey_Answer `json:"answers,omitempty" xmlrpc:"answers,omitempty"` + + // A survey question's Id. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A flag indicating that a survey question requires a response. + IsRequired *int `json:"isRequired,omitempty" xmlrpc:"isRequired,omitempty"` + + // A flag indicating that a survey question can have multiple answers responded to. + MultiAnswer *int `json:"multiAnswer,omitempty" xmlrpc:"multiAnswer,omitempty"` + + // A survey question's question. + Question *string `json:"question,omitempty" xmlrpc:"question,omitempty"` + + // A value indicating the order in which a survey question will be asked. + QuestionOrder *int `json:"questionOrder,omitempty" xmlrpc:"questionOrder,omitempty"` + + // The survey that a question belongs to. + Survey *Survey `json:"survey,omitempty" xmlrpc:"survey,omitempty"` + + // A survey question's associated [[SoftLayer_Survey|Survey]] Id. + SurveyId *int `json:"surveyId,omitempty" xmlrpc:"surveyId,omitempty"` +} + +// The SoftLayer_Survey_Response data type contains general information relating to a single SoftLayer survey response. +type Survey_Response struct { + Entity + + // The user-typed response for the [[SoftLayer_Survey_Answer|Survey Answer]] that a response is associated with. + OtherAnswer *string `json:"otherAnswer,omitempty" xmlrpc:"otherAnswer,omitempty"` + + // The survey answer that this response was to. + SurveyAnswer *Survey_Answer `json:"surveyAnswer,omitempty" xmlrpc:"surveyAnswer,omitempty"` + + // The Id of the [[SoftLayer_Survey_Answer|Survey Answer]] that a response was made for. + SurveyAnswerId *int `json:"surveyAnswerId,omitempty" xmlrpc:"surveyAnswerId,omitempty"` +} + +// The SoftLayer_Survey_Status data type contains survey status information. +type Survey_Status struct { + Entity + + // Description of a survey status + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a survey status + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of a survey status + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_Survey_Type data type contains survey type information. 
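Because every field in the survey types above is a pointer or slice, traversing the Survey → Survey_Question → Survey_Answer nesting means guarding each access. A sketch that collects required question prompts, assuming the IsRequired flag uses 1 for true like the package's other integer flags:

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
)

// requiredQuestions filters a loaded survey down to the prompts of the
// questions flagged as required. Every field is a pointer, so each
// access is nil-checked; treating 1 as "required" is an assumption that
// mirrors the other integer flags in this package.
func requiredQuestions(s datatypes.Survey) []string {
	var prompts []string
	for _, q := range s.Questions {
		if q.IsRequired != nil && *q.IsRequired == 1 && q.Question != nil {
			prompts = append(prompts, *q.Question)
		}
	}
	return prompts
}

func main() {
	one := 1
	text := "How satisfied are you?" // made-up sample prompt
	s := datatypes.Survey{
		Questions: []datatypes.Survey_Question{
			{IsRequired: &one, Question: &text},
		},
	}
	fmt.Println(requiredQuestions(s)) // [How satisfied are you?]
}
```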
+type Survey_Type struct { + Entity + + // Description of a survey type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // Internal identifier of a survey type + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Name of a survey type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/tag.go b/vendor/github.com/softlayer/softlayer-go/datatypes/tag.go new file mode 100644 index 0000000000..b4cc0a8164 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/tag.go @@ -0,0 +1,149 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// The SoftLayer_Tag data type is an optional type associated with hardware. The account ID that the tag is tied to, and the tag itself are stored in this data type. There is also a flag to denote whether the tag is internal or not. +type Tag struct { + Entity + + // The account to which the tag is tied. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // Account the tag belongs to. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // Unique identifier for a tag. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Indicates whether a tag is internal. + Internal *int `json:"internal,omitempty" xmlrpc:"internal,omitempty"` + + // Name of the tag. The characters permitted are A-Z, 0-9, whitespace, _ (underscore), + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // A count of references that tie an object to the tag. + ReferenceCount *uint `json:"referenceCount,omitempty" xmlrpc:"referenceCount,omitempty"` + + // References that tie an object to the tag. 
+ References []Tag_Reference `json:"references,omitempty" xmlrpc:"references,omitempty"` +} + +// no documentation yet +type Tag_Reference struct { + Entity + + // no documentation yet + Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"` + + // no documentation yet + EmpRecordId *int `json:"empRecordId,omitempty" xmlrpc:"empRecordId,omitempty"` + + // no documentation yet + Employee *User_Employee `json:"employee,omitempty" xmlrpc:"employee,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ResourceTableId *int `json:"resourceTableId,omitempty" xmlrpc:"resourceTableId,omitempty"` + + // no documentation yet + Tag *Tag `json:"tag,omitempty" xmlrpc:"tag,omitempty"` + + // no documentation yet + TagId *int `json:"tagId,omitempty" xmlrpc:"tagId,omitempty"` + + // no documentation yet + TagType *Tag_Type `json:"tagType,omitempty" xmlrpc:"tagType,omitempty"` + + // no documentation yet + TagTypeId *int `json:"tagTypeId,omitempty" xmlrpc:"tagTypeId,omitempty"` + + // no documentation yet + UsrRecordId *int `json:"usrRecordId,omitempty" xmlrpc:"usrRecordId,omitempty"` +} + +// no documentation yet +type Tag_Reference_Hardware struct { + Tag_Reference + + // no documentation yet + Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Network_Application_Delivery_Controller struct { + Tag_Reference + + // no documentation yet + Resource *Network_Application_Delivery_Controller `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Network_Vlan struct { + Tag_Reference + + // no documentation yet + Resource *Network_Vlan `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Network_Vlan_Firewall struct { + Tag_Reference + + // no documentation yet + Resource *Network_Vlan_Firewall `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Resource_Group struct { + Tag_Reference + + // no documentation yet + Resource *Resource_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Virtual_Guest struct { + Tag_Reference + + // no documentation yet + Resource *Virtual_Guest `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Reference_Virtual_Guest_Block_Device_Template_Group struct { + Tag_Reference + + // no documentation yet + Resource *Virtual_Guest_Block_Device_Template_Group `json:"resource,omitempty" xmlrpc:"resource,omitempty"` +} + +// no documentation yet +type Tag_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/ticket.go b/vendor/github.com/softlayer/softlayer-go/datatypes/ticket.go new file mode 100644 index 0000000000..b28764b069 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/ticket.go @@ -0,0 +1,702 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// The SoftLayer_Ticket data type models a single SoftLayer customer support or notification ticket. Each ticket object contains references to its updates, the user it's assigned to, the SoftLayer department and employee that it's assigned to, and any hardware objects or attached files associated with the ticket. Tickets are described in further detail on the [[SoftLayer_Ticket]] service page.
+//
+// To create a support ticket, execute the [[SoftLayer_Ticket::createStandardTicket|createStandardTicket]] or [[SoftLayer_Ticket::createAdministrativeTicket|createAdministrativeTicket]] methods in the SoftLayer_Ticket service. To create an upgrade ticket for the SoftLayer sales group, execute the [[SoftLayer_Ticket::createUpgradeTicket|createUpgradeTicket]] method.
+type Ticket struct {
+ Entity
+
+ // The SoftLayer customer account associated with a ticket.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // An internal identifier of the SoftLayer customer account that a ticket is associated with.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // A count of
+ AssignedAgentCount *uint `json:"assignedAgentCount,omitempty" xmlrpc:"assignedAgentCount,omitempty"`
+
+ // no documentation yet
+ AssignedAgents []User_Customer `json:"assignedAgents,omitempty" xmlrpc:"assignedAgents,omitempty"`
+
+ // The portal user that a ticket is assigned to.
+ AssignedUser *User_Customer `json:"assignedUser,omitempty" xmlrpc:"assignedUser,omitempty"`
+
+ // An internal identifier of the portal user that a ticket is assigned to.
+ AssignedUserId *int `json:"assignedUserId,omitempty" xmlrpc:"assignedUserId,omitempty"`
+
+ // A count of the list of additional emails to notify when a ticket update is made.
+ AttachedAdditionalEmailCount *uint `json:"attachedAdditionalEmailCount,omitempty" xmlrpc:"attachedAdditionalEmailCount,omitempty"`
+
+ // The list of additional emails to notify when a ticket update is made.
+ AttachedAdditionalEmails []User_Customer_AdditionalEmail `json:"attachedAdditionalEmails,omitempty" xmlrpc:"attachedAdditionalEmails,omitempty"`
+
+ // A count of the Dedicated Hosts associated with a ticket. This is used in cases where a ticket is directly associated with one or more Dedicated Hosts.
+ AttachedDedicatedHostCount *uint `json:"attachedDedicatedHostCount,omitempty" xmlrpc:"attachedDedicatedHostCount,omitempty"`
+
+ // The Dedicated Hosts associated with a ticket. This is used in cases where a ticket is directly associated with one or more Dedicated Hosts.
+ AttachedDedicatedHosts []Virtual_DedicatedHost `json:"attachedDedicatedHosts,omitempty" xmlrpc:"attachedDedicatedHosts,omitempty"`
+
+ // A count of the files attached to a ticket.
+ AttachedFileCount *uint `json:"attachedFileCount,omitempty" xmlrpc:"attachedFileCount,omitempty"`
+
+ // The files attached to a ticket.
+ AttachedFiles []Ticket_Attachment_File `json:"attachedFiles,omitempty" xmlrpc:"attachedFiles,omitempty"`
+
+ // The hardware associated with a ticket. This is used in cases where a ticket is directly associated with one or more pieces of hardware.
+ AttachedHardware []Hardware `json:"attachedHardware,omitempty" xmlrpc:"attachedHardware,omitempty"`
+
+ // no documentation yet
+ AttachedHardwareCount *uint `json:"attachedHardwareCount,omitempty" xmlrpc:"attachedHardwareCount,omitempty"`
+
+ // A count of
+ AttachedResourceCount *uint `json:"attachedResourceCount,omitempty" xmlrpc:"attachedResourceCount,omitempty"`
+
+ // no documentation yet
+ AttachedResources []Ticket_Attachment `json:"attachedResources,omitempty" xmlrpc:"attachedResources,omitempty"`
+
+ // A count of the virtual guests associated with a ticket. This is used in cases where a ticket is directly associated with one or more virtualized guest installations or Virtual Servers.
+ AttachedVirtualGuestCount *uint `json:"attachedVirtualGuestCount,omitempty" xmlrpc:"attachedVirtualGuestCount,omitempty"`
+
+ // The virtual guests associated with a ticket. This is used in cases where a ticket is directly associated with one or more virtualized guest installations or Virtual Servers.
+ AttachedVirtualGuests []Virtual_Guest `json:"attachedVirtualGuests,omitempty" xmlrpc:"attachedVirtualGuests,omitempty"`
+
+ // Flag indicating that the ticket is waiting on a response from a customer.
+ AwaitingUserResponseFlag *bool `json:"awaitingUserResponseFlag,omitempty" xmlrpc:"awaitingUserResponseFlag,omitempty"`
+
+ // Whether a ticket has a one-time charge associated with it. Standard tickets are free while administrative tickets typically cost $3 USD.
+ BillableFlag *bool `json:"billableFlag,omitempty" xmlrpc:"billableFlag,omitempty"`
+
+ // A service cancellation request.
+ CancellationRequest *Billing_Item_Cancellation_Request `json:"cancellationRequest,omitempty" xmlrpc:"cancellationRequest,omitempty"`
+
+ // no documentation yet
+ ChangeOwnerFlag *bool `json:"changeOwnerFlag,omitempty" xmlrpc:"changeOwnerFlag,omitempty"`
+
+ // The date that a ticket was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A count of
+ EmployeeAttachmentCount *uint `json:"employeeAttachmentCount,omitempty" xmlrpc:"employeeAttachmentCount,omitempty"`
+
+ // no documentation yet
+ EmployeeAttachments []User_Employee `json:"employeeAttachments,omitempty" xmlrpc:"employeeAttachments,omitempty"`
+
+ // Feedback left by a portal or API user on their experiences in a ticket. Final comments may be created after a ticket is closed.
+ FinalComments *string `json:"finalComments,omitempty" xmlrpc:"finalComments,omitempty"`
+
+ // The first physical or virtual server attached to a ticket.
+ FirstAttachedResource *Ticket_Attachment `json:"firstAttachedResource,omitempty" xmlrpc:"firstAttachedResource,omitempty"`
+
+ // The first update made to a ticket. This is typically the contents of a ticket when it's created.
+ FirstUpdate *Ticket_Update `json:"firstUpdate,omitempty" xmlrpc:"firstUpdate,omitempty"`
+
+ // The SoftLayer department that a ticket is assigned to.
+ Group *Ticket_Group `json:"group,omitempty" xmlrpc:"group,omitempty"`
+
+ // The internal identifier of the SoftLayer department that a ticket is assigned to.
+ GroupId *int `json:"groupId,omitempty" xmlrpc:"groupId,omitempty"`
+
+ // A ticket's internal identifier. Each ticket is defined by a unique identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A count of the invoice items associated with a ticket. Ticket based invoice items only exist when a ticket incurs a fee that has been invoiced.
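+ // A reading aid rather than generated documentation: each "A count of ..."
+ // *uint property in this file (InvoiceItemCount below, for example) mirrors
+ // a relational slice of the same name and is returned by the API rather
+ // than computed by this package.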
+ InvoiceItemCount *uint `json:"invoiceItemCount,omitempty" xmlrpc:"invoiceItemCount,omitempty"` + + // The invoice items associated with a ticket. Ticket based invoice items only exist when a ticket incurs a fee that has been invoiced. + InvoiceItems []Billing_Invoice_Item `json:"invoiceItems,omitempty" xmlrpc:"invoiceItems,omitempty"` + + // no documentation yet + LastActivity *Ticket_Activity `json:"lastActivity,omitempty" xmlrpc:"lastActivity,omitempty"` + + // The date that a ticket was last modified. A modification does not necessarily mean that an update was added. + LastEditDate *Time `json:"lastEditDate,omitempty" xmlrpc:"lastEditDate,omitempty"` + + // The type of user who last edited or updated a ticket. This is either "EMPLOYEE" or "USER". + LastEditType *string `json:"lastEditType,omitempty" xmlrpc:"lastEditType,omitempty"` + + // no documentation yet + LastEditor *User_Interface `json:"lastEditor,omitempty" xmlrpc:"lastEditor,omitempty"` + + // The date that the last ticket update was made + LastResponseDate *Time `json:"lastResponseDate,omitempty" xmlrpc:"lastResponseDate,omitempty"` + + // The last update made to a ticket. + LastUpdate *Ticket_Update `json:"lastUpdate,omitempty" xmlrpc:"lastUpdate,omitempty"` + + // A timestamp of the last time the Ticket was viewed by the active user. + LastViewedDate *Time `json:"lastViewedDate,omitempty" xmlrpc:"lastViewedDate,omitempty"` + + // A ticket's associated location within the SoftLayer location hierarchy. + Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"` + + // The internal identifier of the location associated with a ticket. + LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"` + + // The date that a ticket was last updated. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // True if there are new, unread updates to this ticket for the current user, False otherwise. + NewUpdatesFlag *bool `json:"newUpdatesFlag,omitempty" xmlrpc:"newUpdatesFlag,omitempty"` + + // Whether or not the user who owns a ticket is notified via email when a ticket is updated. + NotifyUserOnUpdateFlag *bool `json:"notifyUserOnUpdateFlag,omitempty" xmlrpc:"notifyUserOnUpdateFlag,omitempty"` + + // The IP address of the user who opened a ticket. + OriginatingIpAddress *string `json:"originatingIpAddress,omitempty" xmlrpc:"originatingIpAddress,omitempty"` + + // no documentation yet + Priority *int `json:"priority,omitempty" xmlrpc:"priority,omitempty"` + + // no documentation yet + ResponsibleBrandId *int `json:"responsibleBrandId,omitempty" xmlrpc:"responsibleBrandId,omitempty"` + + // A count of + ScheduledActionCount *uint `json:"scheduledActionCount,omitempty" xmlrpc:"scheduledActionCount,omitempty"` + + // no documentation yet + ScheduledActions []Provisioning_Version1_Transaction `json:"scheduledActions,omitempty" xmlrpc:"scheduledActions,omitempty"` + + // The amount of money in US Dollars ($USD) that a ticket has charged to an account. A ticket's administrative billing amount is a one time charge and only applies to administrative support tickets. + ServerAdministrationBillingAmount *int `json:"serverAdministrationBillingAmount,omitempty" xmlrpc:"serverAdministrationBillingAmount,omitempty"` + + // The invoice associated with a ticket. Only tickets with an associated administrative charge have an invoice. 
+ ServerAdministrationBillingInvoice *Billing_Invoice `json:"serverAdministrationBillingInvoice,omitempty" xmlrpc:"serverAdministrationBillingInvoice,omitempty"`
+
+ // The internal identifier of the invoice associated with a ticket's administrative charge. Only tickets with an administrative charge have an associated invoice.
+ ServerAdministrationBillingInvoiceId *int `json:"serverAdministrationBillingInvoiceId,omitempty" xmlrpc:"serverAdministrationBillingInvoiceId,omitempty"`
+
+ // Whether a ticket is a standard or an administrative support ticket. Administrative support tickets typically incur a $3 USD charge.
+ ServerAdministrationFlag *int `json:"serverAdministrationFlag,omitempty" xmlrpc:"serverAdministrationFlag,omitempty"`
+
+ // The refund invoice associated with a ticket. Only tickets with a refund applied to them have an associated refund invoice.
+ ServerAdministrationRefundInvoice *Billing_Invoice `json:"serverAdministrationRefundInvoice,omitempty" xmlrpc:"serverAdministrationRefundInvoice,omitempty"`
+
+ // The internal identifier of the refund invoice associated with a ticket. Only tickets with an account refund associated with them have an associated refund invoice.
+ ServerAdministrationRefundInvoiceId *int `json:"serverAdministrationRefundInvoiceId,omitempty" xmlrpc:"serverAdministrationRefundInvoiceId,omitempty"`
+
+ // no documentation yet
+ ServiceProvider *Service_Provider `json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"`
+
+ // no documentation yet
+ ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"`
+
+ // A ticket's internal identifier at its service provider. Each ticket is defined by a unique identifier.
+ ServiceProviderResourceId *int `json:"serviceProviderResourceId,omitempty" xmlrpc:"serviceProviderResourceId,omitempty"`
+
+ // no documentation yet
+ State []Ticket_State `json:"state,omitempty" xmlrpc:"state,omitempty"`
+
+ // A count of
+ StateCount *uint `json:"stateCount,omitempty" xmlrpc:"stateCount,omitempty"`
+
+ // A ticket's status.
+ Status *Ticket_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // A ticket status' internal identifier.
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+ // A ticket's subject. Only standard support tickets have an associated subject. A standard support ticket's title corresponds with its subject's name.
+ Subject *Ticket_Subject `json:"subject,omitempty" xmlrpc:"subject,omitempty"`
+
+ // An internal identifier of the pre-set subject that a ticket is associated with. Standard support tickets have a subject set while administrative tickets have a null subject. A standard support ticket's title is the name of its associated subject.
+ SubjectId *int `json:"subjectId,omitempty" xmlrpc:"subjectId,omitempty"`
+
+ // A count of
+ TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"`
+
+ // no documentation yet
+ TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"`
+
+ // A ticket's title. This is typically a brief summary of the issue described in the ticket.
+ Title *string `json:"title,omitempty" xmlrpc:"title,omitempty"`
+
+ // no documentation yet
+ TotalUpdateCount *int `json:"totalUpdateCount,omitempty" xmlrpc:"totalUpdateCount,omitempty"`
+
+ // A count of a ticket's updates.
+ UpdateCount *uint `json:"updateCount,omitempty" xmlrpc:"updateCount,omitempty"`
+
+ // A ticket's updates.
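+ // For illustration only (assuming a populated ticket variable): the pointer
+ // fields mean nil checks are needed when walking the update thread, e.g.
+ //
+ //	for _, u := range ticket.Updates {
+ //		if u.Entry != nil {
+ //			fmt.Println(*u.Entry)
+ //		}
+ //	}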
+ Updates []Ticket_Update `json:"updates,omitempty" xmlrpc:"updates,omitempty"`
+
+ // Whether a user is able to update a ticket.
+ UserEditableFlag *bool `json:"userEditableFlag,omitempty" xmlrpc:"userEditableFlag,omitempty"`
+}
+
+// no documentation yet
+type Ticket_Activity struct {
+ Entity
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ CreateTimestamp *Time `json:"createTimestamp,omitempty" xmlrpc:"createTimestamp,omitempty"`
+
+ // no documentation yet
+ Editor *User_Interface `json:"editor,omitempty" xmlrpc:"editor,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+ // no documentation yet
+ TicketUpdate *Ticket_Update `json:"ticketUpdate,omitempty" xmlrpc:"ticketUpdate,omitempty"`
+
+ // no documentation yet
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// SoftLayer tickets have the ability to be associated with specific pieces of hardware in a customer's inventory. Attaching hardware to a ticket can greatly increase response time from SoftLayer for issues that are related to one or more specific servers on a customer's account. The SoftLayer_Ticket_Attachment_Hardware data type models the relationship between a piece of hardware and a ticket. Only one attachment record may exist per hardware item per ticket.
+type Ticket_Attachment struct {
+ Entity
+
+ // no documentation yet
+ AssignedAgent *User_Customer `json:"assignedAgent,omitempty" xmlrpc:"assignedAgent,omitempty"`
+
+ // The internal identifier of an item that is attached to a ticket.
+ AttachmentId *int `json:"attachmentId,omitempty" xmlrpc:"attachmentId,omitempty"`
+
+ // The date that an item was attached to a ticket.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A ticket attachment's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ ScheduledAction *Provisioning_Version1_Transaction `json:"scheduledAction,omitempty" xmlrpc:"scheduledAction,omitempty"`
+
+ // The ticket that an item is attached to.
+ Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+ // The internal identifier of the ticket that an item is attached to.
+ TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+}
+
+// no documentation yet
+type Ticket_Attachment_Assigned_Agent struct {
+ Ticket_Attachment
+
+ // The internal identifier of an assigned Agent that is attached to a ticket.
+ AssignedAgentId *int `json:"assignedAgentId,omitempty" xmlrpc:"assignedAgentId,omitempty"`
+
+ // no documentation yet
+ Resource *User_Customer `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// This datatype contains tickets referenced from a card change request.
+type Ticket_Attachment_CardChangeRequest struct {
+ Ticket_Attachment
+
+ // The card change request that is attached to a ticket.
+ Resource *Billing_Payment_Card_ChangeRequest `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// SoftLayer tickets have the ability to be associated with specific dedicated hosts in a customer's inventory. Attaching a dedicated host to a ticket can greatly increase response time from SoftLayer for issues that are related to one or more specific servers on a customer's account. The SoftLayer_Ticket_Attachment_Dedicated_Host data type models the relationship between a dedicated host and a ticket. Only one attachment record may exist per dedicated host item per ticket.
+type Ticket_Attachment_Dedicated_Host struct {
+ Ticket_Attachment
+
+ // The Dedicated Host that is attached to a ticket.
+ DedicatedHost *Virtual_DedicatedHost `json:"dedicatedHost,omitempty" xmlrpc:"dedicatedHost,omitempty"`
+
+ // The internal identifier of the Dedicated Host that is attached to a ticket.
+ DedicatedHostId *int `json:"dedicatedHostId,omitempty" xmlrpc:"dedicatedHostId,omitempty"`
+
+ // The Dedicated Host that is attached to a ticket.
+ Resource *Virtual_DedicatedHost `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// SoftLayer tickets can have files attached to them. Attaching a file to a ticket is a good way to report issues, provide documentation, and give examples of an issue. Both SoftLayer customers and employees have the ability to attach files to a ticket. The SoftLayer_Ticket_Attachment_File data type models a single file attached to a ticket.
+type Ticket_Attachment_File struct {
+ Entity
+
+ // The date a file was originally attached to a ticket.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The name of a file attached to a ticket.
+ FileName *string `json:"fileName,omitempty" xmlrpc:"fileName,omitempty"`
+
+ // The size of a file attached to a ticket, measured in bytes.
+ FileSize *string `json:"fileSize,omitempty" xmlrpc:"fileSize,omitempty"`
+
+ // A ticket file attachment's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The date that a file attachment record was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // no documentation yet
+ Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+ // The internal identifier of the ticket that a file is attached to.
+ TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+
+ // The ticket update that a file is attached to.
+ Update *Ticket_Update `json:"update,omitempty" xmlrpc:"update,omitempty"`
+
+ // The internal identifier of the ticket update the attached file is associated with.
+ UpdateId *int `json:"updateId,omitempty" xmlrpc:"updateId,omitempty"`
+
+ // The internal identifier of the user that uploaded a ticket file attachment. This is only used when a file attachment's ''uploaderType'' is set to "USER".
+ UploaderId *string `json:"uploaderId,omitempty" xmlrpc:"uploaderId,omitempty"`
+
+ // The type of user that attached a file to a ticket. This is either "USER" if the file was uploaded by a portal or API user or "EMPLOYEE" if the file was uploaded by a SoftLayer employee.
+ UploaderType *string `json:"uploaderType,omitempty" xmlrpc:"uploaderType,omitempty"`
+}
+
+// SoftLayer tickets have the ability to be associated with specific pieces of hardware in a customer's inventory. Attaching hardware to a ticket can greatly increase response time from SoftLayer for issues that are related to one or more specific servers on a customer's account. The SoftLayer_Ticket_Attachment_Hardware data type models the relationship between a piece of hardware and a ticket. Only one attachment record may exist per hardware item per ticket.
+type Ticket_Attachment_Hardware struct {
+ Ticket_Attachment
+
+ // The hardware that is attached to a ticket.
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // The internal identifier of a piece of hardware that is attached to a ticket.
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+ // The hardware that is attached to a ticket.
+ Resource *Hardware `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// This datatype contains tickets referenced from manual payments.
+type Ticket_Attachment_ManualPayment struct {
+ Ticket_Attachment
+
+ // The manual payment that is attached to a ticket.
+ Resource *Billing_Payment_Card_ManualPayment `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+}
+
+// no documentation yet
+type Ticket_Attachment_Scheduled_Action struct {
+ Ticket_Attachment
+
+ // no documentation yet
+ Resource *Provisioning_Version1_Transaction `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+ // The date that a scheduled action transaction attached to a ticket is set to run.
+ RunDate *Time `json:"runDate,omitempty" xmlrpc:"runDate,omitempty"`
+
+ // no documentation yet
+ Transaction *Provisioning_Version1_Transaction `json:"transaction,omitempty" xmlrpc:"transaction,omitempty"`
+
+ // The internal identifier of a scheduled action transaction that is attached to a ticket.
+ TransactionId *int `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"`
+}
+
+// SoftLayer tickets have the ability to be associated with specific virtual guests in a customer's inventory. Attaching virtual guests to a ticket can greatly increase response time from SoftLayer for issues that are related to one or more specific servers on a customer's account. The SoftLayer_Ticket_Attachment_Virtual_Guest data type models the relationship between a virtual guest and a ticket. Only one attachment record may exist per virtual guest per ticket.
+type Ticket_Attachment_Virtual_Guest struct {
+ Ticket_Attachment
+
+ // The virtualized guest or CloudLayer Computing Instance that is attached to a ticket.
+ Resource *Virtual_Guest `json:"resource,omitempty" xmlrpc:"resource,omitempty"`
+
+ // The virtualized guest or CloudLayer Computing Instance that is attached to a ticket.
+ VirtualGuest *Virtual_Guest `json:"virtualGuest,omitempty" xmlrpc:"virtualGuest,omitempty"`
+
+ // The internal identifier of the virtualized guest or CloudLayer Computing Instance that is attached to a ticket.
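+ // Note the pattern shared by the attachment subtypes in this file: each
+ // Ticket_Attachment_* type embeds Ticket_Attachment and exposes the attached
+ // object twice, once under a type-specific name (VirtualGuest below) and
+ // once under the generic name Resource.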
+ VirtualGuestId *int `json:"virtualGuestId,omitempty" xmlrpc:"virtualGuestId,omitempty"`
+}
+
+// no documentation yet
+type Ticket_Chat struct {
+ Entity
+
+ // no documentation yet
+ Agent *User_Employee `json:"agent,omitempty" xmlrpc:"agent,omitempty"`
+
+ // no documentation yet
+ Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"`
+
+ // no documentation yet
+ CustomerId *int `json:"customerId,omitempty" xmlrpc:"customerId,omitempty"`
+
+ // no documentation yet
+ EndDate *Time `json:"endDate,omitempty" xmlrpc:"endDate,omitempty"`
+
+ // no documentation yet
+ StartDate *Time `json:"startDate,omitempty" xmlrpc:"startDate,omitempty"`
+
+ // no documentation yet
+ TicketUpdate *Ticket_Update_Chat `json:"ticketUpdate,omitempty" xmlrpc:"ticketUpdate,omitempty"`
+
+ // no documentation yet
+ Transcript *string `json:"transcript,omitempty" xmlrpc:"transcript,omitempty"`
+}
+
+// no documentation yet
+type Ticket_Chat_Liveperson struct {
+ Ticket_Chat
+}
+
+// no documentation yet
+type Ticket_Chat_TranscriptLine struct {
+ Entity
+
+ // no documentation yet
+ Speaker *User_Interface `json:"speaker,omitempty" xmlrpc:"speaker,omitempty"`
+}
+
+// no documentation yet
+type Ticket_Chat_TranscriptLine_Customer struct {
+ Ticket_Chat_TranscriptLine
+}
+
+// no documentation yet
+type Ticket_Chat_TranscriptLine_Employee struct {
+ Ticket_Chat_TranscriptLine
+}
+
+// SoftLayer tickets have the ability to be assigned to one of SoftLayer's internal departments. The department that a ticket is assigned to is modeled by the SoftLayer_Ticket_Group data type. Ticket groups help to ensure that the proper department is handling a ticket. Standard support tickets are created from a number of pre-determined subjects. These subjects help determine which group a standard ticket is assigned to.
+type Ticket_Group struct {
+ Entity
+
+ // The category that a ticket group belongs to.
+ Category *Ticket_Group_Category `json:"category,omitempty" xmlrpc:"category,omitempty"`
+
+ // A ticket group's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A ticket group's name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The internal identifier for the category that a ticket group belongs to.
+ TicketGroupCategoryId *int `json:"ticketGroupCategoryId,omitempty" xmlrpc:"ticketGroupCategoryId,omitempty"`
+}
+
+// SoftLayer's support ticket groups represent the department at SoftLayer that is assigned to work one of your support tickets. Many departments are responsible for handling different types of tickets. These types of tickets are modeled in the SoftLayer_Ticket_Group_Category data type. Ticket group categories also help differentiate your tickets' issues in the SoftLayer customer portal.
+type Ticket_Group_Category struct {
+ Entity
+
+ // A ticket group category's unique identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A ticket group category's name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Ticket_Priority struct {
+ Entity
+}
+
+// no documentation yet
+type Ticket_State struct {
+ Entity
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ StateType *Ticket_State_Type `json:"stateType,omitempty" xmlrpc:"stateType,omitempty"`
+
+ // no documentation yet
+ StateTypeId *int `json:"stateTypeId,omitempty" xmlrpc:"stateTypeId,omitempty"`
+
+ // no documentation yet
+ Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+ // no documentation yet
+ TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+}
+
+// no documentation yet
+type Ticket_State_Type struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Ticket_Status data type models the state of a ticket as it is worked by SoftLayer and its customers. Tickets exist in one of three states:
+// *'''OPEN''': Open tickets are considered unresolved issues by SoftLayer and can be assigned to a SoftLayer employee for work. Tickets created by portal or API users are created in the Open state.
+// *'''ASSIGNED''': Assigned tickets are identical to open tickets, but are assigned to an individual SoftLayer employee. An assigned ticket is actively being worked by SoftLayer.
+// *'''CLOSED''': Tickets are closed when the issue at hand is considered resolved. A SoftLayer employee can change a ticket's status from Closed to Open or Assigned if the need arises.
+//
+//
+// A ticket usually goes from the Open to Assigned to Closed states during its life cycle. If a ticket is forwarded from one department to another it may change from the Assigned state back to Open until it is assigned to a member of the new department.
+type Ticket_Status struct {
+ Entity
+
+ // A ticket status' internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A ticket status' name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_Ticket_Subject data type models one of the possible subjects that a standard support ticket may belong to. A basic support ticket's title matches its corresponding subject's name.
+type Ticket_Subject struct {
+ Entity
+
+ // no documentation yet
+ Category *Ticket_Subject_Category `json:"category,omitempty" xmlrpc:"category,omitempty"`
+
+ // The subject category id that this ticket subject belongs to.
+ CategoryId *int `json:"categoryId,omitempty" xmlrpc:"categoryId,omitempty"`
+
+ // A child subject
+ Children []Ticket_Subject `json:"children,omitempty" xmlrpc:"children,omitempty"`
+
+ // A count of a child subject
+ ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"`
+
+ // no documentation yet
+ Group *Ticket_Group `json:"group,omitempty" xmlrpc:"group,omitempty"`
+
+ // A ticket subject's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A ticket subject's name. This name is used for a standard support ticket's title.
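+ // Illustrative only: Ticket_Subject records form a tree through the Parent
+ // and Children fields declared below, so, assuming the pointers have been
+ // populated, the root subject can be reached with
+ //
+ //	s := subject
+ //	for s.Parent != nil {
+ //		s = s.Parent
+ //	}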
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A parent subject
+ Parent *Ticket_Subject `json:"parent,omitempty" xmlrpc:"parent,omitempty"`
+
+ // Specifies the parent subject id.
+ ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+}
+
+// SoftLayer_Ticket_Subject_Category groups ticket subjects into logical groups.
+type Ticket_Subject_Category struct {
+ Entity
+
+ // A unique identifier of a ticket subject category.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // A ticket subject category name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A count of
+ SubjectCount *uint `json:"subjectCount,omitempty" xmlrpc:"subjectCount,omitempty"`
+
+ // no documentation yet
+ Subjects []Ticket_Subject `json:"subjects,omitempty" xmlrpc:"subjects,omitempty"`
+}
+
+// no documentation yet
+type Ticket_Survey struct {
+ Entity
+}
+
+// no documentation yet
+type Ticket_Type struct {
+ Entity
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+}
+
+// The SoftLayer_Ticket_Update type relates to a single update to a ticket, either by a customer or an employee.
+type Ticket_Update struct {
+ Entity
+
+ // no documentation yet
+ ChangeOwnerActivity *string `json:"changeOwnerActivity,omitempty" xmlrpc:"changeOwnerActivity,omitempty"`
+
+ // The date a ticket update was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The user or SoftLayer employee who created a ticket update.
+ Editor *User_Interface `json:"editor,omitempty" xmlrpc:"editor,omitempty"`
+
+ // The internal identifier of the SoftLayer portal or API user who created a ticket update. This is only used if a ticket update's ''editorType'' property is "USER".
+ EditorId *int `json:"editorId,omitempty" xmlrpc:"editorId,omitempty"`
+
+ // The type of user who created a ticket update. This is either "USER" for an update created by a SoftLayer portal or API user, "EMPLOYEE" for an update created by a SoftLayer employee, or "AUTO" if a ticket update was generated automatically by SoftLayer's backend systems.
+ EditorType *string `json:"editorType,omitempty" xmlrpc:"editorType,omitempty"`
+
+ // The contents of a ticket update.
+ Entry *string `json:"entry,omitempty" xmlrpc:"entry,omitempty"`
+
+ // The files attached to a ticket update.
+ FileAttachment []Ticket_Attachment_File `json:"fileAttachment,omitempty" xmlrpc:"fileAttachment,omitempty"`
+
+ // A count of the files attached to a ticket update.
+ FileAttachmentCount *uint `json:"fileAttachmentCount,omitempty" xmlrpc:"fileAttachmentCount,omitempty"`
+
+ // A ticket update's internal identifier.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The ticket that a ticket update belongs to.
+ Ticket *Ticket `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"`
+
+ // The internal identifier of the ticket that a ticket update belongs to.
+ TicketId *int `json:"ticketId,omitempty" xmlrpc:"ticketId,omitempty"`
+
+ // The type of update to this ticket.
+ Type *Ticket_Update_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// A SoftLayer_Ticket_Update_Agent type models an update to a ticket made by an agent.
+type Ticket_Update_Agent struct {
+ Ticket_Update
+}
+
+// A SoftLayer_Ticket_Update_Chat is a chat between a customer and a customer service representative relating to a ticket.
+type Ticket_Update_Chat struct { + Ticket_Update + + // The chat between the Customer and Agent + Chat *Ticket_Chat_Liveperson `json:"chat,omitempty" xmlrpc:"chat,omitempty"` +} + +// A SoftLayer_Ticket_Update_Customer is a single update made by a customer to a ticket. +type Ticket_Update_Customer struct { + Ticket_Update +} + +// The SoftLayer_Ticket_Update_Employee data type models an update to a ticket made by a SoftLayer employee. +type Ticket_Update_Employee struct { + Ticket_Update + + // A ticket update's response rating. Ticket updates posted by SoftLayer employees have the option of earning a rating from SoftLayer's customers. Ratings are based on a 1 - 5 scale, with one being a poor rating while 5 is a very high rating. This is only used if a ticket update's ''editorType'' property is "EMPLOYEE". + ResponseRating *int `json:"responseRating,omitempty" xmlrpc:"responseRating,omitempty"` +} + +// no documentation yet +type Ticket_Update_Type struct { + Entity + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // no documentation yet + Ticket *Ticket_Update `json:"ticket,omitempty" xmlrpc:"ticket,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/user.go b/vendor/github.com/softlayer/softlayer-go/datatypes/user.go new file mode 100644 index 0000000000..dde8b35817 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/user.go @@ -0,0 +1,1412 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// This class represents a login/logout sheet for facility visitors. +type User_Access_Facility_Log struct { + Entity + + // This is the account associated with the log entry. For users under a customer's account, it is the customer's account. For contractors and others visiting a colocation area, it is the account associated with the area they visited. + Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"` + + // This is the account associated with a log record. For a customer logging into a datacenter, this is the customer's account. For a contractor or any other guest logging into a customer's cabinet or colocation cage, this is the customer's account. + AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"` + + // This is the location of the facility. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // This is a short description of why the person is at the location. + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // This is the colocation hardware that was visited. 
+ Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // no documentation yet
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ LocationId *int `json:"locationId,omitempty" xmlrpc:"locationId,omitempty"`
+
+ // This is the type of person entering the facility.
+ LogType *User_Access_Facility_Log_Type `json:"logType,omitempty" xmlrpc:"logType,omitempty"`
+
+ // This is the date and time the person arrived.
+ TimeIn *Time `json:"timeIn,omitempty" xmlrpc:"timeIn,omitempty"`
+
+ // no documentation yet
+ TimeOut *Time `json:"timeOut,omitempty" xmlrpc:"timeOut,omitempty"`
+
+ // no documentation yet
+ Visitor *Entity `json:"visitor,omitempty" xmlrpc:"visitor,omitempty"`
+}
+
+// no documentation yet
+type User_Access_Facility_Log_Type struct {
+ Entity
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// This class represents a facility visitor that is not an active employee or customer.
+type User_Access_Facility_Visitor struct {
+ Entity
+
+ // no documentation yet
+ CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
+
+ // no documentation yet
+ FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+ // no documentation yet
+ LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+ // no documentation yet
+ TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+ // no documentation yet
+ VisitorType *User_Access_Facility_Visitor_Type `json:"visitorType,omitempty" xmlrpc:"visitorType,omitempty"`
+}
+
+// no documentation yet
+type User_Access_Facility_Visitor_Type struct {
+ Entity
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_User_Customer data type contains general information relating to a single SoftLayer customer portal user. Personal information in this type such as names, addresses, and phone numbers is not necessarily associated with the customer account the user is assigned to.
+type User_Customer struct {
+ User_Interface
+
+ // The customer account that a user belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // A portal user's associated [[SoftLayer_Account|customer account]] id.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // A count of
+ ActionCount *uint `json:"actionCount,omitempty" xmlrpc:"actionCount,omitempty"`
+
+ // no documentation yet
+ Actions []User_Permission_Action `json:"actions,omitempty" xmlrpc:"actions,omitempty"`
+
+ // A count of a portal user's additional email addresses. These email addresses are contacted when updates are made to support tickets.
+ AdditionalEmailCount *uint `json:"additionalEmailCount,omitempty" xmlrpc:"additionalEmailCount,omitempty"`
+
+ // A portal user's additional email addresses. These email addresses are contacted when updates are made to support tickets.
+ AdditionalEmails []User_Customer_AdditionalEmail `json:"additionalEmails,omitempty" xmlrpc:"additionalEmails,omitempty"`
+
+ // The first line of the mailing address belonging to a portal user.
+ Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
+
+ // The second line of the mailing address belonging to a portal user.
+ Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
+
+ // A portal user's AOL Instant Messenger screen name.
+ Aim *string `json:"aim,omitempty" xmlrpc:"aim,omitempty"`
+
+ // A portal user's secondary phone number.
+ AlternatePhone *string `json:"alternatePhone,omitempty" xmlrpc:"alternatePhone,omitempty"`
+
+ // A count of a portal user's API Authentication keys. There is a maximum of two API keys per user.
+ ApiAuthenticationKeyCount *uint `json:"apiAuthenticationKeyCount,omitempty" xmlrpc:"apiAuthenticationKeyCount,omitempty"`
+
+ // A portal user's API Authentication keys. There is a maximum of two API keys per user.
+ ApiAuthenticationKeys []User_Customer_ApiAuthentication `json:"apiAuthenticationKeys,omitempty" xmlrpc:"apiAuthenticationKeys,omitempty"`
+
+ // The authentication token used for logging into the SoftLayer customer portal.
+ AuthenticationToken *Container_User_Authentication_Token `json:"authenticationToken,omitempty" xmlrpc:"authenticationToken,omitempty"`
+
+ // A count of the CDN accounts associated with a portal user.
+ CdnAccountCount *uint `json:"cdnAccountCount,omitempty" xmlrpc:"cdnAccountCount,omitempty"`
+
+ // The CDN accounts associated with a portal user.
+ CdnAccounts []Network_ContentDelivery_Account `json:"cdnAccounts,omitempty" xmlrpc:"cdnAccounts,omitempty"`
+
+ // A count of a portal user's child users. Some portal users may not have child users.
+ ChildUserCount *uint `json:"childUserCount,omitempty" xmlrpc:"childUserCount,omitempty"`
+
+ // A portal user's child users. Some portal users may not have child users.
+ ChildUsers []User_Customer `json:"childUsers,omitempty" xmlrpc:"childUsers,omitempty"`
+
+ // The city of the mailing address belonging to a portal user.
+ City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
+
+ // A count of a user's associated closed tickets.
+ ClosedTicketCount *uint `json:"closedTicketCount,omitempty" xmlrpc:"closedTicketCount,omitempty"`
+
+ // A user's associated closed tickets.
+ ClosedTickets []Ticket `json:"closedTickets,omitempty" xmlrpc:"closedTickets,omitempty"`
+
+ // A portal user's associated company. This may not be the same company as the customer that owns this portal user.
+ CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
+
+ // A two-letter abbreviation of the country in the mailing address belonging to a portal user.
+ Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"`
+
+ // The date a portal user's record was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // Whether a portal user's time zone is affected by Daylight Savings Time.
+ DaylightSavingsTimeFlag *bool `json:"daylightSavingsTimeFlag,omitempty" xmlrpc:"daylightSavingsTimeFlag,omitempty"`
+
+ // Flag used to deny access to all hardware and cloud computing instances upon user creation.
+ DenyAllResourceAccessOnCreateFlag *bool `json:"denyAllResourceAccessOnCreateFlag,omitempty" xmlrpc:"denyAllResourceAccessOnCreateFlag,omitempty"`
+
+ // no documentation yet
+ DisplayName *string `json:"displayName,omitempty" xmlrpc:"displayName,omitempty"`
+
+ // A portal user's email address.
+ Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+ // A count of the external authentication bindings that link an external identifier to a SoftLayer user.
+ ExternalBindingCount *uint `json:"externalBindingCount,omitempty" xmlrpc:"externalBindingCount,omitempty"`
+
+ // The external authentication bindings that link an external identifier to a SoftLayer user.
+ ExternalBindings []User_External_Binding `json:"externalBindings,omitempty" xmlrpc:"externalBindings,omitempty"`
+
+ // A portal user's first name.
+ FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+ // A user's password for the SoftLayer forums, hashed for auto-login capability from the SoftLayer customer portal.
+ ForumPasswordHash *string `json:"forumPasswordHash,omitempty" xmlrpc:"forumPasswordHash,omitempty"`
+
+ // A portal user's accessible hardware. These permissions control which hardware a user has access to in the SoftLayer customer portal.
+ Hardware []Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // A count of a portal user's accessible hardware. These permissions control which hardware a user has access to in the SoftLayer customer portal.
+ HardwareCount *uint `json:"hardwareCount,omitempty" xmlrpc:"hardwareCount,omitempty"`
+
+ // A count of hardware notifications associated with this user. A hardware notification links a user to a piece of hardware, and that user will be notified if any monitors on that hardware fail, provided the monitors have a status of 'Notify User'.
+ HardwareNotificationCount *uint `json:"hardwareNotificationCount,omitempty" xmlrpc:"hardwareNotificationCount,omitempty"`
+
+ // Hardware notifications associated with this user. A hardware notification links a user to a piece of hardware, and that user will be notified if any monitors on that hardware fail, provided the monitors have a status of 'Notify User'.
+ HardwareNotifications []User_Customer_Notification_Hardware `json:"hardwareNotifications,omitempty" xmlrpc:"hardwareNotifications,omitempty"`
+
+ // Whether or not a user has acknowledged the support policy.
+ HasAcknowledgedSupportPolicyFlag *bool `json:"hasAcknowledgedSupportPolicyFlag,omitempty" xmlrpc:"hasAcknowledgedSupportPolicyFlag,omitempty"`
+
+ // Whether or not a portal user has access to all hardware on their account.
+ HasFullHardwareAccessFlag *bool `json:"hasFullHardwareAccessFlag,omitempty" xmlrpc:"hasFullHardwareAccessFlag,omitempty"`
+
+ // Whether or not a portal user has access to all virtual guests on their account.
+ HasFullVirtualGuestAccessFlag *bool `json:"hasFullVirtualGuestAccessFlag,omitempty" xmlrpc:"hasFullVirtualGuestAccessFlag,omitempty"`
+
+ // A portal user's ICQ UIN.
+ Icq *string `json:"icq,omitempty" xmlrpc:"icq,omitempty"`
+
+ // A portal user's internal identifying number.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The IP addresses or IP ranges from which a user may login to the SoftLayer customer portal. Specify subnets in CIDR format and separate multiple addresses and subnets by commas. You may combine IPv4 and IPv6 addresses and subnets, for example: 192.168.0.0/16,fe80:021b::0/64.
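+ // A hedged usage sketch (variable names are illustrative): the restriction
+ // is a plain comma-separated string, so it can be set with an ordinary
+ // pointer before saving the user through the API:
+ //
+ //	restriction := "10.0.0.0/8,203.0.113.42"
+ //	user.IpAddressRestriction = &restriction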
+ IpAddressRestriction *string `json:"ipAddressRestriction,omitempty" xmlrpc:"ipAddressRestriction,omitempty"`
+
+ // no documentation yet
+ IsMasterUserFlag *bool `json:"isMasterUserFlag,omitempty" xmlrpc:"isMasterUserFlag,omitempty"`
+
+ // A portal user's last name.
+ LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+ // A count of
+ LayoutProfileCount *uint `json:"layoutProfileCount,omitempty" xmlrpc:"layoutProfileCount,omitempty"`
+
+ // no documentation yet
+ LayoutProfiles []Layout_Profile `json:"layoutProfiles,omitempty" xmlrpc:"layoutProfiles,omitempty"`
+
+ // The linked account integration mode.
+ LinkedAccountIntegrationMode *string `json:"linkedAccountIntegrationMode,omitempty" xmlrpc:"linkedAccountIntegrationMode,omitempty"`
+
+ // A user's locale. Locale holds a user's language and region information.
+ Locale *Locale `json:"locale,omitempty" xmlrpc:"locale,omitempty"`
+
+ // A portal user's associated [[SoftLayer_Locale|locale]] id.
+ LocaleId *int `json:"localeId,omitempty" xmlrpc:"localeId,omitempty"`
+
+ // A count of a user's attempts to log into the SoftLayer customer portal.
+ LoginAttemptCount *uint `json:"loginAttemptCount,omitempty" xmlrpc:"loginAttemptCount,omitempty"`
+
+ // A user's attempts to log into the SoftLayer customer portal.
+ LoginAttempts []User_Customer_Access_Authentication `json:"loginAttempts,omitempty" xmlrpc:"loginAttempts,omitempty"`
+
+ // Determines if this portal user is managed by SAML federation.
+ ManagedByFederationFlag *bool `json:"managedByFederationFlag,omitempty" xmlrpc:"managedByFederationFlag,omitempty"`
+
+ // Determines if this portal user is managed by IBMid federation.
+ ManagedByOpenIdConnectFlag *bool `json:"managedByOpenIdConnectFlag,omitempty" xmlrpc:"managedByOpenIdConnectFlag,omitempty"`
+
+ // A count of a portal user's associated mobile device profiles.
+ MobileDeviceCount *uint `json:"mobileDeviceCount,omitempty" xmlrpc:"mobileDeviceCount,omitempty"`
+
+ // A portal user's associated mobile device profiles.
+ MobileDevices []User_Customer_MobileDevice `json:"mobileDevices,omitempty" xmlrpc:"mobileDevices,omitempty"`
+
+ // The date a portal user's record was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A portal user's MSN address.
+ Msn *string `json:"msn,omitempty" xmlrpc:"msn,omitempty"`
+
+ // no documentation yet
+ NameId *string `json:"nameId,omitempty" xmlrpc:"nameId,omitempty"`
+
+ // A count of notification subscription records for the user.
+ NotificationSubscriberCount *uint `json:"notificationSubscriberCount,omitempty" xmlrpc:"notificationSubscriberCount,omitempty"`
+
+ // Notification subscription records for the user.
+ NotificationSubscribers []Notification_Subscriber `json:"notificationSubscribers,omitempty" xmlrpc:"notificationSubscribers,omitempty"`
+
+ // A portal user's office phone number.
+ OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"`
+
+ // The BlueID username associated with this user, if the account is managed by OpenIDConnect / BlueID federation.
+ OpenIdConnectUserName *string `json:"openIdConnectUserName,omitempty" xmlrpc:"openIdConnectUserName,omitempty"`
+
+ // A count of a user's associated open tickets.
+ OpenTicketCount *uint `json:"openTicketCount,omitempty" xmlrpc:"openTicketCount,omitempty"`
+
+ // A user's associated open tickets.
+ OpenTickets []Ticket `json:"openTickets,omitempty" xmlrpc:"openTickets,omitempty"`
+
+ // A count of a portal user's vpn accessible subnets.
+ OverrideCount *uint `json:"overrideCount,omitempty" xmlrpc:"overrideCount,omitempty"`
+
+ // A portal user's vpn accessible subnets.
+ Overrides []Network_Service_Vpn_Overrides `json:"overrides,omitempty" xmlrpc:"overrides,omitempty"`
+
+ // A portal user's parent user. If a SoftLayer_User_Customer has a null parentId property then it doesn't have a parent user.
+ Parent *User_Customer `json:"parent,omitempty" xmlrpc:"parent,omitempty"`
+
+ // A portal user's parent user. If a user's parentId is ''null'' then it doesn't have a parent user in the customer portal.
+ ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+
+ // The expiration date for the user's password.
+ PasswordExpireDate *Time `json:"passwordExpireDate,omitempty" xmlrpc:"passwordExpireDate,omitempty"`
+
+ // A count of a portal user's permissions. These permissions control that user's access to functions within the SoftLayer customer portal and API.
+ PermissionCount *uint `json:"permissionCount,omitempty" xmlrpc:"permissionCount,omitempty"`
+
+ // no documentation yet
+ PermissionSystemVersion *int `json:"permissionSystemVersion,omitempty" xmlrpc:"permissionSystemVersion,omitempty"`
+
+ // A portal user's permissions. These permissions control that user's access to functions within the SoftLayer customer portal and API.
+ Permissions []User_Customer_CustomerPermission_Permission `json:"permissions,omitempty" xmlrpc:"permissions,omitempty"`
+
+ // The postal code of the mailing address belonging to a portal user.
+ PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"`
+
+ // Whether a portal user may connect to the SoftLayer private network via PPTP VPN or not.
+ PptpVpnAllowedFlag *bool `json:"pptpVpnAllowedFlag,omitempty" xmlrpc:"pptpVpnAllowedFlag,omitempty"`
+
+ // A count of
+ PreferenceCount *uint `json:"preferenceCount,omitempty" xmlrpc:"preferenceCount,omitempty"`
+
+ // no documentation yet
+ Preferences []User_Preference `json:"preferences,omitempty" xmlrpc:"preferences,omitempty"`
+
+ // A count of
+ RoleCount *uint `json:"roleCount,omitempty" xmlrpc:"roleCount,omitempty"`
+
+ // no documentation yet
+ Roles []User_Permission_Role `json:"roles,omitempty" xmlrpc:"roles,omitempty"`
+
+ // no documentation yet
+ SalesforceUserLink *User_Customer_Link `json:"salesforceUserLink,omitempty" xmlrpc:"salesforceUserLink,omitempty"`
+
+ // no documentation yet
+ SavedId *string `json:"savedId,omitempty" xmlrpc:"savedId,omitempty"`
+
+ // Whether a user may change their security options (IP restriction, password expiration, or enforced security questions on login) which were pre-selected by their account's master user.
+ SecondaryLoginManagementFlag *bool `json:"secondaryLoginManagementFlag,omitempty" xmlrpc:"secondaryLoginManagementFlag,omitempty"`
+
+ // Whether a user is required to answer a security question when logging into the SoftLayer customer portal.
+ SecondaryLoginRequiredFlag *bool `json:"secondaryLoginRequiredFlag,omitempty" xmlrpc:"secondaryLoginRequiredFlag,omitempty"`
+
+ // The date when a user's password was last updated.
+ SecondaryPasswordModifyDate *Time `json:"secondaryPasswordModifyDate,omitempty" xmlrpc:"secondaryPasswordModifyDate,omitempty"`
+
+ // The number of days for which a user's password is active.
+ SecondaryPasswordTimeoutDays *int `json:"secondaryPasswordTimeoutDays,omitempty" xmlrpc:"secondaryPasswordTimeoutDays,omitempty"`
+
+ // A count of a portal user's security question answers. Some portal users may not have security answers or may not be configured to require answering a security question on login.
+ SecurityAnswerCount *uint `json:"securityAnswerCount,omitempty" xmlrpc:"securityAnswerCount,omitempty"`
+
+ // A portal user's security question answers. Some portal users may not have security answers or may not be configured to require answering a security question on login.
+ SecurityAnswers []User_Customer_Security_Answer `json:"securityAnswers,omitempty" xmlrpc:"securityAnswers,omitempty"`
+
+ // A phone number that can receive SMS text messages for this portal user.
+ Sms *string `json:"sms,omitempty" xmlrpc:"sms,omitempty"`
+
+ // Whether a portal user may connect to the SoftLayer private network via SSL VPN or not.
+ SslVpnAllowedFlag *bool `json:"sslVpnAllowedFlag,omitempty" xmlrpc:"sslVpnAllowedFlag,omitempty"`
+
+ // A two-letter abbreviation of the state in the mailing address belonging to a portal user. If a user does not reside in a province then this is typically blank.
+ State *string `json:"state,omitempty" xmlrpc:"state,omitempty"`
+
+ // The date of a portal user record's last status change.
+ StatusDate *Time `json:"statusDate,omitempty" xmlrpc:"statusDate,omitempty"`
+
+ // A count of a user's notification subscription records.
+ SubscriberCount *uint `json:"subscriberCount,omitempty" xmlrpc:"subscriberCount,omitempty"`
+
+ // A user's notification subscription records.
+ Subscribers []Notification_User_Subscriber `json:"subscribers,omitempty" xmlrpc:"subscribers,omitempty"`
+
+ // A count of a user's successful attempts to log into the SoftLayer customer portal.
+ SuccessfulLoginCount *uint `json:"successfulLoginCount,omitempty" xmlrpc:"successfulLoginCount,omitempty"`
+
+ // A user's successful attempts to log into the SoftLayer customer portal.
+ SuccessfulLogins []User_Customer_Access_Authentication `json:"successfulLogins,omitempty" xmlrpc:"successfulLogins,omitempty"`
+
+ // Whether or not a user is required to acknowledge the support policy for portal access.
+ SupportPolicyAcknowledgementRequiredFlag *int `json:"supportPolicyAcknowledgementRequiredFlag,omitempty" xmlrpc:"supportPolicyAcknowledgementRequiredFlag,omitempty"`
+
+ // A count of the surveys that a user has taken in the SoftLayer customer portal.
+ SurveyCount *uint `json:"surveyCount,omitempty" xmlrpc:"surveyCount,omitempty"`
+
+ // Whether or not a user must take a brief survey the next time they log into the SoftLayer customer portal.
+ SurveyRequiredFlag *bool `json:"surveyRequiredFlag,omitempty" xmlrpc:"surveyRequiredFlag,omitempty"`
+
+ // The surveys that a user has taken in the SoftLayer customer portal.
+ Surveys []Survey `json:"surveys,omitempty" xmlrpc:"surveys,omitempty"`
+
+ // A count of a user's associated tickets.
+ TicketCount *uint `json:"ticketCount,omitempty" xmlrpc:"ticketCount,omitempty"`
+
+ // A user's associated tickets.
+ Tickets []Ticket `json:"tickets,omitempty" xmlrpc:"tickets,omitempty"`
+
+ // A portal user's time zone.
+ Timezone *Locale_Timezone `json:"timezone,omitempty" xmlrpc:"timezone,omitempty"`
+
+ // The internal identifier of a portal user's time zone.
+ TimezoneId *int `json:"timezoneId,omitempty" xmlrpc:"timezoneId,omitempty"`
+
+ // A count of a user's unsuccessful attempts to log into the SoftLayer customer portal.
+	UnsuccessfulLoginCount *uint `json:"unsuccessfulLoginCount,omitempty" xmlrpc:"unsuccessfulLoginCount,omitempty"`
+
+	// A user's unsuccessful attempts to log into the SoftLayer customer portal.
+	UnsuccessfulLogins []User_Customer_Access_Authentication `json:"unsuccessfulLogins,omitempty" xmlrpc:"unsuccessfulLogins,omitempty"`
+
+	// A count of
+	UserLinkCount *uint `json:"userLinkCount,omitempty" xmlrpc:"userLinkCount,omitempty"`
+
+	// no documentation yet
+	UserLinks []User_Customer_Link `json:"userLinks,omitempty" xmlrpc:"userLinks,omitempty"`
+
+	// A portal user's status, which controls overall access to the SoftLayer customer portal and VPN access to the private network.
+	UserStatus *User_Customer_Status `json:"userStatus,omitempty" xmlrpc:"userStatus,omitempty"`
+
+	// A number reflecting the state of a portal user.
+	UserStatusId *int `json:"userStatusId,omitempty" xmlrpc:"userStatusId,omitempty"`
+
+	// A portal user's username.
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+
+	// The verification code from Bluemix BSS to save in the invitation
+	VerificationCode *string `json:"verificationCode,omitempty" xmlrpc:"verificationCode,omitempty"`
+
+	// A count of a portal user's accessible CloudLayer Computing Instances. These permissions control which CloudLayer Computing Instances a user has access to in the SoftLayer customer portal.
+	VirtualGuestCount *uint `json:"virtualGuestCount,omitempty" xmlrpc:"virtualGuestCount,omitempty"`
+
+	// A portal user's accessible CloudLayer Computing Instances. These permissions control which CloudLayer Computing Instances a user has access to in the SoftLayer customer portal.
+	VirtualGuests []Virtual_Guest `json:"virtualGuests,omitempty" xmlrpc:"virtualGuests,omitempty"`
+
+	// Whether a portal user's vpn subnets have been manually configured.
+	VpnManualConfig *bool `json:"vpnManualConfig,omitempty" xmlrpc:"vpnManualConfig,omitempty"`
+
+	// A portal user's Yahoo! Chat name.
+	Yahoo *string `json:"yahoo,omitempty" xmlrpc:"yahoo,omitempty"`
+}
+
+// SoftLayer_User_Customer_Access_Authentication models a single attempt to log into the SoftLayer customer portal. A SoftLayer_User_Customer_Access_Authentication record is created every time a user attempts to log into the portal. Use this service to audit your users' portal activity and diagnose potential security breaches of your SoftLayer portal accounts.
+//
+// Unsuccessful login attempts can be caused by an incorrect password, failing to answer or not answering a login security question if the user has them configured, or attempting to log in from an IP address outside of the user's IP address restriction list.
+//
+// SoftLayer employees periodically log into our customer portal as users to diagnose portal issues, verify settings and configuration, and to perform maintenance on your account or services. SoftLayer employees only log into customer accounts from the following IP ranges:
+// * 2607:f0d0:1000::/48
+// * 2607:f0d0:2000::/48
+// * 2607:f0d0:3000::/48
+// * 66.228.118.67/32
+// * 66.228.118.86/32
+type User_Customer_Access_Authentication struct {
+	Entity
+
+	// The date of an attempt to log into the SoftLayer customer portal.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The IP address of the user who attempted to log into the SoftLayer customer portal.
+	IpAddress *string `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"`
+
+	// Whether an attempt to log into the SoftLayer customer portal was successful or not.
+	SuccessFlag *bool `json:"successFlag,omitempty" xmlrpc:"successFlag,omitempty"`
+
+	// The user who has attempted to log into the SoftLayer customer portal.
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+	// The internal identifier of the user who attempted to log into the SoftLayer customer portal.
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+
+	// The username used when attempting to log into the SoftLayer customer portal
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// The SoftLayer_User_Customer_AdditionalEmail data type contains the additional email for use in ticket update notifications.
+type User_Customer_AdditionalEmail struct {
+	Entity
+
+	// Email assigned to user for use in ticket update notifications.
+	Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"`
+
+	// The portal user that owns this additional email address.
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+	// An internal identifier for the portal user who this additional email belongs to.
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// The SoftLayer_User_Customer_ApiAuthentication type contains a user's authentication key(s).
+type User_Customer_ApiAuthentication struct {
+	Entity
+
+	// The user's authentication key for API access.
+	AuthenticationKey *string `json:"authenticationKey,omitempty" xmlrpc:"authenticationKey,omitempty"`
+
+	// The user's API authentication identifying number.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The IP addresses or IP ranges from which this user may access the SoftLayer API. Specify subnets in CIDR format and separate multiple addresses and subnets by commas. You may combine IPv4 and IPv6 addresses and subnets, for example: 192.168.0.0/16,fe80:021b::0/64.
+	IpAddressRestriction *string `json:"ipAddressRestriction,omitempty" xmlrpc:"ipAddressRestriction,omitempty"`
+
+	// The user's authentication key modification date.
+	TimestampKey *int `json:"timestampKey,omitempty" xmlrpc:"timestampKey,omitempty"`
+
+	// The user who owns the api authentication key.
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+	// The user's identifying number.
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// Each SoftLayer portal account is assigned a series of permissions that determine what access the user has to functions within the SoftLayer customer portal. These permissions are reflected in the SoftLayer_User_Customer_CustomerPermission_Permission data type. Permissions differ from user status in that user status applies globally to the portal while user permissions are applied to specific portal functions.
+type User_Customer_CustomerPermission_Permission struct {
+	Entity
+
+	// A user permission's short name.
+	Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"`
+
+	// A user permission's key name.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// A user permission's name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
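+// Illustrative usage sketch (editor's addition, not part of the generated bindings):
+// the companion session and services packages in softlayer-go are assumed to expose
+// a session constructor and generated relational getters such as
+// GetUnsuccessfulLogins; verify the exact names against the vendored release.
+//
+//	sess := session.New(username, apiKey)
+//	userSvc := services.GetUserCustomerService(sess)
+//	// Audit recent failed portal logins for a given user id:
+//	logins, err := userSvc.Id(userId).GetUnsuccessfulLogins()
+//	if err == nil {
+//		for _, l := range logins {
+//			fmt.Println(*l.CreateDate, *l.IpAddress)
+//		}
+//	}
+
+// The SoftLayer_User_Customer_External_Binding data type contains general information for a single external binding. This includes the 3rd party vendor, type of binding, and a unique identifier and password that is used to authenticate against the 3rd party service.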
+type User_Customer_External_Binding struct {
+	User_External_Binding
+
+	// The SoftLayer user that the external authentication binding belongs to.
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+}
+
+// The SoftLayer_User_Customer_External_Binding_Attribute data type contains the value for a single attribute associated with an external binding. External binding attributes contain additional information about an external binding. An attribute can be generic or specific to a 3rd party vendor. For example, these attributes relate to Verisign:
+// *Credential Type
+// *Credential State
+// *Credential Expiration Date
+// *Credential Last Update Date
+type User_Customer_External_Binding_Attribute struct {
+	User_External_Binding_Attribute
+}
+
+// The SoftLayer_User_Customer_External_Binding_Phone data type contains information about an external binding that uses a phone call, SMS or mobile app for two-factor authentication. The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal or VPN to authenticate them against a trusted 3rd party, in this case using a mobile phone, mobile phone application or land-line phone.
+//
+// SoftLayer users with an active external binding will be prohibited from using the API for security reasons.
+type User_Customer_External_Binding_Phone struct {
+	User_Customer_External_Binding
+
+	// The current external binding status. It can be "ACTIVE" or "BLOCKED".
+	BindingStatus *string `json:"bindingStatus,omitempty" xmlrpc:"bindingStatus,omitempty"`
+
+	// no documentation yet
+	PinLength *string `json:"pinLength,omitempty" xmlrpc:"pinLength,omitempty"`
+}
+
+// The SoftLayer_User_Customer_External_Binding_Totp data type contains information about a single time-based one time password external binding. The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal to authenticate them.
+//
+// The information provided by this external binding data type includes:
+// * The type of credential
+// * The current state of the credential
+// ** Active
+// ** Inactive
+//
+//
+// SoftLayer users with an active external binding will be prohibited from using the API for security reasons.
+type User_Customer_External_Binding_Totp struct {
+	User_Customer_External_Binding
+}
+
+// The SoftLayer_User_Customer_External_Binding_Type data type contains information relating to a type of external authentication binding. It contains a user-friendly name as well as a unique key name.
+type User_Customer_External_Binding_Type struct {
+	User_External_Binding_Type
+}
+
+// The SoftLayer_User_Customer_External_Binding_Vendor data type contains information for a single external binding vendor. This information includes a user-friendly vendor name, a unique version of the vendor name, and a unique internal identifier that can be used when creating a new external binding.
+type User_Customer_External_Binding_Vendor struct {
+	User_External_Binding_Vendor
+}
+
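+// Illustrative sketch (editor's addition): assuming the generated service layer
+// mirrors the externalBindings relational property on SoftLayer_User_Customer,
+// a user's two-factor bindings could be inspected roughly like this:
+//
+//	bindings, err := services.GetUserCustomerService(sess).
+//		Id(userId).GetExternalBindings()
+
+// The SoftLayer_User_Customer_External_Binding_Verisign data type contains information about a single VeriSign external binding. The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal to authenticate them against a 3rd party, in this case VeriSign.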
+// +// The information provided by the VeriSign external binding data type includes: +// * The type of credential +// * The current state of the credential +// ** Enabled +// ** Disabled +// ** Locked +// * The credential's expiration date +// * The last time the credential was updated +// +// +// SoftLayer users with an active external binding will be prohibited from using the API for security reasons. +type User_Customer_External_Binding_Verisign struct { + User_Customer_External_Binding + + // The date that a VeriSign credential expires. + CredentialExpirationDate *string `json:"credentialExpirationDate,omitempty" xmlrpc:"credentialExpirationDate,omitempty"` + + // The last time a VeriSign credential was updated. + CredentialLastUpdateDate *string `json:"credentialLastUpdateDate,omitempty" xmlrpc:"credentialLastUpdateDate,omitempty"` + + // The current state of a VeriSign credential. This can be 'Enabled', 'Disabled', or 'Locked'. + CredentialState *string `json:"credentialState,omitempty" xmlrpc:"credentialState,omitempty"` + + // The type of VeriSign credential. This can be either 'Hardware' or 'Software'. + CredentialType *string `json:"credentialType,omitempty" xmlrpc:"credentialType,omitempty"` +} + +// no documentation yet +type User_Customer_Invitation struct { + Entity + + // no documentation yet + Code *string `json:"code,omitempty" xmlrpc:"code,omitempty"` + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + CreatorId *int `json:"creatorId,omitempty" xmlrpc:"creatorId,omitempty"` + + // no documentation yet + CreatorType *string `json:"creatorType,omitempty" xmlrpc:"creatorType,omitempty"` + + // no documentation yet + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // no documentation yet + ExistingBlueIdFlag *int `json:"existingBlueIdFlag,omitempty" xmlrpc:"existingBlueIdFlag,omitempty"` + + // no documentation yet + ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + IsFederatedEmailDomainFlag *int `json:"isFederatedEmailDomainFlag,omitempty" xmlrpc:"isFederatedEmailDomainFlag,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + ResponseDate *Time `json:"responseDate,omitempty" xmlrpc:"responseDate,omitempty"` + + // no documentation yet + StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // no documentation yet + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// no documentation yet +type User_Customer_Link struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + DefaultFlag *int `json:"defaultFlag,omitempty" xmlrpc:"defaultFlag,omitempty"` + + // no documentation yet + DestinationUserAlphanumericId *string `json:"destinationUserAlphanumericId,omitempty" xmlrpc:"destinationUserAlphanumericId,omitempty"` + + // no documentation yet + DestinationUserId *int `json:"destinationUserId,omitempty" xmlrpc:"destinationUserId,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + ServiceProvider *Service_Provider 
`json:"serviceProvider,omitempty" xmlrpc:"serviceProvider,omitempty"` + + // no documentation yet + ServiceProviderId *int `json:"serviceProviderId,omitempty" xmlrpc:"serviceProviderId,omitempty"` + + // no documentation yet + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // no documentation yet + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// no documentation yet +type User_Customer_Link_ThePlanet struct { + User_Customer_Link +} + +// This class represents a mobile device belonging to a user. The device can be a phone, tablet, or possibly even some Android based net books. The purpose is to tie just enough info with the device and the user to enable push notifications through non-softlayer entities (Google, Apple, RIM). +type User_Customer_MobileDevice struct { + Entity + + // A count of notification subscriptions available to a mobile device. + AvailablePushNotificationSubscriptionCount *uint `json:"availablePushNotificationSubscriptionCount,omitempty" xmlrpc:"availablePushNotificationSubscriptionCount,omitempty"` + + // Notification subscriptions available to a mobile device. + AvailablePushNotificationSubscriptions []Notification `json:"availablePushNotificationSubscriptions,omitempty" xmlrpc:"availablePushNotificationSubscriptions,omitempty"` + + // Created date for the record. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The user this mobile device belongs to. + Customer *User_Customer `json:"customer,omitempty" xmlrpc:"customer,omitempty"` + + // The device resolution formatted width x height + DisplayResolutionXxY *string `json:"displayResolutionXxY,omitempty" xmlrpc:"displayResolutionXxY,omitempty"` + + // Record Identifier + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // Device type identifier. + MobileDeviceTypeId *int `json:"mobileDeviceTypeId,omitempty" xmlrpc:"mobileDeviceTypeId,omitempty"` + + // Mobile OS identifier. + MobileOperatingSystemId *int `json:"mobileOperatingSystemId,omitempty" xmlrpc:"mobileOperatingSystemId,omitempty"` + + // Device model number + ModelNumber *string `json:"modelNumber,omitempty" xmlrpc:"modelNumber,omitempty"` + + // Last modify date for the record. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // The name of the device the user is using. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The operating system this device is using + OperatingSystem *User_Customer_MobileDevice_OperatingSystem `json:"operatingSystem,omitempty" xmlrpc:"operatingSystem,omitempty"` + + // Device phone number + PhoneNumber *string `json:"phoneNumber,omitempty" xmlrpc:"phoneNumber,omitempty"` + + // A count of notification subscriptions attached to a mobile device. + PushNotificationSubscriptionCount *uint `json:"pushNotificationSubscriptionCount,omitempty" xmlrpc:"pushNotificationSubscriptionCount,omitempty"` + + // Notification subscriptions attached to a mobile device. + PushNotificationSubscriptions []Notification_User_Subscriber `json:"pushNotificationSubscriptions,omitempty" xmlrpc:"pushNotificationSubscriptions,omitempty"` + + // Device serial number + SerialNumber *string `json:"serialNumber,omitempty" xmlrpc:"serialNumber,omitempty"` + + // The token that is provided by the mobile device. 
+	Token *string `json:"token,omitempty" xmlrpc:"token,omitempty"`
+
+	// The type of device this user is using
+	Type *User_Customer_MobileDevice_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// User Identifier
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// This class represents the mobile operating system installed on a user's registered mobile device. It assists us in determining how to get a push notification to the user.
+type User_Customer_MobileDevice_OperatingSystem struct {
+	Entity
+
+	// Build revision number of the operating system.
+	BuildVersion *int `json:"buildVersion,omitempty" xmlrpc:"buildVersion,omitempty"`
+
+	// Create date of the record.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// Description of the mobile operating system.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// Identifier for the record.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Major revision number of the operating system.
+	MajorVersion *int `json:"majorVersion,omitempty" xmlrpc:"majorVersion,omitempty"`
+
+	// Minor revision number of the operating system.
+	MinorVersion *int `json:"minorVersion,omitempty" xmlrpc:"minorVersion,omitempty"`
+
+	// Modify date of the record.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// Name of the mobile operating system.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// Describes a supported class of mobile device. Here the word class is used in the context of classes of consumer electronic devices, the two most prominent examples being mobile phones and tablets.
+type User_Customer_MobileDevice_Type struct {
+	Entity
+
+	// Record create date.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// A description of the device
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// Identifier for the record.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Last modify date for record.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// The common name of the device.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The Customer_Notification_Hardware object stores links between customers and the hardware devices they wish to monitor. This link alone is not enough; the user must also create a SoftLayer_Network_Monitor_Version1_Query_Host instance with the response action set to "notify users" in order for the users linked to that hardware object to be notified on failure.
+type User_Customer_Notification_Hardware struct {
+	Entity
+
+	// The hardware object that will be monitored.
+	Hardware *Hardware `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+	// The ID of the Hardware object that is to be monitored.
+	HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+	// The unique identifier for this object
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The user that will be notified when the associated hardware object fails a monitoring instance.
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
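+	// Illustrative sketch (editor's addition): creating such a link presumably goes
+	// through the generated SoftLayer_User_Customer_Notification_Hardware service;
+	// the service and helper names below are assumed from the library's conventions
+	// and should be verified against the vendored release.
+	//
+	//	link := datatypes.User_Customer_Notification_Hardware{
+	//		HardwareId: sl.Int(hardwareId),
+	//		UserId:     sl.Int(userId),
+	//	}
+	//	created, err := services.
+	//		GetUserCustomerNotificationHardwareService(sess).
+	//		CreateObject(&link)
+
+	// The ID of the SoftLayer_User_Customer object that represents the user to be notified on monitoring failure.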
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// The SoftLayer_User_Customer_Notification_Virtual_Guest object stores links between customers and the virtual guests they wish to monitor. This link alone is not enough; the user must also create a SoftLayer_Network_Monitor_Version1_Query_Host instance with the response action set to "notify users" in order for the users linked to that virtual guest object to be notified on failure.
+type User_Customer_Notification_Virtual_Guest struct {
+	Entity
+
+	// The virtual guest object that will be monitored.
+	Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"`
+
+	// The ID of the virtual guest object that is to be monitored.
+	GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"`
+
+	// The unique identifier for this object
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The user that will be notified when the associated virtual guest object fails a monitoring instance.
+	User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"`
+
+	// The ID of the SoftLayer_User_Customer object that represents the user to be notified on monitoring failure.
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+}
+
+// no documentation yet
+type User_Customer_OpenIdConnect struct {
+	User_Customer
+}
+
+// no documentation yet
+type User_Customer_Prospect struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// A count of
+	AssignedEmployeeCount *uint `json:"assignedEmployeeCount,omitempty" xmlrpc:"assignedEmployeeCount,omitempty"`
+
+	// no documentation yet
+	AssignedEmployees []User_Employee `json:"assignedEmployees,omitempty" xmlrpc:"assignedEmployees,omitempty"`
+
+	// A count of
+	QuoteCount *uint `json:"quoteCount,omitempty" xmlrpc:"quoteCount,omitempty"`
+
+	// no documentation yet
+	Quotes []Billing_Order_Quote `json:"quotes,omitempty" xmlrpc:"quotes,omitempty"`
+
+	// no documentation yet
+	Type *User_Customer_Prospect_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// Contains user information for Service Provider Enrollment.
+type User_Customer_Prospect_ServiceProvider_EnrollRequest struct {
+	Entity
+
+	// accountId of existing SoftLayer Customer
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// Service provider address1
+	Address1 *string `json:"address1,omitempty" xmlrpc:"address1,omitempty"`
+
+	// Service provider address2
+	Address2 *string `json:"address2,omitempty" xmlrpc:"address2,omitempty"`
+
+	// Credit card account number
+	CardAccountNumber *string `json:"cardAccountNumber,omitempty" xmlrpc:"cardAccountNumber,omitempty"`
+
+	// Credit card expiration month
+	CardExpirationMonth *string `json:"cardExpirationMonth,omitempty" xmlrpc:"cardExpirationMonth,omitempty"`
+
+	// Credit card expiration year
+	CardExpirationYear *string `json:"cardExpirationYear,omitempty" xmlrpc:"cardExpirationYear,omitempty"`
+
+	// Type of credit card being used
+	CardType *string `json:"cardType,omitempty" xmlrpc:"cardType,omitempty"`
+
+	// Credit card verification number
+	CardVerificationNumber *string `json:"cardVerificationNumber,omitempty" xmlrpc:"cardVerificationNumber,omitempty"`
+
+	// Service provider city
+	City *string `json:"city,omitempty" xmlrpc:"city,omitempty"`
+
+	// Service provider company name
+	CompanyName *string `json:"companyName,omitempty" xmlrpc:"companyName,omitempty"`
+
+	// Catalyst company types.
+ CompanyType *Catalyst_Company_Type `json:"companyType,omitempty" xmlrpc:"companyType,omitempty"` + + // Id of the company type which best describes applicant's company + CompanyTypeId *int `json:"companyTypeId,omitempty" xmlrpc:"companyTypeId,omitempty"` + + // Service provider company url + CompanyUrl *string `json:"companyUrl,omitempty" xmlrpc:"companyUrl,omitempty"` + + // Service provider contact's email + ContactEmail *string `json:"contactEmail,omitempty" xmlrpc:"contactEmail,omitempty"` + + // Service provider contact's first name + ContactFirstName *string `json:"contactFirstName,omitempty" xmlrpc:"contactFirstName,omitempty"` + + // Service provider contact's last name + ContactLastName *string `json:"contactLastName,omitempty" xmlrpc:"contactLastName,omitempty"` + + // Service provider contact's Phone + ContactPhone *string `json:"contactPhone,omitempty" xmlrpc:"contactPhone,omitempty"` + + // Service provider country + Country *string `json:"country,omitempty" xmlrpc:"country,omitempty"` + + // Customer Prospect id + CustomerProspectId *int `json:"customerProspectId,omitempty" xmlrpc:"customerProspectId,omitempty"` + + // Id of the device fingerprint + DeviceFingerprintId *string `json:"deviceFingerprintId,omitempty" xmlrpc:"deviceFingerprintId,omitempty"` + + // Service provider email + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // Indicates if customer has an existing SoftLayer account + ExistingCustomerFlag *bool `json:"existingCustomerFlag,omitempty" xmlrpc:"existingCustomerFlag,omitempty"` + + // Service provider first name + FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"` + + // IBM partner world id + IbmPartnerWorldId *string `json:"ibmPartnerWorldId,omitempty" xmlrpc:"ibmPartnerWorldId,omitempty"` + + // Indicates if the customer is IBM partner world member + IbmPartnerWorldMemberFlag *bool `json:"ibmPartnerWorldMemberFlag,omitempty" xmlrpc:"ibmPartnerWorldMemberFlag,omitempty"` + + // Service provider last name + LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"` + + // Flag indicating whether or not applicant acknowledged MSA + MasterAgreementCompleteFlag *bool `json:"masterAgreementCompleteFlag,omitempty" xmlrpc:"masterAgreementCompleteFlag,omitempty"` + + // Service provider office phone + OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"` + + // Service provider postalCode + PostalCode *string `json:"postalCode,omitempty" xmlrpc:"postalCode,omitempty"` + + // Flag indicating whether or not applicant acknowledged service provider addendum + ServiceProviderAddendumFlag *bool `json:"serviceProviderAddendumFlag,omitempty" xmlrpc:"serviceProviderAddendumFlag,omitempty"` + + // Service provider state + State *string `json:"state,omitempty" xmlrpc:"state,omitempty"` + + // Survey responses + SurveyResponses []Survey_Response `json:"surveyResponses,omitempty" xmlrpc:"surveyResponses,omitempty"` + + // Applicant's VAT id, if one exists + VatId *string `json:"vatId,omitempty" xmlrpc:"vatId,omitempty"` +} + +// no documentation yet +type User_Customer_Prospect_Type struct { + Entity + + // no documentation yet + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // no documentation yet + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" 
xmlrpc:"keyName,omitempty"` + + // no documentation yet + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // no documentation yet + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// The SoftLayer_User_Customer_Security_Answer type contains user's answers to security questions. +type User_Customer_Security_Answer struct { + Entity + + // A user's answer. + Answer *string `json:"answer,omitempty" xmlrpc:"answer,omitempty"` + + // A user's answer identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // The question the security answer is associated with. + Question *User_Security_Question `json:"question,omitempty" xmlrpc:"question,omitempty"` + + // A user's question identifying number. + QuestionId *int `json:"questionId,omitempty" xmlrpc:"questionId,omitempty"` + + // The user who the security answer belongs to. + User *User_Customer `json:"user,omitempty" xmlrpc:"user,omitempty"` + + // A user's identifying number. + UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"` +} + +// Each SoftLayer portal account is assigned a status code that determines how it's treated in the customer portal. This status is reflected in the SoftLayer_User_Customer_Status data type. Status differs from user permissions in that user status applies globally to the portal while user permissions are applied to specific portal functions. +type User_Customer_Status struct { + Entity + + // A user's status identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A user's status keyname + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // A user's status. This can be either "Active" for user accounts with portal access, "Inactive" for users disabled by another portal user, "Disabled" for accounts turned off by SoftLayer, or "VPN Only" for user accounts with no access to the customer portal but VPN access to the private network. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` +} + +// A SoftLayer_User_Employee models a single SoftLayer employee for the purposes of ticket updates created by SoftLayer employees. SoftLayer portal and API users cannot see individual employee names in ticket responses. SoftLayer employees can be assigned to customer accounts as a personal support representative. Employee names and email will be available if an employee is assigned to the account. +type User_Employee struct { + User_Interface + + // A count of + ActionCount *uint `json:"actionCount,omitempty" xmlrpc:"actionCount,omitempty"` + + // no documentation yet + Actions []User_Permission_Action `json:"actions,omitempty" xmlrpc:"actions,omitempty"` + + // no documentation yet + ChatTranscript []Ticket_Chat `json:"chatTranscript,omitempty" xmlrpc:"chatTranscript,omitempty"` + + // A count of + ChatTranscriptCount *uint `json:"chatTranscriptCount,omitempty" xmlrpc:"chatTranscriptCount,omitempty"` + + // no documentation yet + DisplayName *string `json:"displayName,omitempty" xmlrpc:"displayName,omitempty"` + + // A SoftLayer employee's email address. Email addresses are only visible to [[SoftLayer_Account|SoftLayer Accounts]] that are assigned to an employee + Email *string `json:"email,omitempty" xmlrpc:"email,omitempty"` + + // The department that a SoftLayer employee belongs to. + EmployeeDepartment *User_Employee_Department `json:"employeeDepartment,omitempty" xmlrpc:"employeeDepartment,omitempty"` + + // A SoftLayer employee's [[SoftLayer_User_Employee_Department|department]] id. 
+	EmployeeDepartmentId *int `json:"employeeDepartmentId,omitempty" xmlrpc:"employeeDepartmentId,omitempty"`
+
+	// A SoftLayer employee's first name. First names are only visible to [[SoftLayer_Account|SoftLayer Accounts]] that are assigned to an employee.
+	FirstName *string `json:"firstName,omitempty" xmlrpc:"firstName,omitempty"`
+
+	// A SoftLayer employee's last name. Last names are only visible to [[SoftLayer_Account|SoftLayer Accounts]] that are assigned to an employee.
+	LastName *string `json:"lastName,omitempty" xmlrpc:"lastName,omitempty"`
+
+	// A count of
+	LayoutProfileCount *uint `json:"layoutProfileCount,omitempty" xmlrpc:"layoutProfileCount,omitempty"`
+
+	// no documentation yet
+	LayoutProfiles []Layout_Profile `json:"layoutProfiles,omitempty" xmlrpc:"layoutProfiles,omitempty"`
+
+	// no documentation yet
+	MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"`
+
+	// no documentation yet
+	OfficePhone *string `json:"officePhone,omitempty" xmlrpc:"officePhone,omitempty"`
+
+	// A count of
+	RoleCount *uint `json:"roleCount,omitempty" xmlrpc:"roleCount,omitempty"`
+
+	// no documentation yet
+	Roles []User_Permission_Role `json:"roles,omitempty" xmlrpc:"roles,omitempty"`
+
+	// no documentation yet
+	TicketActivities []Ticket_Activity `json:"ticketActivities,omitempty" xmlrpc:"ticketActivities,omitempty"`
+
+	// A count of
+	TicketActivityCount *uint `json:"ticketActivityCount,omitempty" xmlrpc:"ticketActivityCount,omitempty"`
+
+	// A count of
+	TicketAttachmentReferenceCount *uint `json:"ticketAttachmentReferenceCount,omitempty" xmlrpc:"ticketAttachmentReferenceCount,omitempty"`
+
+	// no documentation yet
+	TicketAttachmentReferences []Ticket_Attachment `json:"ticketAttachmentReferences,omitempty" xmlrpc:"ticketAttachmentReferences,omitempty"`
+
+	// A representation of a SoftLayer employee's username. In all cases this should simply state "Employee".
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// SoftLayer_User_Employee_Department models a department within SoftLayer's internal employee hierarchy. Common departments include Support, Sales, Accounting, Development, Systems, and Networking.
+type User_Employee_Department struct {
+	Entity
+
+	// The name of one of SoftLayer's employee departments.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_User_External_Binding data type contains general information for a single external binding. This includes the 3rd party vendor, type of binding, and a unique identifier and password that is used to authenticate against the 3rd party service.
+type User_External_Binding struct {
+	Entity
+
+	// The flag that determines whether or not the external binding is active and will be used for authentication.
+	Active *bool `json:"active,omitempty" xmlrpc:"active,omitempty"`
+
+	// A count of attributes of an external authentication binding.
+	AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+	// Attributes of an external authentication binding.
+	Attributes []User_External_Binding_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+	// Information regarding the billing item for external authentication.
+	BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+	// The date that the external authentication binding was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The identifier used to identify this binding to an external authentication source.
+	ExternalId *string `json:"externalId,omitempty" xmlrpc:"externalId,omitempty"`
+
+	// An external authentication binding's internal identifier.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// An optional note for identifying the external binding.
+	Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
+
+	// The password used to authenticate the external id at an external authentication source.
+	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+	// The type of external authentication binding.
+	Type *User_External_Binding_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// The [[SoftLayer_User_External_Binding_Type|type]] identifier of an external authentication binding.
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+	// An external authentication binding's associated [[SoftLayer_User_Customer|user account]] id.
+	UserId *int `json:"userId,omitempty" xmlrpc:"userId,omitempty"`
+
+	// The vendor of an external authentication binding.
+	Vendor *User_External_Binding_Vendor `json:"vendor,omitempty" xmlrpc:"vendor,omitempty"`
+
+	// The [[SoftLayer_User_External_Binding_Vendor|vendor]] identifier of an external authentication binding.
+	VendorId *int `json:"vendorId,omitempty" xmlrpc:"vendorId,omitempty"`
+}
+
+// The SoftLayer_User_External_Binding_Attribute data type contains the value for a single attribute associated with an external binding. External binding attributes contain additional information about an external binding. An attribute can be generic or specific to a 3rd party vendor. For example, these attributes relate to Verisign:
+// *Credential Type
+// *Credential State
+// *Credential Expiration Date
+// *Credential Last Update Date
+type User_External_Binding_Attribute struct {
+	Entity
+
+	// The external authentication binding an attribute belongs to.
+	ExternalBinding *User_External_Binding `json:"externalBinding,omitempty" xmlrpc:"externalBinding,omitempty"`
+
+	// The value of an external binding attribute.
+	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// The SoftLayer_User_External_Binding_Type data type contains information relating to a type of external authentication binding. It contains a user-friendly name as well as a unique key name.
+type User_External_Binding_Type struct {
+	Entity
+
+	// The unique name used to identify a type of external authentication binding.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The user-friendly name of a type of external authentication binding.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The SoftLayer_User_External_Binding_Vendor data type contains information for a single external binding vendor. This information includes a user-friendly vendor name, a unique version of the vendor name, and a unique internal identifier that can be used when creating a new external binding.
+type User_External_Binding_Vendor struct {
+	Entity
+
+	// The unique identifier for an external binding vendor.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A unique version of the name property.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// The user-friendly name of an external binding vendor.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// A SoftLayer_User_Interface represents a generic user instance within the SoftLayer API. The SoftLayer API uses SoftLayer_User_Interfaces in cases where a user object could be one of many types of users. Currently the [[SoftLayer_User_Customer]] and [[SoftLayer_User_Employee]] classes are abstracted by this type.
+type User_Interface struct {
+	Entity
+}
+
+// no documentation yet
+type User_Permission_Action struct {
+	Entity
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// no documentation yet
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	Key *string `json:"key,omitempty" xmlrpc:"key,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type User_Permission_Group struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// A permission group's associated [[SoftLayer_Account|customer account]] id.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// A count of
+	ActionCount *uint `json:"actionCount,omitempty" xmlrpc:"actionCount,omitempty"`
+
+	// no documentation yet
+	Actions []User_Permission_Action `json:"actions,omitempty" xmlrpc:"actions,omitempty"`
+
+	// The date the permission group record was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The description of the permission group.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// The date the temporary group will be destroyed.
+	ExpirationDate *Time `json:"expirationDate,omitempty" xmlrpc:"expirationDate,omitempty"`
+
+	// A permission group's internal identifying number.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The date the permission group record was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// The name of the permission group.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// A count of
+	RoleCount *uint `json:"roleCount,omitempty" xmlrpc:"roleCount,omitempty"`
+
+	// no documentation yet
+	Roles []User_Permission_Role `json:"roles,omitempty" xmlrpc:"roles,omitempty"`
+
+	// The type of the permission group.
+	Type *User_Permission_Group_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// The type of permission group.
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+}
+
+// no documentation yet
+type User_Permission_Group_Type struct {
+	Entity
+
+	// no documentation yet
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// A count of
+	GroupCount *uint `json:"groupCount,omitempty" xmlrpc:"groupCount,omitempty"`
+
+	// no documentation yet
+	Groups []User_Permission_Group `json:"groups,omitempty" xmlrpc:"groups,omitempty"`
+
+	// no documentation yet
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// no documentation yet
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// no documentation yet
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type User_Permission_Role struct {
+	Entity
+
+	// no documentation yet
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// A permission role's associated [[SoftLayer_Account|customer account]] id.
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// A count of
+	ActionCount *uint `json:"actionCount,omitempty" xmlrpc:"actionCount,omitempty"`
+
+	// no documentation yet
+	Actions []User_Permission_Action `json:"actions,omitempty" xmlrpc:"actions,omitempty"`
+
+	// The date the permission role record was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The description of the permission role.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A count of
+	GroupCount *uint `json:"groupCount,omitempty" xmlrpc:"groupCount,omitempty"`
+
+	// no documentation yet
+	Groups []User_Permission_Group `json:"groups,omitempty" xmlrpc:"groups,omitempty"`
+
+	// A permission role's internal identifying number.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// The date the permission role record was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// The name of the permission role.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// A flag showing if new users should be automatically added to this role.
+	NewUserDefaultFlag *int `json:"newUserDefaultFlag,omitempty" xmlrpc:"newUserDefaultFlag,omitempty"`
+
+	// A flag showing if the permission role was created by our internal system for a single user. If this flag is set, only a single user can be assigned to this permission role and it cannot be deleted.
+	SystemFlag *int `json:"systemFlag,omitempty" xmlrpc:"systemFlag,omitempty"`
+
+	// A count of
+	UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"`
+
+	// no documentation yet
+	Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"`
+}
+
+// The SoftLayer_User_Preference data type contains a single user preference to a specific preference type.
+type User_Preference struct {
+	Entity
+
+	// Description of the user preference
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// Type of user preference
+	Type *User_Preference_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// The user's current preference value
+	Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
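+// Illustrative sketch (editor's addition): a user's preferences can presumably be
+// read through the generated relational getter on the user service; the names
+// below follow the library's conventions but should be verified against the
+// vendored release.
+//
+//	prefs, err := services.GetUserCustomerService(sess).
+//		Id(userId).GetPreferences()
+//	for _, p := range prefs {
+//		fmt.Println(*p.Value)
+//	}
+
+// The SoftLayer_User_Preference_Type data type contains a single preference type including the accepted values.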
+type User_Preference_Type struct { + Entity + + // A description of the preference type + Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"` + + // no documentation yet + KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"` + + // The name of the preference type + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // An example of accepted preference values + ValueExample *string `json:"valueExample,omitempty" xmlrpc:"valueExample,omitempty"` +} + +// The SoftLayer_User_Security_Question data type contains questions. +type User_Security_Question struct { + Entity + + // A security question's display order. + DisplayOrder *int `json:"displayOrder,omitempty" xmlrpc:"displayOrder,omitempty"` + + // A security question's internal identifying number. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A security question's question. + Question *string `json:"question,omitempty" xmlrpc:"question,omitempty"` + + // A security question's viewable flag. + Viewable *int `json:"viewable,omitempty" xmlrpc:"viewable,omitempty"` +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/utility.go b/vendor/github.com/softlayer/softlayer-go/datatypes/utility.go new file mode 100644 index 0000000000..4711f01292 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/utility.go @@ -0,0 +1,46 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package datatypes + +// Generic utility class used for gathering graphing parameters and actually creating graphs. +type Utility_Bandwidth_Graph struct { + Entity +} + +// no documentation yet +type Utility_Network struct { + Entity +} + +// no documentation yet +type Utility_ObjectFilter struct { + Entity +} + +// no documentation yet +type Utility_ObjectFilter_Operation struct { + Entity +} + +// no documentation yet +type Utility_ObjectFilter_Operation_Option struct { + Entity +} diff --git a/vendor/github.com/softlayer/softlayer-go/datatypes/virtual.go b/vendor/github.com/softlayer/softlayer-go/datatypes/virtual.go new file mode 100644 index 0000000000..752c8f7f8a --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/datatypes/virtual.go @@ -0,0 +1,1313 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package datatypes
+
+// This type presents the structure for a dedicated host. The type contains relational properties to distinguish a host and associate an account with it.
+type Virtual_DedicatedHost struct {
+	Entity
+
+	// The account which the dedicated host belongs to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// Dedicated host's associated account id
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// The container representing allocations on a dedicated host.
+	AllocationStatus *Container_Virtual_DedicatedHost_AllocationStatus `json:"allocationStatus,omitempty" xmlrpc:"allocationStatus,omitempty"`
+
+	// The backend router behind the dedicated host's pool.
+	BackendRouter *Hardware_Router_Backend `json:"backendRouter,omitempty" xmlrpc:"backendRouter,omitempty"`
+
+	// The billing item for a dedicated host.
+	BillingItem *Billing_Item_Virtual_DedicatedHost `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+	// Capacity this dedicated host's cpu allocation is restricted to
+	CpuCount *int `json:"cpuCount,omitempty" xmlrpc:"cpuCount,omitempty"`
+
+	// The date the dedicated host was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// The datacenter that the host resides in.
+	Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"`
+
+	// Capacity this dedicated host's disk allocation is restricted to
+	DiskCapacity *int `json:"diskCapacity,omitempty" xmlrpc:"diskCapacity,omitempty"`
+
+	// A count of the guests associated with a host.
+	GuestCount *uint `json:"guestCount,omitempty" xmlrpc:"guestCount,omitempty"`
+
+	// The guests associated with a host.
+	Guests []Virtual_Guest `json:"guests,omitempty" xmlrpc:"guests,omitempty"`
+
+	// Unique ID for the dedicated host.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// Capacity this dedicated host's memory allocation is restricted to
+	MemoryCapacity *int `json:"memoryCapacity,omitempty" xmlrpc:"memoryCapacity,omitempty"`
+
+	// The date the dedicated host was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// Dedicated host's name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The virtual disk image data type presents the structure in which a virtual disk image will be presented.
+//
+// Virtual block devices are assigned to disk images.
+type Virtual_Disk_Image struct {
+	Entity
+
+	// The billing item for a virtual disk image.
+	BillingItem *Billing_Item_Virtual_Disk_Image `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+	// A count of the block devices that a disk image is attached to. Block devices connect computing instances to disk images.
+	BlockDeviceCount *uint `json:"blockDeviceCount,omitempty" xmlrpc:"blockDeviceCount,omitempty"`
+
+	// The block devices that a disk image is attached to. Block devices connect computing instances to disk images.
+	BlockDevices []Virtual_Guest_Block_Device `json:"blockDevices,omitempty" xmlrpc:"blockDevices,omitempty"`
+
+	// no documentation yet
+	BootableVolumeFlag *bool `json:"bootableVolumeFlag,omitempty" xmlrpc:"bootableVolumeFlag,omitempty"`
+
+	// A disk image's size measured in gigabytes.
+	Capacity *int `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"`
+
+	// A disk image's unique md5 checksum.
+	Checksum *string `json:"checksum,omitempty" xmlrpc:"checksum,omitempty"`
+
+	// A count of
+	CoalescedDiskImageCount *uint `json:"coalescedDiskImageCount,omitempty" xmlrpc:"coalescedDiskImageCount,omitempty"`
+
+	// no documentation yet
+	CoalescedDiskImages []Virtual_Disk_Image `json:"coalescedDiskImages,omitempty" xmlrpc:"coalescedDiskImages,omitempty"`
+
+	// no documentation yet
+	CopyOnWriteFlag *bool `json:"copyOnWriteFlag,omitempty" xmlrpc:"copyOnWriteFlag,omitempty"`
+
+	// The date a disk image was created.
+	CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+	// A brief description of a virtual disk image.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A disk image's unique ID.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	LocalDiskFlag *bool `json:"localDiskFlag,omitempty" xmlrpc:"localDiskFlag,omitempty"`
+
+	// Whether this disk image is meant for storage of custom user data supplied with a Cloud Computing Instance order.
+	MetadataFlag *bool `json:"metadataFlag,omitempty" xmlrpc:"metadataFlag,omitempty"`
+
+	// The date a disk image was last modified.
+	ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+	// A descriptive name used to identify a disk image to a user.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The ID of the disk image that this disk image is based on, if applicable.
+	ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+
+	// A count of references to the software that resides on a disk image.
+	SoftwareReferenceCount *uint `json:"softwareReferenceCount,omitempty" xmlrpc:"softwareReferenceCount,omitempty"`
+
+	// References to the software that resides on a disk image.
+	SoftwareReferences []Virtual_Disk_Image_Software `json:"softwareReferences,omitempty" xmlrpc:"softwareReferences,omitempty"`
+
+	// The original disk image that the current disk image was cloned from.
+	SourceDiskImage *Virtual_Disk_Image `json:"sourceDiskImage,omitempty" xmlrpc:"sourceDiskImage,omitempty"`
+
+	// The storage repository that a disk image resides in.
+	StorageRepository *Virtual_Storage_Repository `json:"storageRepository,omitempty" xmlrpc:"storageRepository,omitempty"`
+
+	// The [[SoftLayer_Virtual_Storage_Repository|storage repository]] that a disk image is in.
+	StorageRepositoryId *int `json:"storageRepositoryId,omitempty" xmlrpc:"storageRepositoryId,omitempty"`
+
+	// The type of storage repository that a disk image resides in.
+	StorageRepositoryType *Virtual_Storage_Repository_Type `json:"storageRepositoryType,omitempty" xmlrpc:"storageRepositoryType,omitempty"`
+
+	// The template that attaches a disk image to a [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|archive]].
+	TemplateBlockDevice *Virtual_Guest_Block_Device_Template `json:"templateBlockDevice,omitempty" xmlrpc:"templateBlockDevice,omitempty"`
+
+	// A virtual disk image's type.
+	Type *Virtual_Disk_Image_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// A disk image's [[SoftLayer_Virtual_Disk_Image_Type|type]] ID
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+	// The unit of storage in which the size of the image is measured. Defaults to "GB" for gigabytes.
+	Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"`
+
+	// A disk image's unique ID on a virtualization platform.
+	Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+}
+
+// A SoftLayer_Virtual_Disk_Image_Software record connects a computing instance's virtual disk images with software records. This can be useful if a disk image is directly associated with software such as operating systems.
+type Virtual_Disk_Image_Software struct {
+	Entity
+
+	// The virtual disk image that is associated with software.
+	DiskImage *Virtual_Disk_Image `json:"diskImage,omitempty" xmlrpc:"diskImage,omitempty"`
+
+	// The unique identifier of a virtual disk image to software relationship.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// A count of username/Password pairs used for access to a Software Installation.
+	PasswordCount *uint `json:"passwordCount,omitempty" xmlrpc:"passwordCount,omitempty"`
+
+	// Username/Password pairs used for access to a Software Installation.
+	Passwords []Virtual_Disk_Image_Software_Password `json:"passwords,omitempty" xmlrpc:"passwords,omitempty"`
+
+	// The software associated with a virtual disk image.
+	SoftwareDescription *Software_Description `json:"softwareDescription,omitempty" xmlrpc:"softwareDescription,omitempty"`
+
+	// The unique identifier of the software that a virtual disk image is associated with.
+	SoftwareDescriptionId *int `json:"softwareDescriptionId,omitempty" xmlrpc:"softwareDescriptionId,omitempty"`
+}
+
+// This SoftLayer_Virtual_Disk_Image_Software_Password data type contains a password for a specific virtual disk image software instance.
+type Virtual_Disk_Image_Software_Password struct {
+	Entity
+
+	// A virtual disk image's password.
+	Password *string `json:"password,omitempty" xmlrpc:"password,omitempty"`
+
+	// The instance that this username/password pair is valid for.
+	Software *Virtual_Disk_Image_Software `json:"software,omitempty" xmlrpc:"software,omitempty"`
+
+	// A virtual disk image's username.
+	Username *string `json:"username,omitempty" xmlrpc:"username,omitempty"`
+}
+
+// SoftLayer_Virtual_Disk_Image_Type models the types of virtual disk images available to CloudLayer Computing Instances. Virtual disk image types describe if an image's data is preservable when upgraded, whether a disk contains a suspended virtual image, or if a disk contains crash dump information.
+type Virtual_Disk_Image_Type struct {
+	Entity
+
+	// A brief description of a virtual disk image type's function.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A virtual disk image type's key name.
+	KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+	// A virtual disk image type's name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The virtual guest data type presents the structure in which all virtual guests will be presented. Internally, the structure supports various virtualization platforms with no change to external interaction.
+//
+// A guest, also known as a virtual server, represents an allocation of resources on a virtual host.
+type Virtual_Guest struct {
+	Entity
+
+	// The account that a virtual guest belongs to.
+	Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+	// A computing instance's associated [[SoftLayer_Account|account]] id
+	AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+	// no documentation yet
+	AccountOwnedPoolFlag *bool `json:"accountOwnedPoolFlag,omitempty" xmlrpc:"accountOwnedPoolFlag,omitempty"`
+
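+	// Illustrative sketch (editor's addition): a guest and selected relational
+	// properties are typically fetched through the generated virtual guest
+	// service with an object mask; the names below are assumed from the
+	// library's conventions.
+	//
+	//	guestSvc := services.GetVirtualGuestService(sess)
+	//	guest, err := guestSvc.Id(guestId).
+	//		Mask("id;hostname;primaryIpAddress;datacenter.name").
+	//		GetObject()
+
+	// A virtual guest's currently active network monitoring incidents.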
+ ActiveNetworkMonitorIncident []Network_Monitor_Version1_Incident `json:"activeNetworkMonitorIncident,omitempty" xmlrpc:"activeNetworkMonitorIncident,omitempty"`
+
+ // A count of a virtual guest's currently active network monitoring incidents.
+ ActiveNetworkMonitorIncidentCount *uint `json:"activeNetworkMonitorIncidentCount,omitempty" xmlrpc:"activeNetworkMonitorIncidentCount,omitempty"`
+
+ // A count of
+ ActiveTicketCount *uint `json:"activeTicketCount,omitempty" xmlrpc:"activeTicketCount,omitempty"`
+
+ // no documentation yet
+ ActiveTickets []Ticket `json:"activeTickets,omitempty" xmlrpc:"activeTickets,omitempty"`
+
+ // A transaction that is still being performed on a cloud server.
+ ActiveTransaction *Provisioning_Version1_Transaction `json:"activeTransaction,omitempty" xmlrpc:"activeTransaction,omitempty"`
+
+ // A count of any active transaction(s) that are currently running for the server (example: os reload).
+ ActiveTransactionCount *uint `json:"activeTransactionCount,omitempty" xmlrpc:"activeTransactionCount,omitempty"`
+
+ // Any active transaction(s) that are currently running for the server (example: os reload).
+ ActiveTransactions []Provisioning_Version1_Transaction `json:"activeTransactions,omitempty" xmlrpc:"activeTransactions,omitempty"`
+
+ // The SoftLayer_Network_Storage_Allowed_Host information to connect this Virtual Guest to Network Storage volumes that require access control lists.
+ AllowedHost *Network_Storage_Allowed_Host `json:"allowedHost,omitempty" xmlrpc:"allowedHost,omitempty"`
+
+ // The SoftLayer_Network_Storage objects that this SoftLayer_Virtual_Guest has access to.
+ AllowedNetworkStorage []Network_Storage `json:"allowedNetworkStorage,omitempty" xmlrpc:"allowedNetworkStorage,omitempty"`
+
+ // A count of the SoftLayer_Network_Storage objects that this SoftLayer_Virtual_Guest has access to.
+ AllowedNetworkStorageCount *uint `json:"allowedNetworkStorageCount,omitempty" xmlrpc:"allowedNetworkStorageCount,omitempty"`
+
+ // A count of the SoftLayer_Network_Storage objects whose replicas this SoftLayer_Virtual_Guest has access to.
+ AllowedNetworkStorageReplicaCount *uint `json:"allowedNetworkStorageReplicaCount,omitempty" xmlrpc:"allowedNetworkStorageReplicaCount,omitempty"`
+
+ // The SoftLayer_Network_Storage objects whose replicas this SoftLayer_Virtual_Guest has access to.
+ AllowedNetworkStorageReplicas []Network_Storage `json:"allowedNetworkStorageReplicas,omitempty" xmlrpc:"allowedNetworkStorageReplicas,omitempty"`
+
+ // An antivirus/spyware software component object.
+ AntivirusSpywareSoftwareComponent *Software_Component `json:"antivirusSpywareSoftwareComponent,omitempty" xmlrpc:"antivirusSpywareSoftwareComponent,omitempty"`
+
+ // no documentation yet
+ ApplicationDeliveryController *Network_Application_Delivery_Controller `json:"applicationDeliveryController,omitempty" xmlrpc:"applicationDeliveryController,omitempty"`
+
+ // A count of
+ AttributeCount *uint `json:"attributeCount,omitempty" xmlrpc:"attributeCount,omitempty"`
+
+ // no documentation yet
+ Attributes []Virtual_Guest_Attribute `json:"attributes,omitempty" xmlrpc:"attributes,omitempty"`
+
+ // An object that stores the maximum level for the monitoring query types and response types.
+ AvailableMonitoring []Network_Monitor_Version1_Query_Host_Stratum `json:"availableMonitoring,omitempty" xmlrpc:"availableMonitoring,omitempty"`
+
+ // A count of an object that stores the maximum level for the monitoring query types and response types.
+ AvailableMonitoringCount *uint `json:"availableMonitoringCount,omitempty" xmlrpc:"availableMonitoringCount,omitempty"`
+
+ // The average daily private bandwidth usage for the current billing cycle.
+ AverageDailyPrivateBandwidthUsage *Float64 `json:"averageDailyPrivateBandwidthUsage,omitempty" xmlrpc:"averageDailyPrivateBandwidthUsage,omitempty"`
+
+ // The average daily public bandwidth usage for the current billing cycle.
+ AverageDailyPublicBandwidthUsage *Float64 `json:"averageDailyPublicBandwidthUsage,omitempty" xmlrpc:"averageDailyPublicBandwidthUsage,omitempty"`
+
+ // A count of a guest's backend network components.
+ BackendNetworkComponentCount *uint `json:"backendNetworkComponentCount,omitempty" xmlrpc:"backendNetworkComponentCount,omitempty"`
+
+ // A guest's backend network components.
+ BackendNetworkComponents []Virtual_Guest_Network_Component `json:"backendNetworkComponents,omitempty" xmlrpc:"backendNetworkComponents,omitempty"`
+
+ // A count of a guest's backend or private routers.
+ BackendRouterCount *uint `json:"backendRouterCount,omitempty" xmlrpc:"backendRouterCount,omitempty"`
+
+ // A guest's backend or private routers.
+ BackendRouters []Hardware `json:"backendRouters,omitempty" xmlrpc:"backendRouters,omitempty"`
+
+ // A computing instance's allotted bandwidth (measured in GB).
+ BandwidthAllocation *Float64 `json:"bandwidthAllocation,omitempty" xmlrpc:"bandwidthAllocation,omitempty"`
+
+ // A computing instance's allotted detail record. Allotment details link bandwidth allocation with allotments.
+ BandwidthAllotmentDetail *Network_Bandwidth_Version1_Allotment_Detail `json:"bandwidthAllotmentDetail,omitempty" xmlrpc:"bandwidthAllotmentDetail,omitempty"`
+
+ // The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+ BillingCycleBandwidthUsage []Network_Bandwidth_Usage `json:"billingCycleBandwidthUsage,omitempty" xmlrpc:"billingCycleBandwidthUsage,omitempty"`
+
+ // A count of the raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+ BillingCycleBandwidthUsageCount *uint `json:"billingCycleBandwidthUsageCount,omitempty" xmlrpc:"billingCycleBandwidthUsageCount,omitempty"`
+
+ // The raw private bandwidth usage data for the current billing cycle.
+ BillingCyclePrivateBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePrivateBandwidthUsage,omitempty" xmlrpc:"billingCyclePrivateBandwidthUsage,omitempty"`
+
+ // The raw public bandwidth usage data for the current billing cycle.
+ BillingCyclePublicBandwidthUsage *Network_Bandwidth_Usage `json:"billingCyclePublicBandwidthUsage,omitempty" xmlrpc:"billingCyclePublicBandwidthUsage,omitempty"`
+
+ // The billing item for a CloudLayer Compute Instance.
+ BillingItem *Billing_Item_Virtual_Guest `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // Determines whether the instance is ineligible for cancellation because it is disconnected.
+ BlockCancelBecauseDisconnectedFlag *bool `json:"blockCancelBecauseDisconnectedFlag,omitempty" xmlrpc:"blockCancelBecauseDisconnectedFlag,omitempty"`
+
+ // A count of a computing instance's block devices. Block devices link [[SoftLayer_Virtual_Disk_Image|disk images]] to computing instances.
+ BlockDeviceCount *uint `json:"blockDeviceCount,omitempty" xmlrpc:"blockDeviceCount,omitempty"`
+
+ // The global identifier for the image template that was used to provision or reload a guest.
+ BlockDeviceTemplateGroup *Virtual_Guest_Block_Device_Template_Group `json:"blockDeviceTemplateGroup,omitempty" xmlrpc:"blockDeviceTemplateGroup,omitempty"` + + // A computing instance's block devices. Block devices link [[SoftLayer_Virtual_Disk_Image|disk images]] to computing instances. + BlockDevices []Virtual_Guest_Block_Device `json:"blockDevices,omitempty" xmlrpc:"blockDevices,omitempty"` + + // A flag indicating a computing instance's console IP address is assigned. + ConsoleIpAddressFlag *bool `json:"consoleIpAddressFlag,omitempty" xmlrpc:"consoleIpAddressFlag,omitempty"` + + // A record containing information about a computing instance's console IP and port number. + ConsoleIpAddressRecord *Virtual_Guest_Network_Component_IpAddress `json:"consoleIpAddressRecord,omitempty" xmlrpc:"consoleIpAddressRecord,omitempty"` + + // A continuous data protection software component object. + ContinuousDataProtectionSoftwareComponent *Software_Component `json:"continuousDataProtectionSoftwareComponent,omitempty" xmlrpc:"continuousDataProtectionSoftwareComponent,omitempty"` + + // A guest's control panel. + ControlPanel *Software_Component `json:"controlPanel,omitempty" xmlrpc:"controlPanel,omitempty"` + + // The date a virtual computing instance was created. + CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // An object that provides commonly used bandwidth summary components for the current billing cycle. + CurrentBandwidthSummary *Metric_Tracking_Object_Bandwidth_Summary `json:"currentBandwidthSummary,omitempty" xmlrpc:"currentBandwidthSummary,omitempty"` + + // The datacenter that a virtual guest resides in. + Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"` + + // When true this flag specifies that a compute instance is to run on hosts that only have guests from the same account. + DedicatedAccountHostOnlyFlag *bool `json:"dedicatedAccountHostOnlyFlag,omitempty" xmlrpc:"dedicatedAccountHostOnlyFlag,omitempty"` + + // The dedicated host associated with this guest. + DedicatedHost *Virtual_DedicatedHost `json:"dedicatedHost,omitempty" xmlrpc:"dedicatedHost,omitempty"` + + // A computing instance's domain name + Domain *string `json:"domain,omitempty" xmlrpc:"domain,omitempty"` + + // A guest's associated EVault network storage service account. + EvaultNetworkStorage []Network_Storage `json:"evaultNetworkStorage,omitempty" xmlrpc:"evaultNetworkStorage,omitempty"` + + // A count of a guest's associated EVault network storage service account. + EvaultNetworkStorageCount *uint `json:"evaultNetworkStorageCount,omitempty" xmlrpc:"evaultNetworkStorageCount,omitempty"` + + // A computing instance's hardware firewall services. + FirewallServiceComponent *Network_Component_Firewall `json:"firewallServiceComponent,omitempty" xmlrpc:"firewallServiceComponent,omitempty"` + + // A count of a guest's frontend network components. + FrontendNetworkComponentCount *uint `json:"frontendNetworkComponentCount,omitempty" xmlrpc:"frontendNetworkComponentCount,omitempty"` + + // A guest's frontend network components. + FrontendNetworkComponents []Virtual_Guest_Network_Component `json:"frontendNetworkComponents,omitempty" xmlrpc:"frontendNetworkComponents,omitempty"` + + // A guest's frontend or public router. + FrontendRouters *Hardware `json:"frontendRouters,omitempty" xmlrpc:"frontendRouters,omitempty"` + + // A name reflecting the hostname and domain of the computing instance. 
+ FullyQualifiedDomainName *string `json:"fullyQualifiedDomainName,omitempty" xmlrpc:"fullyQualifiedDomainName,omitempty"`
+
+ // A guest's universally unique identifier.
+ GlobalIdentifier *string `json:"globalIdentifier,omitempty" xmlrpc:"globalIdentifier,omitempty"`
+
+ // no documentation yet
+ GuestBootParameter *Virtual_Guest_Boot_Parameter `json:"guestBootParameter,omitempty" xmlrpc:"guestBootParameter,omitempty"`
+
+ // The virtual host on which a virtual guest resides (available only on private clouds).
+ Host *Virtual_Host `json:"host,omitempty" xmlrpc:"host,omitempty"`
+
+ // A host IPS software component object.
+ HostIpsSoftwareComponent *Software_Component `json:"hostIpsSoftwareComponent,omitempty" xmlrpc:"hostIpsSoftwareComponent,omitempty"`
+
+ // A virtual computing instance's hostname
+ Hostname *string `json:"hostname,omitempty" xmlrpc:"hostname,omitempty"`
+
+ // Whether or not a computing instance is billed hourly instead of monthly.
+ HourlyBillingFlag *bool `json:"hourlyBillingFlag,omitempty" xmlrpc:"hourlyBillingFlag,omitempty"`
+
+ // Unique ID for a computing instance.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The total private inbound bandwidth for this computing instance for the current billing cycle.
+ InboundPrivateBandwidthUsage *Float64 `json:"inboundPrivateBandwidthUsage,omitempty" xmlrpc:"inboundPrivateBandwidthUsage,omitempty"`
+
+ // The total public inbound bandwidth for this computing instance for the current billing cycle.
+ InboundPublicBandwidthUsage *Float64 `json:"inboundPublicBandwidthUsage,omitempty" xmlrpc:"inboundPublicBandwidthUsage,omitempty"`
+
+ // A count of
+ InternalTagReferenceCount *uint `json:"internalTagReferenceCount,omitempty" xmlrpc:"internalTagReferenceCount,omitempty"`
+
+ // no documentation yet
+ InternalTagReferences []Tag_Reference `json:"internalTagReferences,omitempty" xmlrpc:"internalTagReferences,omitempty"`
+
+ // The last known power state of a virtual guest in the event the guest is turned off outside of IMS or has gone offline.
+ LastKnownPowerState *Virtual_Guest_Power_State `json:"lastKnownPowerState,omitempty" xmlrpc:"lastKnownPowerState,omitempty"`
+
+ // The last transaction in which a cloud server's operating system was loaded.
+ LastOperatingSystemReload *Provisioning_Version1_Transaction `json:"lastOperatingSystemReload,omitempty" xmlrpc:"lastOperatingSystemReload,omitempty"`
+
+ // no documentation yet
+ LastPowerStateId *int `json:"lastPowerStateId,omitempty" xmlrpc:"lastPowerStateId,omitempty"`
+
+ // The last transaction a cloud server had performed.
+ LastTransaction *Provisioning_Version1_Transaction `json:"lastTransaction,omitempty" xmlrpc:"lastTransaction,omitempty"`
+
+ // The last timestamp of when the guest was verified as a resident virtual machine on the host's hypervisor platform.
+ LastVerifiedDate *Time `json:"lastVerifiedDate,omitempty" xmlrpc:"lastVerifiedDate,omitempty"`
+
+ // A virtual guest's latest network monitoring incident.
+ LatestNetworkMonitorIncident *Network_Monitor_Version1_Incident `json:"latestNetworkMonitorIncident,omitempty" xmlrpc:"latestNetworkMonitorIncident,omitempty"`
+
+ // A flag indicating that the virtual guest has at least one disk which is local to the host it runs on. This does not include a SWAP device.
+ LocalDiskFlag *bool `json:"localDiskFlag,omitempty" xmlrpc:"localDiskFlag,omitempty"`
+
+ // Where a guest is located within SoftLayer's location hierarchy.
+ Location *Location `json:"location,omitempty" xmlrpc:"location,omitempty"`
+
+ // A flag indicating that the virtual guest is a managed resource.
+ ManagedResourceFlag *bool `json:"managedResourceFlag,omitempty" xmlrpc:"managedResourceFlag,omitempty"`
+
+ // The maximum amount of CPU resources a computing instance may utilize.
+ MaxCpu *int `json:"maxCpu,omitempty" xmlrpc:"maxCpu,omitempty"`
+
+ // The unit of the maximum amount of CPU resources a computing instance may utilize.
+ MaxCpuUnits *string `json:"maxCpuUnits,omitempty" xmlrpc:"maxCpuUnits,omitempty"`
+
+ // The maximum amount of memory a computing instance may utilize.
+ MaxMemory *int `json:"maxMemory,omitempty" xmlrpc:"maxMemory,omitempty"`
+
+ // The date of the most recent metric tracking poll performed.
+ MetricPollDate *Time `json:"metricPollDate,omitempty" xmlrpc:"metricPollDate,omitempty"`
+
+ // A guest's metric tracking object.
+ MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"`
+
+ // The metric tracking object id for this guest.
+ MetricTrackingObjectId *int `json:"metricTrackingObjectId,omitempty" xmlrpc:"metricTrackingObjectId,omitempty"`
+
+ // The date a virtual computing instance was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A count of
+ MonitoringAgentCount *uint `json:"monitoringAgentCount,omitempty" xmlrpc:"monitoringAgentCount,omitempty"`
+
+ // no documentation yet
+ MonitoringAgents []Monitoring_Agent `json:"monitoringAgents,omitempty" xmlrpc:"monitoringAgents,omitempty"`
+
+ // no documentation yet
+ MonitoringRobot *Monitoring_Robot `json:"monitoringRobot,omitempty" xmlrpc:"monitoringRobot,omitempty"`
+
+ // A virtual guest's network monitoring services.
+ MonitoringServiceComponent *Network_Monitor_Version1_Query_Host_Stratum `json:"monitoringServiceComponent,omitempty" xmlrpc:"monitoringServiceComponent,omitempty"`
+
+ // no documentation yet
+ MonitoringServiceEligibilityFlag *bool `json:"monitoringServiceEligibilityFlag,omitempty" xmlrpc:"monitoringServiceEligibilityFlag,omitempty"`
+
+ // no documentation yet
+ MonitoringServiceFlag *bool `json:"monitoringServiceFlag,omitempty" xmlrpc:"monitoringServiceFlag,omitempty"`
+
+ // The monitoring notification objects for this guest. Each object links this guest instance to a user account that will be notified if monitoring on this guest object fails.
+ MonitoringUserNotification []User_Customer_Notification_Virtual_Guest `json:"monitoringUserNotification,omitempty" xmlrpc:"monitoringUserNotification,omitempty"`
+
+ // A count of the monitoring notification objects for this guest. Each object links this guest instance to a user account that will be notified if monitoring on this guest object fails.
+ MonitoringUserNotificationCount *uint `json:"monitoringUserNotificationCount,omitempty" xmlrpc:"monitoringUserNotificationCount,omitempty"`
+
+ // A count of a guest's network components.
+ NetworkComponentCount *uint `json:"networkComponentCount,omitempty" xmlrpc:"networkComponentCount,omitempty"`
+
+ // A guest's network components.
+ NetworkComponents []Virtual_Guest_Network_Component `json:"networkComponents,omitempty" xmlrpc:"networkComponents,omitempty"`
+
+ // A count of a guest's network monitors.
+ NetworkMonitorCount *uint `json:"networkMonitorCount,omitempty" xmlrpc:"networkMonitorCount,omitempty"`
+
+ // A count of all of a virtual guest's network monitoring incidents.
+ NetworkMonitorIncidentCount *uint `json:"networkMonitorIncidentCount,omitempty" xmlrpc:"networkMonitorIncidentCount,omitempty"`
+
+ // All of a virtual guest's network monitoring incidents.
+ NetworkMonitorIncidents []Network_Monitor_Version1_Incident `json:"networkMonitorIncidents,omitempty" xmlrpc:"networkMonitorIncidents,omitempty"`
+
+ // A guest's network monitors.
+ NetworkMonitors []Network_Monitor_Version1_Query_Host `json:"networkMonitors,omitempty" xmlrpc:"networkMonitors,omitempty"`
+
+ // A guest's associated network storage accounts.
+ NetworkStorage []Network_Storage `json:"networkStorage,omitempty" xmlrpc:"networkStorage,omitempty"`
+
+ // A count of a guest's associated network storage accounts.
+ NetworkStorageCount *uint `json:"networkStorageCount,omitempty" xmlrpc:"networkStorageCount,omitempty"`
+
+ // A count of the network VLANs that a guest's network components are associated with.
+ NetworkVlanCount *uint `json:"networkVlanCount,omitempty" xmlrpc:"networkVlanCount,omitempty"`
+
+ // The network VLANs that a guest's network components are associated with.
+ NetworkVlans []Network_Vlan `json:"networkVlans,omitempty" xmlrpc:"networkVlans,omitempty"`
+
+ // A note of up to 1,000 characters about a virtual server.
+ Notes *string `json:"notes,omitempty" xmlrpc:"notes,omitempty"`
+
+ // An open ticket requesting cancellation of this server, if one exists.
+ OpenCancellationTicket *Ticket `json:"openCancellationTicket,omitempty" xmlrpc:"openCancellationTicket,omitempty"`
+
+ // A guest's operating system.
+ OperatingSystem *Software_Component_OperatingSystem `json:"operatingSystem,omitempty" xmlrpc:"operatingSystem,omitempty"`
+
+ // A guest's operating system software description.
+ OperatingSystemReferenceCode *string `json:"operatingSystemReferenceCode,omitempty" xmlrpc:"operatingSystemReferenceCode,omitempty"`
+
+ // The original package id provided with the order for a Cloud Computing Instance.
+ OrderedPackageId *string `json:"orderedPackageId,omitempty" xmlrpc:"orderedPackageId,omitempty"`
+
+ // The total private outbound bandwidth for this computing instance for the current billing cycle.
+ OutboundPrivateBandwidthUsage *Float64 `json:"outboundPrivateBandwidthUsage,omitempty" xmlrpc:"outboundPrivateBandwidthUsage,omitempty"`
+
+ // The total public outbound bandwidth for this computing instance for the current billing cycle.
+ OutboundPublicBandwidthUsage *Float64 `json:"outboundPublicBandwidthUsage,omitempty" xmlrpc:"outboundPublicBandwidthUsage,omitempty"`
+
+ // Whether the bandwidth usage for this computing instance for the current billing cycle exceeds the allocation.
+ OverBandwidthAllocationFlag *int `json:"overBandwidthAllocationFlag,omitempty" xmlrpc:"overBandwidthAllocationFlag,omitempty"`
+
+ // When true, this virtual guest must be migrated using SoftLayer_Virtual_Guest::migrate.
+ PendingMigrationFlag *bool `json:"pendingMigrationFlag,omitempty" xmlrpc:"pendingMigrationFlag,omitempty"`
+
+ // URI of the script to be downloaded and executed after installation is complete. This is deprecated in favor of supplementalCreateObjectOptions' postInstallScriptUri.
+ PostInstallScriptUri *string `json:"postInstallScriptUri,omitempty" xmlrpc:"postInstallScriptUri,omitempty"`
+
+ // The current power state of a virtual guest.
+ PowerState *Virtual_Guest_Power_State `json:"powerState,omitempty" xmlrpc:"powerState,omitempty"`
+
+ // A guest's primary private IP address.
+ PrimaryBackendIpAddress *string `json:"primaryBackendIpAddress,omitempty" xmlrpc:"primaryBackendIpAddress,omitempty"`
+
+ // A guest's primary backend network component.
+ PrimaryBackendNetworkComponent *Virtual_Guest_Network_Component `json:"primaryBackendNetworkComponent,omitempty" xmlrpc:"primaryBackendNetworkComponent,omitempty"`
+
+ // The guest's primary public IP address.
+ PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"`
+
+ // A guest's primary public network component.
+ PrimaryNetworkComponent *Virtual_Guest_Network_Component `json:"primaryNetworkComponent,omitempty" xmlrpc:"primaryNetworkComponent,omitempty"`
+
+ // Whether the computing instance only has access to the private network.
+ PrivateNetworkOnlyFlag *bool `json:"privateNetworkOnlyFlag,omitempty" xmlrpc:"privateNetworkOnlyFlag,omitempty"`
+
+ // Whether the bandwidth usage for this computing instance for the current billing cycle is projected to exceed the allocation.
+ ProjectedOverBandwidthAllocationFlag *int `json:"projectedOverBandwidthAllocationFlag,omitempty" xmlrpc:"projectedOverBandwidthAllocationFlag,omitempty"`
+
+ // The projected public outbound bandwidth for this computing instance for the current billing cycle.
+ ProjectedPublicBandwidthUsage *Float64 `json:"projectedPublicBandwidthUsage,omitempty" xmlrpc:"projectedPublicBandwidthUsage,omitempty"`
+
+ // no documentation yet
+ ProvisionDate *Time `json:"provisionDate,omitempty" xmlrpc:"provisionDate,omitempty"`
+
+ // A count of recent events that impact this computing instance.
+ RecentEventCount *uint `json:"recentEventCount,omitempty" xmlrpc:"recentEventCount,omitempty"`
+
+ // Recent events that impact this computing instance.
+ RecentEvents []Notification_Occurrence_Event `json:"recentEvents,omitempty" xmlrpc:"recentEvents,omitempty"`
+
+ // The regional group this guest is in.
+ RegionalGroup *Location_Group_Regional `json:"regionalGroup,omitempty" xmlrpc:"regionalGroup,omitempty"`
+
+ // no documentation yet
+ RegionalInternetRegistry *Network_Regional_Internet_Registry `json:"regionalInternetRegistry,omitempty" xmlrpc:"regionalInternetRegistry,omitempty"`
+
+ // A count of the collection of scale assets this guest corresponds to.
+ ScaleAssetCount *uint `json:"scaleAssetCount,omitempty" xmlrpc:"scaleAssetCount,omitempty"`
+
+ // The collection of scale assets this guest corresponds to.
+ ScaleAssets []Scale_Asset `json:"scaleAssets,omitempty" xmlrpc:"scaleAssets,omitempty"`
+
+ // The scale member for this guest, if applicable.
+ ScaleMember *Scale_Member_Virtual_Guest `json:"scaleMember,omitempty" xmlrpc:"scaleMember,omitempty"`
+
+ // Whether or not this guest is a member of a scale group and was automatically created as part of a scale group action.
+ ScaledFlag *bool `json:"scaledFlag,omitempty" xmlrpc:"scaledFlag,omitempty"`
+
+ // A count of a guest's vulnerability scan requests.
+ SecurityScanRequestCount *uint `json:"securityScanRequestCount,omitempty" xmlrpc:"securityScanRequestCount,omitempty"`
+
+ // A guest's vulnerability scan requests.
+ SecurityScanRequests []Network_Security_Scanner_Request `json:"securityScanRequests,omitempty" xmlrpc:"securityScanRequests,omitempty"`
+
+ // The server room that a guest is located in. There may be more than one server room for every data center.
+ ServerRoom *Location `json:"serverRoom,omitempty" xmlrpc:"serverRoom,omitempty"`
+
+ // A count of a guest's installed software.
+ SoftwareComponentCount *uint `json:"softwareComponentCount,omitempty" xmlrpc:"softwareComponentCount,omitempty"`
+
+ // A guest's installed software.
+ SoftwareComponents []Software_Component `json:"softwareComponents,omitempty" xmlrpc:"softwareComponents,omitempty"`
+
+ // A count of SSH keys to be installed on the server during provisioning or an OS reload.
+ SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"`
+
+ // SSH keys to be installed on the server during provisioning or an OS reload.
+ SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"`
+
+ // The number of CPUs available to a computing instance upon startup.
+ StartCpus *int `json:"startCpus,omitempty" xmlrpc:"startCpus,omitempty"`
+
+ // A computing instance's status.
+ Status *Virtual_Guest_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // A computing instance's [[SoftLayer_Virtual_Guest_Status|status]] ID
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+ // Extra options needed for [[SoftLayer_Virtual_Guest/createObject|createObject]] and [[SoftLayer_Virtual_Guest/createObjects|createObjects]].
+ SupplementalCreateObjectOptions *Virtual_Guest_SupplementalCreateObjectOptions `json:"supplementalCreateObjectOptions,omitempty" xmlrpc:"supplementalCreateObjectOptions,omitempty"`
+
+ // A count of
+ TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"`
+
+ // no documentation yet
+ TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"`
+
+ // The type of this virtual guest.
+ Type *Virtual_Guest_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // Gives the type of guest categorized as PUBLIC, DEDICATED or PRIVATE.
+ TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+
+ // A computing instance's associated upgrade request object, if any.
+ UpgradeRequest *Product_Upgrade_Request `json:"upgradeRequest,omitempty" xmlrpc:"upgradeRequest,omitempty"`
+
+ // A count of a list of users that have access to this computing instance.
+ UserCount *uint `json:"userCount,omitempty" xmlrpc:"userCount,omitempty"`
+
+ // A base64 encoded string containing custom user data for a Cloud Computing Instance order.
+ UserData []Virtual_Guest_Attribute `json:"userData,omitempty" xmlrpc:"userData,omitempty"`
+
+ // A count of a base64 encoded string containing custom user data for a Cloud Computing Instance order.
+ UserDataCount *uint `json:"userDataCount,omitempty" xmlrpc:"userDataCount,omitempty"`
+
+ // A list of users that have access to this computing instance.
+ Users []User_Customer `json:"users,omitempty" xmlrpc:"users,omitempty"`
+
+ // Unique ID for a computing instance's record on a virtualization platform.
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+
+ // The name of the bandwidth allotment that a piece of hardware belongs to.
+ VirtualRack *Network_Bandwidth_Version1_Allotment `json:"virtualRack,omitempty" xmlrpc:"virtualRack,omitempty"`
+
+ // The id of the bandwidth allotment that a computing instance belongs to.
+ VirtualRackId *int `json:"virtualRackId,omitempty" xmlrpc:"virtualRackId,omitempty"`
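+
+ // A hedged sketch of the fields commonly populated when ordering a guest
+ // through [[SoftLayer_Virtual_Guest/createObject|createObject]]; the `sl`
+ // helpers, the `datatypes` package name, and the datacenter short name are
+ // assumptions for illustration, not part of this file:
+ //
+ //	guest := datatypes.Virtual_Guest{
+ //		Hostname:          sl.String("web01"),
+ //		Domain:            sl.String("example.com"),
+ //		StartCpus:         sl.Int(1),
+ //		MaxMemory:         sl.Int(1024),
+ //		HourlyBillingFlag: sl.Bool(true),
+ //		Datacenter:        &datatypes.Location{Name: sl.String("dal09")},
+ //	}
+
+ // The name of the bandwidth allotment that a computing instance belongs to.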
+ VirtualRackName *string `json:"virtualRackName,omitempty" xmlrpc:"virtualRackName,omitempty"`
+}
+
+// no documentation yet
+type Virtual_Guest_Attribute struct {
+ Entity
+
+ // no documentation yet
+ Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"`
+
+ // no documentation yet
+ Type *Virtual_Guest_Attribute_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+ // A guest attribute's value.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// no documentation yet
+type Virtual_Guest_Attribute_Type struct {
+ Entity
+
+ // no documentation yet
+ Keyname *string `json:"keyname,omitempty" xmlrpc:"keyname,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Virtual_Guest_Attribute_UserData struct {
+ Virtual_Guest_Attribute
+}
+
+// The block device data type presents the structure in which all block devices will be presented. A block device attaches a disk image to a guest. Internally, the structure supports various virtualization platforms with no change to external interaction.
+//
+// A guest, also known as a virtual server, represents an allocation of resources on a virtual host.
+type Virtual_Guest_Block_Device struct {
+ Entity
+
+ // A flag indicating if a block device can be booted from.
+ BootableFlag *int `json:"bootableFlag,omitempty" xmlrpc:"bootableFlag,omitempty"`
+
+ // The date a block device was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A name used to identify a block device.
+ Device *string `json:"device,omitempty" xmlrpc:"device,omitempty"`
+
+ // The disk image that a block device connects to in a computing instance.
+ DiskImage *Virtual_Disk_Image `json:"diskImage,omitempty" xmlrpc:"diskImage,omitempty"`
+
+ // A block device [[SoftLayer_Virtual_Disk_Image|disk image]]'s unique ID.
+ DiskImageId *int `json:"diskImageId,omitempty" xmlrpc:"diskImageId,omitempty"`
+
+ // The computing instance that this block device is attached to.
+ Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"`
+
+ // The [[SoftLayer_Virtual_Guest|computing instance]] that a block device is associated with.
+ GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"`
+
+ // A flag indicating if a block device can be plugged into a computing instance without having to shut down the instance.
+ HotPlugFlag *int `json:"hotPlugFlag,omitempty" xmlrpc:"hotPlugFlag,omitempty"`
+
+ // A computing instance block device's unique ID.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The date a block device was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The writing mode that a virtual block device is mounted as, either "RO" for read-only mode or "RW" for read and write mode.
+ MountMode *string `json:"mountMode,omitempty" xmlrpc:"mountMode,omitempty"`
+
+ // The type of device that a virtual block device is mounted as, either "Disk" for a directly connected storage disk or "CD" for devices that are mounted as optical drives.
+ MountType *string `json:"mountType,omitempty" xmlrpc:"mountType,omitempty"`
+
+ // no documentation yet
+ Status *Virtual_Guest_Block_Device_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // The status of the device, either disconnected or connected.
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+ // A block device's unique ID on a virtualization platform.
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+}
+
+// no documentation yet
+type Virtual_Guest_Block_Device_Status struct {
+ Entity
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The virtual block device template data type presents the structure in which all archived image templates are presented.
+//
+// A virtual block device template, also known as an image template, represents the image of a virtual guest instance.
+type Virtual_Guest_Block_Device_Template struct {
+ Entity
+
+ // A name that identifies a block device template.
+ Device *string `json:"device,omitempty" xmlrpc:"device,omitempty"`
+
+ // A block device template's disk image.
+ DiskImage *Virtual_Disk_Image `json:"diskImage,omitempty" xmlrpc:"diskImage,omitempty"`
+
+ // A block device template's [[SoftLayer_Virtual_Disk_Image|disk image]] ID.
+ DiskImageId *int `json:"diskImageId,omitempty" xmlrpc:"diskImageId,omitempty"`
+
+ // The amount of disk space that a block device template is using. Use this number along with the units property to obtain the correct space used.
+ DiskSpace *Float64 `json:"diskSpace,omitempty" xmlrpc:"diskSpace,omitempty"`
+
+ // A block device template's group. Several block device templates can be combined together into a group for archiving purposes.
+ Group *Virtual_Guest_Block_Device_Template_Group `json:"group,omitempty" xmlrpc:"group,omitempty"`
+
+ // A block device template's [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|group]] ID.
+ GroupId *int `json:"groupId,omitempty" xmlrpc:"groupId,omitempty"`
+
+ // A block device template's unique ID.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The units that will be used with the disk space property to identify the amount of disk space used.
+ Units *string `json:"units,omitempty" xmlrpc:"units,omitempty"`
+}
+
+// The virtual block device template group data type presents the structure in which a group of archived image templates will be presented. The structure consists of a parent template group which contains multiple child template group objects. Each child template group object represents the image template in a particular location. Unless editing/deleting a specific child template group object, it is best to use the parent object.
+//
+// A virtual block device template group, also known as an image template group, represents an image of a virtual guest instance.
+type Virtual_Guest_Block_Device_Template_Group struct {
+ Entity
+
+ // A block device template group's [[SoftLayer_Account|account]].
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // A count of
+ AccountContactCount *uint `json:"accountContactCount,omitempty" xmlrpc:"accountContactCount,omitempty"`
+
+ // no documentation yet
+ AccountContacts []Account_Contact `json:"accountContacts,omitempty" xmlrpc:"accountContacts,omitempty"`
+
+ // A block device template group's [[SoftLayer_Account|account]] ID
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // A count of the accounts which may have read-only access to an image template group. Will only be populated for parent template group objects.
+ AccountReferenceCount *uint `json:"accountReferenceCount,omitempty" xmlrpc:"accountReferenceCount,omitempty"`
+
+ // The accounts which may have read-only access to an image template group. Will only be populated for parent template group objects.
+ AccountReferences []Virtual_Guest_Block_Device_Template_Group_Accounts `json:"accountReferences,omitempty" xmlrpc:"accountReferences,omitempty"`
+
+ // A count of the block devices that are part of an image template group.
+ BlockDeviceCount *uint `json:"blockDeviceCount,omitempty" xmlrpc:"blockDeviceCount,omitempty"`
+
+ // The block devices that are part of an image template group.
+ BlockDevices []Virtual_Guest_Block_Device_Template `json:"blockDevices,omitempty" xmlrpc:"blockDevices,omitempty"`
+
+ // The total disk space of all images in an image template group.
+ BlockDevicesDiskSpaceTotal *Float64 `json:"blockDevicesDiskSpaceTotal,omitempty" xmlrpc:"blockDevicesDiskSpaceTotal,omitempty"`
+
+ // The image template groups that are clones of an image template group.
+ Children []Virtual_Guest_Block_Device_Template_Group `json:"children,omitempty" xmlrpc:"children,omitempty"`
+
+ // A count of the image template groups that are clones of an image template group.
+ ChildrenCount *uint `json:"childrenCount,omitempty" xmlrpc:"childrenCount,omitempty"`
+
+ // The date a block device template group was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // The location containing this image template group. Will only be populated for child template group objects.
+ Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"`
+
+ // A count of a collection of locations containing a copy of this image template group. Will only be populated for parent template group objects.
+ DatacenterCount *uint `json:"datacenterCount,omitempty" xmlrpc:"datacenterCount,omitempty"`
+
+ // A collection of locations containing a copy of this image template group. Will only be populated for parent template group objects.
+ Datacenters []Location `json:"datacenters,omitempty" xmlrpc:"datacenters,omitempty"`
+
+ // A flag indicating if this is a flex image.
+ FlexImageFlag *bool `json:"flexImageFlag,omitempty" xmlrpc:"flexImageFlag,omitempty"`
+
+ // An image template's universally unique identifier.
+ GlobalIdentifier *string `json:"globalIdentifier,omitempty" xmlrpc:"globalIdentifier,omitempty"`
+
+ // A block device template group's unique ID.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The virtual disk image type of this template. Value will be populated on parent and child, but only supports object filtering on the parent.
+ ImageType *Virtual_Disk_Image_Type `json:"imageType,omitempty" xmlrpc:"imageType,omitempty"`
+
+ // The virtual disk image type keyname (e.g. SYSTEM, DISK_CAPTURE, ISO, etc) of this template. Value will be populated on parent and child, but only supports object filtering on the parent.
+ ImageTypeKeyName *string `json:"imageTypeKeyName,omitempty" xmlrpc:"imageTypeKeyName,omitempty"`
+
+ // A user-definable, optional name of a block device template group.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // A block device template group's user defined note.
+ Note *string `json:"note,omitempty" xmlrpc:"note,omitempty"`
+
+ // The image template group that another image template group was cloned from.
+ Parent *Virtual_Guest_Block_Device_Template_Group `json:"parent,omitempty" xmlrpc:"parent,omitempty"`
+
+ // A block device template group's [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|parent]] ID. This will only be set when a template group is created from a previously existing template group.
+ ParentId *int `json:"parentId,omitempty" xmlrpc:"parentId,omitempty"`
+
+ // no documentation yet
+ PublicFlag *int `json:"publicFlag,omitempty" xmlrpc:"publicFlag,omitempty"`
+
+ // A count of the SSH keys to be installed on the server when provisioned or reloaded from an image template group.
+ SshKeyCount *uint `json:"sshKeyCount,omitempty" xmlrpc:"sshKeyCount,omitempty"`
+
+ // The SSH keys to be installed on the server when provisioned or reloaded from an image template group.
+ SshKeys []Security_Ssh_Key `json:"sshKeys,omitempty" xmlrpc:"sshKeys,omitempty"`
+
+ // A template group's status.
+ Status *Virtual_Guest_Block_Device_Template_Group_Status `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // A block device template group's [[SoftLayer_Virtual_Guest_Block_Device_Template_Group_Status|status]] ID
+ StatusId *int `json:"statusId,omitempty" xmlrpc:"statusId,omitempty"`
+
+ // The storage repository that an image template group resides on.
+ StorageRepository *Virtual_Storage_Repository `json:"storageRepository,omitempty" xmlrpc:"storageRepository,omitempty"`
+
+ // A block device template group's user defined summary.
+ Summary *string `json:"summary,omitempty" xmlrpc:"summary,omitempty"`
+
+ // A count of the tags associated with this image template group.
+ TagReferenceCount *uint `json:"tagReferenceCount,omitempty" xmlrpc:"tagReferenceCount,omitempty"`
+
+ // The tags associated with this image template group.
+ TagReferences []Tag_Reference `json:"tagReferences,omitempty" xmlrpc:"tagReferences,omitempty"`
+
+ // A transaction that is being performed on an image template group.
+ Transaction *Provisioning_Version1_Transaction `json:"transaction,omitempty" xmlrpc:"transaction,omitempty"`
+
+ // A block device template group's [[SoftLayer_Provisioning_Version1_Transaction|transaction]] ID. This will only be set when there is a transaction being performed on the block device template group.
+ TransactionId *int `json:"transactionId,omitempty" xmlrpc:"transactionId,omitempty"`
+
+ // A block device template group's [[SoftLayer_User|user]] ID
+ UserRecordId *int `json:"userRecordId,omitempty" xmlrpc:"userRecordId,omitempty"`
+}
+
+// The SoftLayer_Virtual_Guest_Block_Device_Template_Group_Accounts data type represents the SoftLayer customer accounts which have access to provision CloudLayer Computing Instances from an image template group.
+//
+// All accounts other than the image template group owner have read-only access to that image template group.
+//
+// It is important to note that this data type should only exist to give accounts access to the parent template group object, not the child. All image template sharing between accounts should occur on the parent object.
+type Virtual_Guest_Block_Device_Template_Group_Accounts struct {
+ Entity
+
+ // The [[SoftLayer_Account|account]] that an image template group is shared with.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The [[SoftLayer_Account|account]] ID which will have access to an image.
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // The date access was granted to an account.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
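+
+ // Since sharing is meant to happen on the parent template group, a
+ // hypothetical helper (the name is invented for illustration) can use the
+ // ParentId convention documented above to identify a parent group:
+ //
+ //	func isParentGroup(g datatypes.Virtual_Guest_Block_Device_Template_Group) bool {
+ //		return g.ParentId == nil
+ //	}
+
+ // The [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|image template group]] that is shared with an account.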
+ Group *Virtual_Guest_Block_Device_Template_Group `json:"group,omitempty" xmlrpc:"group,omitempty"`
+
+ // The [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|group]] ID which access will be granted to.
+ GroupId *int `json:"groupId,omitempty" xmlrpc:"groupId,omitempty"`
+}
+
+// The virtual block device template group status data type represents the current status of the image template. Depending upon the status, the image template can be used for provisioning or reloading.
+//
+// For an operating system reload, the image template will need to have a status of 'Active' or 'Deprecated'. For a provision, the image template will need to have a status of 'Active'.
+type Virtual_Guest_Block_Device_Template_Group_Status struct {
+ Entity
+
+ // no documentation yet
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Virtual_Guest_Boot_Parameter struct {
+ Entity
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // no documentation yet
+ Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"`
+
+ // no documentation yet
+ GuestBootParameterType *Virtual_Guest_Boot_Parameter_Type `json:"guestBootParameterType,omitempty" xmlrpc:"guestBootParameterType,omitempty"`
+
+ // no documentation yet
+ GuestBootParameterTypeId *int `json:"guestBootParameterTypeId,omitempty" xmlrpc:"guestBootParameterTypeId,omitempty"`
+
+ // no documentation yet
+ GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"`
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+}
+
+// Describes a virtual guest boot parameter. In this context, the class models arguments sent to cloud computing instances, such as booting into single user mode or booting into bash.
+type Virtual_Guest_Boot_Parameter_Type struct {
+ Entity
+
+ // Available boot options.
+ BootOption *string `json:"bootOption,omitempty" xmlrpc:"bootOption,omitempty"`
+
+ // no documentation yet
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
+
+ // A description of the boot parameter.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // Identifier for the record.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The key name of the boot parameter.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // The common name of the boot parameter.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The virtual machine arguments.
+ Value *string `json:"value,omitempty" xmlrpc:"value,omitempty"`
+}
+
+// The virtual guest network component data type presents the structure in which all computing instance network components are presented. Internally, the structure supports various virtualization platforms with no change to external interaction.
+//
+// A guest, also known as a virtual server, represents an allocation of resources on a virtual host.
+type Virtual_Guest_Network_Component struct {
+ Entity
+
+ // The date a computing instance's network component was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"` + + // The computing instance that this network component exists on. + Guest *Virtual_Guest `json:"guest,omitempty" xmlrpc:"guest,omitempty"` + + // The unique ID of the [[SoftLayer_Virtual_Guest|computing instance]] that this network component belongs to. + GuestId *int `json:"guestId,omitempty" xmlrpc:"guestId,omitempty"` + + // no documentation yet + HighAvailabilityFirewallFlag *bool `json:"highAvailabilityFirewallFlag,omitempty" xmlrpc:"highAvailabilityFirewallFlag,omitempty"` + + // A computing instance's network component's unique ID. + Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"` + + // A count of the records of all IP addresses bound to a computing instance's network component. + IpAddressBindingCount *uint `json:"ipAddressBindingCount,omitempty" xmlrpc:"ipAddressBindingCount,omitempty"` + + // The records of all IP addresses bound to a computing instance's network component. + IpAddressBindings []Virtual_Guest_Network_Component_IpAddress `json:"ipAddressBindings,omitempty" xmlrpc:"ipAddressBindings,omitempty"` + + // A computing instance network component's unique MAC address. + MacAddress *string `json:"macAddress,omitempty" xmlrpc:"macAddress,omitempty"` + + // A computing instance network component's maximum allowed speed, measured in Mbit per second. ''maxSpeed'' is determined by the capabilities of the network interface and the port speed purchased on your SoftLayer computing instance. + MaxSpeed *int `json:"maxSpeed,omitempty" xmlrpc:"maxSpeed,omitempty"` + + // The date a computing instance's network component was last modified. + ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"` + + // A computing instance network component's short name. This is usually ''eth''. Use this in conjunction with the ''port'' property to identify a network component. For instance, the "eth0" interface on a server has the network component name "eth" and port 0. + Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"` + + // The upstream network component firewall. + NetworkComponentFirewall *Network_Component_Firewall `json:"networkComponentFirewall,omitempty" xmlrpc:"networkComponentFirewall,omitempty"` + + // A computing instance's network component's [[SoftLayer_Virtual_Network|network]] ID + NetworkId *int `json:"networkId,omitempty" xmlrpc:"networkId,omitempty"` + + // The VLAN that a computing instance network component's subnet is associated with. + NetworkVlan *Network_Vlan `json:"networkVlan,omitempty" xmlrpc:"networkVlan,omitempty"` + + // A computing instance network component's port number. Most computing instances have more than one network interface. The port property separates these interfaces. Use this in conjunction with the ''name'' property to identify a network component. For instance, the "eth0" interface on a server has the network component name "eth" and port 0. + Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"` + + // A computing instance network component's primary IP address. 
+ PrimaryIpAddress *string `json:"primaryIpAddress,omitempty" xmlrpc:"primaryIpAddress,omitempty"`
+
+ // no documentation yet
+ PrimaryIpAddressRecord *Network_Subnet_IpAddress `json:"primaryIpAddressRecord,omitempty" xmlrpc:"primaryIpAddressRecord,omitempty"`
+
+ // A network component's subnet for its primary IP address.
+ PrimarySubnet *Network_Subnet `json:"primarySubnet,omitempty" xmlrpc:"primarySubnet,omitempty"`
+
+ // A network component's primary IPv6 IP address record.
+ PrimaryVersion6IpAddressRecord *Network_Subnet_IpAddress `json:"primaryVersion6IpAddressRecord,omitempty" xmlrpc:"primaryVersion6IpAddressRecord,omitempty"`
+
+ // A network component's router.
+ Router *Hardware_Router `json:"router,omitempty" xmlrpc:"router,omitempty"`
+
+ // A count of the bindings associating security groups to this network component.
+ SecurityGroupBindingCount *uint `json:"securityGroupBindingCount,omitempty" xmlrpc:"securityGroupBindingCount,omitempty"`
+
+ // The bindings associating security groups to this network component.
+ SecurityGroupBindings []Virtual_Network_SecurityGroup_NetworkComponentBinding `json:"securityGroupBindings,omitempty" xmlrpc:"securityGroupBindings,omitempty"`
+
+ // A computing instance network component's speed, measured in Mbit per second.
+ Speed *int `json:"speed,omitempty" xmlrpc:"speed,omitempty"`
+
+ // A computing instance network component's status. This can be one of four possible values: "ACTIVE", "DISABLED", "INACTIVE", or "ABUSE_DISCONNECT". "ACTIVE" network components are enabled and in use on a cloud instance. "ABUSE_DISCONNECT" status components have been administratively disabled by SoftLayer accounting or abuse. "DISABLED" components have been administratively disabled by you, the user. You should never see a network interface in MACWAIT state. If you happen to see one, please contact SoftLayer support.
+ Status *string `json:"status,omitempty" xmlrpc:"status,omitempty"`
+
+ // A count of a network component's subnets. A subnet is a group of IP addresses.
+ SubnetCount *uint `json:"subnetCount,omitempty" xmlrpc:"subnetCount,omitempty"`
+
+ // A network component's subnets. A subnet is a group of IP addresses.
+ Subnets []Network_Subnet `json:"subnets,omitempty" xmlrpc:"subnets,omitempty"`
+
+ // A computing instance's network component's unique ID on a virtualization platform.
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+}
+
+// The SoftLayer_Virtual_Guest_Network_Component_IpAddress data type contains general information relating to the binding of a single network component to a single SoftLayer IP address.
+type Virtual_Guest_Network_Component_IpAddress struct {
+ Entity
+
+ // The IP address associated with this object's network component.
+ IpAddress *Network_Subnet_IpAddress `json:"ipAddress,omitempty" xmlrpc:"ipAddress,omitempty"`
+
+ // The unique ID of the [[SoftLayer_Network_Subnet_ipAddress|ip address]] this virtual IP address is associated with.
+ IpAddressId *int `json:"ipAddressId,omitempty" xmlrpc:"ipAddressId,omitempty"`
+
+ // The network component associated with this object's IP address.
+ NetworkComponent *Virtual_Guest_Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"`
+
+ // The port that a network component has reserved. This field is only required for some IP address types.
+ Port *int `json:"port,omitempty" xmlrpc:"port,omitempty"`
+
+ // The type of IP that this IP address record references. Some examples are PRIMARY for the network component's primary IP address and CONSOLE_PROXY, which represents the IP information for logging into a computing instance's console.
+ Type *string `json:"type,omitempty" xmlrpc:"type,omitempty"`
+}
+
+// The power state class provides the common set of values in which a guest's power state is presented in the SoftLayer API.
+type Virtual_Guest_Power_State struct {
+ Entity
+
+ // The description of a power state.
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The key name of a power state.
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // The name of a power state.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Virtual_Guest_Status struct {
+ Entity
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// no documentation yet
+type Virtual_Guest_SupplementalCreateObjectOptions struct {
+ Entity
+
+ // When explicitly set to true, createObject(s) will fail unless the order is started automatically. This can be used by automated systems to fail an order that might otherwise require manual approval. For multi-guest orders via [[SoftLayer_Virtual_Guest/createObjects|createObjects]], this value must be the exact same for every item.
+ ImmediateApprovalOnlyFlag *bool `json:"immediateApprovalOnlyFlag,omitempty" xmlrpc:"immediateApprovalOnlyFlag,omitempty"`
+
+ // URI of the script to be downloaded and executed after installation is complete. This can be different for each virtual guest when multiple are sent to [[SoftLayer_Virtual_Guest/createObjects|createObjects]].
+ PostInstallScriptUri *string `json:"postInstallScriptUri,omitempty" xmlrpc:"postInstallScriptUri,omitempty"`
+}
+
+// SoftLayer_Virtual_Guest_Type models the type of a [[SoftLayer_Virtual_Guest]] (PUBLIC | DEDICATED | PRIVATE)
+type Virtual_Guest_Type struct {
+ Entity
+
+ // no documentation yet
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ KeyName *string `json:"keyName,omitempty" xmlrpc:"keyName,omitempty"`
+
+ // no documentation yet
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+}
+
+// The virtual host represents the platform on which virtual guests reside. At times a virtual host has no allocations on the physical server; however, with many modern platforms it is a virtual machine with small CPU and memory allocations that runs in the Control Domain.
+type Virtual_Host struct {
+ Entity
+
+ // The account which a virtual host belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // A virtual host's associated account id
+ AccountId *int `json:"accountId,omitempty" xmlrpc:"accountId,omitempty"`
+
+ // Boolean flag indicating whether this virtualization platform gets billed per guest rather than at a fixed rate.
+ BilledPerGuestFlag *bool `json:"billedPerGuestFlag,omitempty" xmlrpc:"billedPerGuestFlag,omitempty"`
+
+ // Boolean flag indicating whether this virtualization platform gets billed per memory usage rather than at a fixed rate.
+ BilledPerMemoryUsageFlag *bool `json:"billedPerMemoryUsageFlag,omitempty" xmlrpc:"billedPerMemoryUsageFlag,omitempty"`
+
+ // The date a virtual host was created.
+ CreateDate *Time `json:"createDate,omitempty" xmlrpc:"createDate,omitempty"`
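+
+ // A short sketch of attaching the supplemental create options defined above
+ // to a guest order, for example to set a post-install script; the `sl`
+ // helper and `datatypes` package name are assumptions:
+ //
+ //	guest.SupplementalCreateObjectOptions = &datatypes.Virtual_Guest_SupplementalCreateObjectOptions{
+ //		PostInstallScriptUri: sl.String("https://example.com/post-install.sh"),
+ //	}
+
+ // A virtual host's description.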
+ Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+ // The enabled flag specifies whether a virtual host can run guests.
+ EnabledFlag *int `json:"enabledFlag,omitempty" xmlrpc:"enabledFlag,omitempty"`
+
+ // A count of the guests associated with a virtual host.
+ GuestCount *uint `json:"guestCount,omitempty" xmlrpc:"guestCount,omitempty"`
+
+ // The guests associated with a virtual host.
+ Guests []Virtual_Guest `json:"guests,omitempty" xmlrpc:"guests,omitempty"`
+
+ // The hardware record which a virtual host resides on.
+ Hardware *Hardware_Server `json:"hardware,omitempty" xmlrpc:"hardware,omitempty"`
+
+ // The ID of the hardware device on which a virtual host resides.
+ HardwareId *int `json:"hardwareId,omitempty" xmlrpc:"hardwareId,omitempty"`
+
+ // Unique ID for a virtual host.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // The metric tracking object for this virtual host.
+ MetricTrackingObject *Metric_Tracking_Object `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"`
+
+ // The date a virtual host was last modified.
+ ModifyDate *Time `json:"modifyDate,omitempty" xmlrpc:"modifyDate,omitempty"`
+
+ // A virtual host's name.
+ Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+ // The amount of memory physically available for a virtual host.
+ PhysicalMemoryCapacity *int `json:"physicalMemoryCapacity,omitempty" xmlrpc:"physicalMemoryCapacity,omitempty"`
+
+ // Unique ID for a virtual host's record on a virtualization platform.
+ Uuid *string `json:"uuid,omitempty" xmlrpc:"uuid,omitempty"`
+}
+
+// The SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding data type contains general information for a single binding. A binding associates a [[SoftLayer_Virtual_Guest_Network_Component]] with a [[SoftLayer_Network_SecurityGroup]].
+type Virtual_Network_SecurityGroup_NetworkComponentBinding struct {
+ Entity
+
+ // The unique ID for a binding.
+ Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+ // no documentation yet
+ NetworkComponent *Virtual_Guest_Network_Component `json:"networkComponent,omitempty" xmlrpc:"networkComponent,omitempty"`
+
+ // The ID of the network component.
+ NetworkComponentId *int `json:"networkComponentId,omitempty" xmlrpc:"networkComponentId,omitempty"`
+
+ // no documentation yet
+ SecurityGroup *Network_SecurityGroup `json:"securityGroup,omitempty" xmlrpc:"securityGroup,omitempty"`
+
+ // The ID of the security group.
+ SecurityGroupId *int `json:"securityGroupId,omitempty" xmlrpc:"securityGroupId,omitempty"`
+}
+
+// The SoftLayer_Virtual_Storage_Repository represents a web based storage system that can be accessed through many types of devices, interfaces, and other resources.
+type Virtual_Storage_Repository struct {
+ Entity
+
+ // The [[SoftLayer_Account|account]] that a storage repository belongs to.
+ Account *Account `json:"account,omitempty" xmlrpc:"account,omitempty"`
+
+ // The current billing item for a storage repository.
+ BillingItem *Billing_Item `json:"billingItem,omitempty" xmlrpc:"billingItem,omitempty"`
+
+ // A storage repository's capacity, measured in gigabytes (GB).
+ Capacity *Float64 `json:"capacity,omitempty" xmlrpc:"capacity,omitempty"`
+
+ // The datacenter that a virtual storage repository resides in.
+	Datacenter *Location `json:"datacenter,omitempty" xmlrpc:"datacenter,omitempty"`
+
+	// A storage repository's description that describes its purpose or contents
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A count of the [[SoftLayer_Virtual_Disk_Image|disk images]] that are in a storage repository. Disk images are the virtual hard drives for a virtual guest.
+	DiskImageCount *uint `json:"diskImageCount,omitempty" xmlrpc:"diskImageCount,omitempty"`
+
+	// The [[SoftLayer_Virtual_Disk_Image|disk images]] that are in a storage repository. Disk images are the virtual hard drives for a virtual guest.
+	DiskImages []Virtual_Disk_Image `json:"diskImages,omitempty" xmlrpc:"diskImages,omitempty"`
+
+	// A count of the computing instances that have disk images in a storage repository.
+	GuestCount *uint `json:"guestCount,omitempty" xmlrpc:"guestCount,omitempty"`
+
+	// The computing instances that have disk images in a storage repository.
+	Guests []Virtual_Guest `json:"guests,omitempty" xmlrpc:"guests,omitempty"`
+
+	// Unique ID for a storage repository.
+	Id *int `json:"id,omitempty" xmlrpc:"id,omitempty"`
+
+	// no documentation yet
+	MetricTrackingObject *Metric_Tracking_Object_Virtual_Storage_Repository `json:"metricTrackingObject,omitempty" xmlrpc:"metricTrackingObject,omitempty"`
+
+	// A storage repository's name that describes its purpose or contents
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// no documentation yet
+	PublicFlag *int `json:"publicFlag,omitempty" xmlrpc:"publicFlag,omitempty"`
+
+	// The current billing item for a public storage repository.
+	PublicImageBillingItem *Billing_Item `json:"publicImageBillingItem,omitempty" xmlrpc:"publicImageBillingItem,omitempty"`
+
+	// A storage repository's [[SoftLayer_Virtual_Storage_Repository_Type|type]].
+	Type *Virtual_Storage_Repository_Type `json:"type,omitempty" xmlrpc:"type,omitempty"`
+
+	// A storage repository's [[SoftLayer_Virtual_Storage_Repository_Type|type]] ID
+	TypeId *int `json:"typeId,omitempty" xmlrpc:"typeId,omitempty"`
+}
+
+// SoftLayer employs many different types of repositories that computing instances use as their storage volume. SoftLayer_Virtual_Storage_Repository_Type models a single storage type. Common types of storage repositories include networked file systems, logical volume management, and local disk volumes for swap and page file management.
+type Virtual_Storage_Repository_Type struct {
+	Entity
+
+	// A brief description of a storage repository type.
+	Description *string `json:"description,omitempty" xmlrpc:"description,omitempty"`
+
+	// A storage repository type's name.
+	Name *string `json:"name,omitempty" xmlrpc:"name,omitempty"`
+
+	// The storage repositories on a SoftLayer customer account that belong to this type.
+	StorageRepositories []Virtual_Storage_Repository `json:"storageRepositories,omitempty" xmlrpc:"storageRepositories,omitempty"`
+
+	// A count of the storage repositories on a SoftLayer customer account that belong to this type.
+	StorageRepositoryCount *uint `json:"storageRepositoryCount,omitempty" xmlrpc:"storageRepositoryCount,omitempty"`
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/filter/filters.go b/vendor/github.com/softlayer/softlayer-go/filter/filters.go
new file mode 100644
index 0000000000..5f78040141
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/filter/filters.go
@@ -0,0 +1,305 @@
+/**
+ * Copyright 2016 IBM Corp.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// See reference at https://sldn.softlayer.com/article/object-filters. +// Examples in the README.md file and in the examples directory. +package filter + +import ( + "encoding/json" + "fmt" + "strings" +) + +type Filter struct { + Path string + Op string + Opts map[string]interface{} + Val interface{} +} + +type Filters []Filter + +// Returns an array of Filters that you can later call .Build() on. +func New(args ...Filter) Filters { + return args +} + +// This is like calling New().Build(). +// Returns a JSON string that can be used as the object filter. +func Build(args ...Filter) string { + filters := Filters{} + + for _, arg := range args { + filters = append(filters, arg) + } + + return filters.Build() +} + +// This creates a new Filter. The path is a dot-delimited path down +// to the attribute this filter is for. The second value parameter +// is optional. +func Path(path string, val ...interface{}) Filter { + if len(val) > 0 { + return Filter{Path: path, Val: val[0]} + } + + return Filter{Path: path} +} + +// Builds the filter string in JSON format +func (fs Filters) Build() string { + // Loops around filters, + // splitting path on '.' and looping around path pieces. + // Idea is to create a map/tree like map[string]interface{}. + // Every component in the path is a node to create in the tree. + // Once we get to the leaf, we set the operation. + // map[string]interface{}{"operation": op+" "+value} + // If Op is "", then just map[string]interface{}{"operation": value}. + // Afterwards, the Opts are traversed; []map[string]interface{}{} + // For every entry in Opts, we create one map, and append it to an array of maps. + // At the end, json.Marshal the whole thing. + result := map[string]interface{}{} + for _, filter := range fs { + if filter.Path == "" { + continue + } + + cursor := result + nodes := strings.Split(filter.Path, ".") + for len(nodes) > 1 { + branch := nodes[0] + if _, ok := cursor[branch]; !ok { + cursor[branch] = map[string]interface{}{} + } + cursor = cursor[branch].(map[string]interface{}) + nodes = nodes[1:len(nodes)] + } + + leaf := nodes[0] + if filter.Val != nil { + operation := filter.Val + if filter.Op != "" { + var format string + switch filter.Val.(type) { + case int: + format = "%d" + default: + format = "%s" + } + operation = filter.Op + " " + fmt.Sprintf(format, filter.Val) + } + + cursor[leaf] = map[string]interface{}{ + "operation": operation, + } + } + + if filter.Opts == nil { + continue + } + + options := []map[string]interface{}{} + for name, value := range filter.Opts { + options = append(options, map[string]interface{}{ + "name": name, + "value": value, + }) + } + + cursor[leaf] = map[string]interface{}{ + "operation": filter.Op, + "options": options, + } + } + + jsonStr, _ := json.Marshal(result) + return string(jsonStr) +} + +// Builds the filter string in JSON format +func (f Filter) Build() string { + return Build(f) +} + +// Add options to the filter. 
Can be chained for multiple options.
+func (f Filter) Opt(name string, value interface{}) Filter {
+	if f.Opts == nil {
+		f.Opts = map[string]interface{}{}
+	}
+
+	f.Opts[name] = value
+	return f
+}
+
+// Set this filter to test if property is equal to the value
+func (f Filter) Eq(val interface{}) Filter {
+	f.Op = ""
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property is not equal to the value
+func (f Filter) NotEq(val interface{}) Filter {
+	f.Op = "!="
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property is like the value
+func (f Filter) Like(val interface{}) Filter {
+	f.Op = "~"
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property is unlike the value
+func (f Filter) NotLike(val interface{}) Filter {
+	f.Op = "!~"
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property is less than the value
+func (f Filter) LessThan(val interface{}) Filter {
+	f.Op = "<"
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property is less than or equal to the value
+func (f Filter) LessThanOrEqual(val interface{}) Filter {
+	f.Op = "<="
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property is greater than the value
+func (f Filter) GreaterThan(val interface{}) Filter {
+	f.Op = ">"
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property is greater than or equal to the value
+func (f Filter) GreaterThanOrEqual(val interface{}) Filter {
+	f.Op = ">="
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property is null
+func (f Filter) IsNull() Filter {
+	f.Op = ""
+	f.Val = "is null"
+	return f
+}
+
+// Set this filter to test if property is not null
+func (f Filter) NotNull() Filter {
+	f.Op = ""
+	f.Val = "not null"
+	return f
+}
+
+// Set this filter to test if property contains the value
+func (f Filter) Contains(val interface{}) Filter {
+	f.Op = "*="
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property does not contain the value
+func (f Filter) NotContains(val interface{}) Filter {
+	f.Op = "!*="
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property starts with the value
+func (f Filter) StartsWith(val interface{}) Filter {
+	f.Op = "^="
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property does not start with the value
+func (f Filter) NotStartsWith(val interface{}) Filter {
+	f.Op = "!^="
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property ends with the value
+func (f Filter) EndsWith(val interface{}) Filter {
+	f.Op = "$="
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property does not end with the value
+func (f Filter) NotEndsWith(val interface{}) Filter {
+	f.Op = "!$="
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property is one of the values in args.
+func (f Filter) In(args ...interface{}) Filter {
+	f.Op = "in"
+	values := []interface{}{}
+	for _, arg := range args {
+		values = append(values, arg)
+	}
+
+	return f.Opt("data", values)
+}
+
+// Set this filter to test if property has a date older than the value in days.
+func (f Filter) DaysPast(val interface{}) Filter {
+	f.Op = ">= currentDate -"
+	f.Val = val
+	return f
+}
+
+// Set this filter to test if property has the exact date as the value.
+func (f Filter) Date(date string) Filter {
+	f.Op = "isDate"
+	f.Val = nil
+	return f.Opt("date", []string{date})
+}
+
+// Set this filter to test if property has a date before the value.
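+// A hedged usage sketch (the "createDate" path and the date string are
+// illustrative assumptions, not values confirmed by this package):
+//
+//	f := filter.Path("createDate").DateBefore("2016-01-01")
+//	jsonStr := f.Build() // JSON object filter, e.g. for a service's Filter()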
+func (f Filter) DateBefore(date string) Filter {
+	f.Op = "lessThanDate"
+	f.Val = nil
+	return f.Opt("date", []string{date})
+}
+
+// Set this filter to test if property has a date after the value.
+func (f Filter) DateAfter(date string) Filter {
+	f.Op = "greaterThanDate"
+	f.Val = nil
+	return f.Opt("date", []string{date})
+}
+
+// Set this filter to test if property has a date between the values.
+func (f Filter) DateBetween(start string, end string) Filter {
+	f.Op = "betweenDate"
+	f.Val = nil
+	return f.Opt("startDate", []string{start}).Opt("endDate", []string{end})
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/helpers/hardware/hardware.go b/vendor/github.com/softlayer/softlayer-go/helpers/hardware/hardware.go
new file mode 100644
index 0000000000..2f1f09534c
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/helpers/hardware/hardware.go
@@ -0,0 +1,61 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package hardware
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/helpers/location"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/session"
+)
+
+// GetRouterByName returns a Hardware that matches the provided hostname,
+// or an error if no matching Hardware can be found.
+// SoftLayer does not provide a direct path to retrieve a list of router
+// objects, so we first fetch the Location_Datacenter object and then search
+// its array of router objects.
+func GetRouterByName(sess *session.Session, hostname string, args ...interface{}) (datatypes.Hardware, error) {
+	var mask string
+	if len(args) > 0 {
+		mask = args[0].(string)
+	}
+
+	r := regexp.MustCompile("[A-Za-z]+[0-9]+$")
+	dcName := r.FindString(hostname)
+	if len(dcName) == 0 {
+		return datatypes.Hardware{}, fmt.Errorf("Cannot get datacenter name from hostname %s", hostname)
+	}
+
+	datacenter, err := location.GetDatacenterByName(sess, dcName, "hardwareRouters[id,hostname]")
+	if err != nil {
+		return datatypes.Hardware{}, err
+	}
+
+	for _, router := range datacenter.HardwareRouters {
+		if *router.Hostname == hostname {
+			return services.GetHardwareService(sess).
+				Id(*router.Id).
+				Mask(mask).
+				GetObject()
+		}
+	}
+
+	return datatypes.Hardware{}, fmt.Errorf("No routers found with hostname of %s", hostname)
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/helpers/location/location.go b/vendor/github.com/softlayer/softlayer-go/helpers/location/location.go
new file mode 100644
index 0000000000..b1016966a2
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/helpers/location/location.go
@@ -0,0 +1,86 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package location
+
+import (
+	"fmt"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/session"
+)
+
+// GetLocationByName returns a Location that matches the provided name, or an
+// error if no matching Location can be found.
+//
+// If you need to access a datacenter's unique properties, use
+// GetDatacenterByName instead.
+func GetLocationByName(sess *session.Session, name string, args ...interface{}) (datatypes.Location, error) {
+	var mask string
+	if len(args) > 0 {
+		mask = args[0].(string)
+	}
+
+	locs, err := services.GetLocationService(sess).
+		Mask(mask).
+		Filter(filter.New(filter.Path("name").Eq(name)).Build()).
+		GetDatacenters()
+
+	if err != nil {
+		return datatypes.Location{}, err
+	}
+
+	// An empty filtered result set does not raise an error
+	if len(locs) == 0 {
+		return datatypes.Location{}, fmt.Errorf("No locations found with name of %s", name)
+	}
+
+	return locs[0], nil
+}
+
+// GetDatacenterByName returns a Location_Datacenter that matches the provided
+// name, or an error if no matching datacenter can be found.
+//
+// Note that unless you need to access datacenter-specific properties
+// (backendHardwareRouters, etc.), it is more efficient to use
+// GetLocationByName, since GetDatacenterByName requires an extra call to the
+// API.
+func GetDatacenterByName(sess *session.Session, name string, args ...interface{}) (datatypes.Location_Datacenter, error) {
+	var mask string
+	if len(args) > 0 {
+		mask = args[0].(string)
+	}
+
+	// SoftLayer does not provide a direct path to retrieve a list of "Location_Datacenter"
+	// objects. Location_Datacenter.getDatacenters() actually returns a list of "Location"
+	// objects, which do not have datacenter-specific properties populated. So we do this
+	// in two passes
+
+	// First get the Location which matches the name
+	location, err := GetLocationByName(sess, name, "mask[id]")
+
+	if err != nil {
+		return datatypes.Location_Datacenter{}, err
+	}
+
+	// Now get the Datacenter record itself.
+	return services.GetLocationDatacenterService(sess).
+		Id(*location.Id).
+		Mask(mask).
+		GetObject()
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/helpers/network/network.go b/vendor/github.com/softlayer/softlayer-go/helpers/network/network.go
new file mode 100644
index 0000000000..be8e6e06cb
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/helpers/network/network.go
@@ -0,0 +1,115 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package network
+
+import (
+	"fmt"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/filter"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/session"
+)
+
+// GetNadcLbVipByName gets a virtual IP address, by name, attached to a load
+// balancer appliance such as the Netscaler VPX. For some load balancer
+// appliances, looking up the virtual IP address by name is necessary since
+// they don't get assigned an id.
+func GetNadcLbVipByName(sess *session.Session, nadcId int, vipName string, mask ...string) (*datatypes.Network_LoadBalancer_VirtualIpAddress, error) {
+	service := services.GetNetworkApplicationDeliveryControllerService(sess).
+		Id(nadcId)
+
+	if len(mask) > 0 {
+		service = service.Mask(mask[0])
+	}
+
+	vips, err := service.GetLoadBalancers()
+
+	if err != nil {
+		return nil, fmt.Errorf("Error getting NADC load balancers: %s", err)
+	}
+
+	for _, vip := range vips {
+		if *vip.Name == vipName {
+			return &vip, nil
+		}
+	}
+
+	return nil, fmt.Errorf("Could not find any VIPs for NADC %d matching name %s", nadcId, vipName)
+}
+
+// GetNadcLbVipServiceByName gets a load balancer service, by name, attached
+// to a load balancer appliance such as the Netscaler VPX. For some load
+// balancer appliances, looking up the service by name is necessary since
+// services don't get assigned an id.
+func GetNadcLbVipServiceByName(
+	sess *session.Session, nadcId int, vipName string, serviceName string, mask ...string,
+) (*datatypes.Network_LoadBalancer_Service, error) {
+	vipMask := "id,name,services[name,destinationIpAddress,destinationPort,weight,healthCheck,connectionLimit]"
+
+	if len(mask) != 0 {
+		vipMask = mask[0]
+	}
+
+	vip, err := GetNadcLbVipByName(sess, nadcId, vipName, vipMask)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, service := range vip.Services {
+		if *service.Name == serviceName {
+			return &service, nil
+		}
+	}
+
+	return nil, fmt.Errorf(
+		"Could not find service %s in VIP %s for load balancer %d",
+		serviceName, vipName, nadcId)
+}
+
+// GetOsTypeByName retrieves an object of type SoftLayer_Network_Storage_Iscsi_OS_Type.
+// To order block storage, the OS type is a mandatory input.
+// GetOsTypeByName helps in getting the OS id and keyName.
+// Examples:
+// id:6 name: Hyper-V keyName: HYPER_V
+// id:12 name: Linux keyName: LINUX
+// id:22 name: VMWare keyName: VMWARE
+// id:30 name: Xen keyName: XEN
+func GetOsTypeByName(sess *session.Session, name string, args ...interface{}) (datatypes.Network_Storage_Iscsi_OS_Type, error) {
+	var mask string
+	if len(args) > 0 {
+		mask = args[0].(string)
+	}
+
+	osTypes, err := services.GetNetworkStorageIscsiOSTypeService(sess).
+		Mask(mask).
+		Filter(filter.New(filter.Path("name").Eq(name)).Build()).
+ GetAllObjects() + + if err != nil { + return datatypes.Network_Storage_Iscsi_OS_Type{}, err + } + + // An empty filtered result set does not raise an error + if len(osTypes) == 0 { + return datatypes.Network_Storage_Iscsi_OS_Type{}, fmt.Errorf("No OS type found with name of %s", name) + } + + return osTypes[0], nil +} diff --git a/vendor/github.com/softlayer/softlayer-go/helpers/order/order.go b/vendor/github.com/softlayer/softlayer-go/helpers/order/order.go new file mode 100644 index 0000000000..3cf5911407 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/helpers/order/order.go @@ -0,0 +1,56 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package order + +import ( + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" +) + +// CheckBillingOrderStatus returns true if the status of the billing order for +// the provided product order receipt is in the list of provided statuses. +// Returns false otherwise, along with the billing order item used to check the statuses, +// and any error encountered. +func CheckBillingOrderStatus(sess *session.Session, receipt *datatypes.Container_Product_Order_Receipt, statuses []string) (bool, *datatypes.Billing_Order_Item, error) { + service := services.GetBillingOrderItemService(sess) + + item, err := service. + Id(*receipt.PlacedOrder.Items[0].Id). + Mask("mask[id,billingItem[id,provisionTransaction[id,transactionStatus[name]]]]"). + GetObject() + + if err != nil { + return false, nil, err + } + + currentStatus := *item.BillingItem.ProvisionTransaction.TransactionStatus.Name + for _, status := range statuses { + if currentStatus == status { + return true, &item, nil + } + } + + return false, &item, nil +} + +// CheckBillingOrderComplete returns true if the status of the billing order for +// the provided product order receipt is "COMPLETE". Returns false otherwise, +// along with the billing order item used to check the statuses, and any error encountered. +func CheckBillingOrderComplete(sess *session.Session, receipt *datatypes.Container_Product_Order_Receipt) (bool, *datatypes.Billing_Order_Item, error) { + return CheckBillingOrderStatus(sess, receipt, []string{"COMPLETE"}) +} diff --git a/vendor/github.com/softlayer/softlayer-go/helpers/product/product.go b/vendor/github.com/softlayer/softlayer-go/helpers/product/product.go new file mode 100644 index 0000000000..10f9444121 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/helpers/product/product.go @@ -0,0 +1,192 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package product + +import ( + "fmt" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/filter" + "github.com/softlayer/softlayer-go/services" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" + "strings" +) + +// CPUCategoryCode Category code for cpus +const CPUCategoryCode = "guest_core" + +// MemoryCategoryCode Category code for Memory +const MemoryCategoryCode = "ram" + +// NICSpeedCategoryCode Category code for NIC speed +const NICSpeedCategoryCode = "port_speed" + +// DedicatedLoadBalancerCategoryCode Category code for Dedicated Load Balancer +const DedicatedLoadBalancerCategoryCode = "dedicated_load_balancer" + +// ProxyLoadBalancerCategoryCode Category code for Shared local load balancer (proxy load balancer) +const ProxyLoadBalancerCategoryCode = "proxy_load_balancer" + +// GetPackageByType Get the Product_Package which matches the specified +// package type +func GetPackageByType( + sess *session.Session, + packageType string, + mask ...string, +) (datatypes.Product_Package, error) { + + objectMask := "id,name,description,isActive,type[keyName]" + if len(mask) > 0 { + objectMask = mask[0] + } + + service := services.GetProductPackageService(sess) + + // Get package id + packages, err := service. + Mask(objectMask). + Filter( + filter.Build( + filter.Path("type.keyName").Eq(packageType), + ), + ). + Limit(1). + GetAllObjects() + if err != nil { + return datatypes.Product_Package{}, err + } + + packages = rejectOutletPackages(packages) + + if len(packages) == 0 { + return datatypes.Product_Package{}, fmt.Errorf("No product packages found for %s", packageType) + } + + return packages[0], nil +} + +// rejectOutletPackages removes packages whose description or name contains the +// string "OUTLET". +func rejectOutletPackages(packages []datatypes.Product_Package) []datatypes.Product_Package { + selected := []datatypes.Product_Package{} + + for _, pkg := range packages { + if (pkg.Name == nil || !strings.Contains(*pkg.Name, "OUTLET")) && + (pkg.Description == nil || !strings.Contains(*pkg.Description, "OUTLET")) { + + selected = append(selected, pkg) + } + } + + return selected +} + +// GetPackageProducts Get a list of product items for a specific product +// package ID +func GetPackageProducts( + sess *session.Session, + packageId int, + mask ...string, +) ([]datatypes.Product_Item, error) { + + objectMask := "id,capacity,description,units,keyName,prices[id,categories[id,name,categoryCode]]" + if len(mask) > 0 { + objectMask = mask[0] + } + + service := services.GetProductPackageService(sess) + + // Get product items for package id + return service. + Id(packageId). + Mask(objectMask). + GetItems() +} + +// SelectProductPricesByCategory Get a list of Product_Item_Prices that +// match a specific set of price category code / product item +// capacity combinations. 
+// These combinations are passed as a map of strings (category code) mapped
+// to float64 (capacity).
+// For example, these are the options to specify an upgrade to 8 cpus and 32
+// GB of memory:
+// {"guest_core": 8.0, "ram": 32.0}
+// The optional public parameters select between public and private prices:
+// public[0] selects the type of network, and public[1] selects the type of
+// cores.
+func SelectProductPricesByCategory(
+	productItems []datatypes.Product_Item,
+	options map[string]float64,
+	public ...bool,
+) []datatypes.Product_Item_Price {
+
+	forPublicNetwork := true
+	if len(public) > 0 {
+		forPublicNetwork = public[0]
+	}
+
+	// Check type of cores
+	forPublicCores := true
+	if len(public) > 1 {
+		forPublicCores = public[1]
+	}
+
+	// Filter product items based on sets of category codes and capacity numbers
+	prices := []datatypes.Product_Item_Price{}
+	priceCheck := map[string]bool{}
+	for _, productItem := range productItems {
+		isPrivate := strings.Contains(sl.Get(productItem.KeyName, "").(string), "PRIVATE")
+		isPublic := strings.Contains(sl.Get(productItem.Description, "Public").(string), "Public")
+		for _, category := range productItem.Prices[0].Categories {
+			for categoryCode, capacity := range options {
+				if _, ok := priceCheck[categoryCode]; ok {
+					continue
+				}
+
+				if productItem.Capacity == nil {
+					continue
+				}
+
+				if *category.CategoryCode != categoryCode {
+					continue
+				}
+
+				if *productItem.Capacity != datatypes.Float64(capacity) {
+					continue
+				}
+
+				// Logic taken from softlayer-python @ http://bit.ly/2bN9Gbu
+				switch categoryCode {
+				case CPUCategoryCode:
+					if forPublicCores == isPrivate {
+						continue
+					}
+				case NICSpeedCategoryCode:
+					if forPublicNetwork != isPublic {
+						continue
+					}
+				}
+
+				prices = append(prices, productItem.Prices[0])
+				priceCheck[categoryCode] = true
+			}
+		}
+	}
+
+	return prices
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/helpers/virtual/virtual.go b/vendor/github.com/softlayer/softlayer-go/helpers/virtual/virtual.go
new file mode 100644
index 0000000000..0a86f177c7
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/helpers/virtual/virtual.go
@@ -0,0 +1,91 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package virtual
+
+import (
+	"time"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/helpers/product"
+	"github.com/softlayer/softlayer-go/services"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// UpgradeVirtualGuest upgrades a virtual guest to a specified set of features
+// (e.g. cpu, ram). The time at which the upgrade takes place can also be
+// specified via the optional `when` parameter; it defaults to 'now' if
+// omitted. The features to upgrade are specified as the options accepted by
+// product.SelectProductPricesByCategory().
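+//
+// A hedged usage sketch (the session setup and the guest id are illustrative
+// assumptions, not part of this package):
+//
+//	sess := session.New() // credentials resolved from env vars or config
+//	guest := datatypes.Virtual_Guest{Id: sl.Int(12345)} // hypothetical guest id
+//	receipt, err := UpgradeVirtualGuest(sess, &guest, map[string]float64{
+//		product.CPUCategoryCode:    8.0,  // 8 cores
+//		product.MemoryCategoryCode: 32.0, // 32 GB of ram
+//	})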
+func UpgradeVirtualGuest(
+	sess *session.Session,
+	guest *datatypes.Virtual_Guest,
+	options map[string]float64,
+	when ...time.Time,
+) (datatypes.Container_Product_Order_Receipt, error) {
+
+	if guest.PrivateNetworkOnlyFlag == nil || guest.DedicatedAccountHostOnlyFlag == nil {
+		service := services.GetVirtualGuestService(sess)
+		guestForFlag, err := service.Id(*guest.Id).Mask("privateNetworkOnlyFlag,dedicatedAccountHostOnlyFlag").GetObject()
+		if err != nil {
+			return datatypes.Container_Product_Order_Receipt{}, err
+		}
+
+		guest.PrivateNetworkOnlyFlag = guestForFlag.PrivateNetworkOnlyFlag
+		guest.DedicatedAccountHostOnlyFlag = guestForFlag.DedicatedAccountHostOnlyFlag
+	}
+
+	pkg, err := product.GetPackageByType(sess, "VIRTUAL_SERVER_INSTANCE")
+	if err != nil {
+		return datatypes.Container_Product_Order_Receipt{}, err
+	}
+
+	productItems, err := product.GetPackageProducts(sess, *pkg.Id)
+	if err != nil {
+		return datatypes.Container_Product_Order_Receipt{}, err
+	}
+
+	prices := product.SelectProductPricesByCategory(productItems, options, !*guest.PrivateNetworkOnlyFlag, !*guest.DedicatedAccountHostOnlyFlag)
+
+	upgradeTime := time.Now().UTC().Format(time.RFC3339)
+	if len(when) > 0 {
+		upgradeTime = when[0].UTC().Format(time.RFC3339)
+	}
+
+	order := datatypes.Container_Product_Order_Virtual_Guest_Upgrade{
+		Container_Product_Order_Virtual_Guest: datatypes.Container_Product_Order_Virtual_Guest{
+			Container_Product_Order_Hardware_Server: datatypes.Container_Product_Order_Hardware_Server{
+				Container_Product_Order: datatypes.Container_Product_Order{
+					PackageId: pkg.Id,
+					VirtualGuests: []datatypes.Virtual_Guest{
+						*guest,
+					},
+					Prices: prices,
+					Properties: []datatypes.Container_Product_Order_Property{
+						{
+							Name:  sl.String("MAINTENANCE_WINDOW"),
+							Value: &upgradeTime,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	orderService := services.GetProductOrderService(sess)
+	return orderService.PlaceOrder(&order, sl.Bool(false))
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/account.go b/vendor/github.com/softlayer/softlayer-go/services/account.go
new file mode 100644
index 0000000000..ff09b164e1
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/account.go
@@ -0,0 +1,4602 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// The SoftLayer_Account data type contains general information relating to a single SoftLayer customer account. Personal information in this type such as names, addresses, and phone numbers is assigned to the account only and not to users belonging to the account.
+// The SoftLayer_Account data type also contains a number of relational properties that are used by the SoftLayer customer portal to quickly present a variety of account-related services to its users.
+//
+// SoftLayer customers are unable to change their company account information in the portal or the API. If you need to change this information please open a sales ticket in our customer portal and our account management staff will assist you.
+type Account struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAccountService returns an instance of the Account SoftLayer service
+func GetAccountService(sess *session.Session) Account {
+	return Account{Session: sess}
+}
+
+func (r Account) Id(id int) Account {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Account) Mask(mask string) Account {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Account) Filter(filter string) Account {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Account) Limit(limit int) Account {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Account) Offset(offset int) Account {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Account) ActivatePartner(accountId *string, hashCode *string) (resp datatypes.Account, err error) {
+	params := []interface{}{
+		accountId,
+		hashCode,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "activatePartner", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account) AddAchInformation(achInformation *datatypes.Container_Billing_Info_Ach) (resp bool, err error) {
+	params := []interface{}{
+		achInformation,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "addAchInformation", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account) AddReferralPartnerPaymentOption(paymentOption *datatypes.Container_Referral_Partner_Payment_Option) (resp bool, err error) {
+	params := []interface{}{
+		paymentOption,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "addReferralPartnerPaymentOption", params, &r.Options, &resp)
+	return
+}
+
+// This method indicates whether or not Bandwidth Pooling updates are blocked for the account so the billing cycle can run. Generally, accounts are restricted from moving servers in or out of Bandwidth Pools from 12:00 CST on the day prior to billing, until the billing batch completes, sometime after midnight the day of actual billing for the account.
+func (r Account) AreVdrUpdatesBlockedForBilling() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "areVdrUpdatesBlockedForBilling", nil, &r.Options, &resp)
+	return
+}
+
+// Cancel the PayPal Payment Request process. During the process of submitting a PayPal payment request, the customer is redirected to PayPal to confirm the request. If the customer elects to cancel the payment from PayPal, they are returned to SoftLayer where the manual payment record is updated to a status of canceled.
+func (r Account) CancelPayPalTransaction(token *string, payerId *string) (resp bool, err error) {
+	params := []interface{}{
+		token,
+		payerId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "cancelPayPalTransaction", params, &r.Options, &resp)
+	return
+}
+
+// Complete the PayPal Payment Request process and receive confirmation message.
+// During the process of submitting a PayPal payment request, the customer is redirected to PayPal to confirm the request. Once confirmed, PayPal returns the customer to SoftLayer where an attempt is made to finalize the transaction. A status message regarding the attempt is returned to the calling function.
+func (r Account) CompletePayPalTransaction(token *string, payerId *string) (resp string, err error) {
+	params := []interface{}{
+		token,
+		payerId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "completePayPalTransaction", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve the number of hourly services on an account that are active, plus any pending orders with hourly services attached.
+func (r Account) CountHourlyInstances() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "countHourlyInstances", nil, &r.Options, &resp)
+	return
+}
+
+// Create a new Customer user record in the SoftLayer customer portal. This is a wrapper around the Customer::createObject call; please see the documentation of that API. This wrapper adds the feature of the "silentlyCreate" option, which bypasses the IBMid invitation email process. False (the default) goes through the IBMid invitation email process, which creates the IBMid/SoftLayer Single-Sign-On (SSO) user link when the invitation is accepted (meaning the email has been received, opened, and the link(s) inside the email have been clicked to complete the process). True will silently (no email) create the IBMid/SoftLayer user SSO link immediately. Either case will use the value in the template object 'email' field to indicate the IBMid to use. This can be the username or, if unique, the email address of an IBMid. In the silent case, the IBMid must already exist. In the non-silent invitation email case, the IBMid can be created during this flow, by specifying an email address to be used to create the IBMid. All the features and restrictions of createObject apply to this API as well. In addition, note that the "silentlyCreate" flag is ONLY valid for IBMid-authenticated accounts.
+func (r Account) CreateUser(templateObject *datatypes.User_Customer, password *string, vpnPassword *string, silentlyCreateFlag *bool) (resp datatypes.User_Customer, err error) {
+	params := []interface{}{
+		templateObject,
+		password,
+		vpnPassword,
+		silentlyCreateFlag,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "createUser", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve An email address that is responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to this address.
+func (r Account) GetAbuseEmail() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAbuseEmail", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Email addresses that are responsible for abuse and legal inquiries on behalf of an account. For instance, new legal and abuse tickets are sent to these addresses.
+func (r Account) GetAbuseEmails() (resp []datatypes.Account_AbuseEmail, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAbuseEmails", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns an array of SoftLayer_Container_Network_Storage_Evault_WebCc_JobDetails objects for the given start and end dates. Start and end dates should be valid ISO 8601 dates. The backupStatus can be one of null, 'success', 'failed', or 'conflict'.
The 'success' backupStatus returns jobs with a status of 'COMPLETED', the 'failed' backupStatus returns jobs with a status of 'FAILED', while the 'conflict' backupStatus will return jobs that are not 'COMPLETED' or 'FAILED'. +func (r Account) GetAccountBackupHistory(startDate *datatypes.Time, endDate *datatypes.Time, backupStatus *string) (resp []datatypes.Container_Network_Storage_Evault_WebCc_JobDetails, err error) { + params := []interface{}{ + startDate, + endDate, + backupStatus, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAccountBackupHistory", params, &r.Options, &resp) + return +} + +// Retrieve The account contacts on an account. +func (r Account) GetAccountContacts() (resp []datatypes.Account_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAccountContacts", nil, &r.Options, &resp) + return +} + +// Retrieve The account software licenses owned by an account +func (r Account) GetAccountLicenses() (resp []datatypes.Software_AccountLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAccountLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetAccountLinks() (resp []datatypes.Account_Link, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAccountLinks", nil, &r.Options, &resp) + return +} + +// Retrieve An account's status presented in a more detailed data type. +func (r Account) GetAccountStatus() (resp datatypes.Account_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAccountStatus", nil, &r.Options, &resp) + return +} + +// This method pulls an account trait by its key. +func (r Account) GetAccountTraitValue(keyName *string) (resp string, err error) { + params := []interface{}{ + keyName, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAccountTraitValue", params, &r.Options, &resp) + return +} + +// Retrieve The billing item associated with an account's monthly discount. +func (r Account) GetActiveAccountDiscountBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveAccountDiscountBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The active account software licenses owned by an account +func (r Account) GetActiveAccountLicenses() (resp []datatypes.Software_AccountLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveAccountLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve The active address(es) that belong to an account. +func (r Account) GetActiveAddresses() (resp []datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveAddresses", nil, &r.Options, &resp) + return +} + +// Return all currently active alarms on this account. Only alarms on hardware and virtual servers accessible to the current user will be returned. 
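+// A hedged usage sketch of the generated service pattern (the session setup
+// is an illustrative assumption):
+//
+//	sess := session.New() // credentials resolved from env vars or config
+//	alarms, err := GetAccountService(sess).GetActiveAlarms()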
+func (r Account) GetActiveAlarms() (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveAlarms", nil, &r.Options, &resp) + return +} + +// Retrieve All billing agreements for an account +func (r Account) GetActiveBillingAgreements() (resp []datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveBillingAgreements", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetActiveCatalystEnrollment() (resp datatypes.Catalyst_Enrollment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveCatalystEnrollment", nil, &r.Options, &resp) + return +} + +// Retrieve The account's active top level colocation containers. +func (r Account) GetActiveColocationContainers() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveColocationContainers", nil, &r.Options, &resp) + return +} + +// Retrieve Account's currently active Flexible Credit enrollment. +func (r Account) GetActiveFlexibleCreditEnrollment() (resp datatypes.FlexibleCredit_Enrollment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveFlexibleCreditEnrollment", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetActiveNotificationSubscribers() (resp []datatypes.Notification_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// This method pulls all the active packages. This will give you a basic description of the packages within the SoftLayer Outlet store that are currently active and from which you can order a server or additional services. +func (r Account) GetActiveOutletPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveOutletPackages", nil, &r.Options, &resp) + return +} + +// This method will return the [[SoftLayer_Product_Package]] objects from which you can order a bare metal server, virtual server, service (such as CDN or Object Storage) or other software. Once you have the package you want to order from, you may query one of various endpoints from that package to get specific information about its products and pricing. See [[SoftLayer_Product_Package/getCategories|getCategories]] or [[SoftLayer_Product_Package/getItems|getItems]] for more information. +// +// Packages that have been retired will not appear in this result set. +func (r Account) GetActivePackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActivePackages", nil, &r.Options, &resp) + return +} + +// This method is deprecated and should not be used in production code. +// +// This method will return the [[SoftLayer_Product_Package]] objects from which you can order a bare metal server, virtual server, service (such as CDN or Object Storage) or other software filtered by an attribute type associated with the package. Once you have the package you want to order from, you may query one of various endpoints from that package to get specific information about its products and pricing. See [[SoftLayer_Product_Package/getCategories|getCategories]] or [[SoftLayer_Product_Package/getItems|getItems]] for more information. 
+func (r Account) GetActivePackagesByAttribute(attributeKeyName *string) (resp []datatypes.Product_Package, err error) { + params := []interface{}{ + attributeKeyName, + } + err = r.Session.DoRequest("SoftLayer_Account", "getActivePackagesByAttribute", params, &r.Options, &resp) + return +} + +// This method pulls all the active private hosted cloud packages. This will give you a basic description of the packages that are currently active and from which you can order private hosted cloud configurations. +func (r Account) GetActivePrivateHostedCloudPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActivePrivateHostedCloudPackages", nil, &r.Options, &resp) + return +} + +// Retrieve An account's non-expired quotes. +func (r Account) GetActiveQuotes() (resp []datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveQuotes", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual software licenses controlled by an account +func (r Account) GetActiveVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getActiveVirtualLicenses", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated load balancers. +func (r Account) GetAdcLoadBalancers() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAdcLoadBalancers", nil, &r.Options, &resp) + return +} + +// Retrieve All the address(es) that belong to an account. +func (r Account) GetAddresses() (resp []datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve An affiliate identifier associated with the customer account. +func (r Account) GetAffiliateId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAffiliateId", nil, &r.Options, &resp) + return +} + +// Returns URL uptime data for your account +func (r Account) GetAggregatedUptimeGraph(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAggregatedUptimeGraph", params, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. +func (r Account) GetAllBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. +func (r Account) GetAllCommissionBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllCommissionBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. +func (r Account) GetAllRecurringTopLevelBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getAllRecurringTopLevelBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items that will be on an account's next invoice. Does not consider associated items. 
+func (r Account) GetAllRecurringTopLevelBillingItemsUnfiltered() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAllRecurringTopLevelBillingItemsUnfiltered", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The billing items that will be on an account's next invoice.
+func (r Account) GetAllSubnetBillingItems() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAllSubnetBillingItems", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All billing items of an account.
+func (r Account) GetAllTopLevelBillingItems() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAllTopLevelBillingItems", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The billing items that will be on an account's next invoice. Does not consider associated items.
+func (r Account) GetAllTopLevelBillingItemsUnfiltered() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAllTopLevelBillingItemsUnfiltered", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates whether this account is allowed to silently migrate to use IBMid Authentication.
+func (r Account) GetAllowIbmIdSilentMigrationFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAllowIbmIdSilentMigrationFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Flag indicating if this account can be linked with Bluemix.
+func (r Account) GetAllowsBluemixAccountLinkingFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAllowsBluemixAccountLinkingFlag", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account) GetAlternateCreditCardData() (resp datatypes.Container_Account_Payment_Method_CreditCard, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAlternateCreditCardData", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated application delivery controller records.
+func (r Account) GetApplicationDeliveryControllers() (resp []datatypes.Network_Application_Delivery_Controller, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getApplicationDeliveryControllers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a single [[SoftLayer_Account_Attribute]] record by its [[SoftLayer_Account_Attribute_Type|type's]] key name.
+func (r Account) GetAttributeByType(attributeType *string) (resp datatypes.Account_Attribute, err error) {
+	params := []interface{}{
+		attributeType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getAttributeByType", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account attribute values for a SoftLayer customer account.
+func (r Account) GetAttributes() (resp []datatypes.Account_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAttributes", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account) GetAuxiliaryNotifications() (resp []datatypes.Container_Utility_Message, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAuxiliaryNotifications", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The public network VLANs assigned to an account.
+func (r Account) GetAvailablePublicNetworkVlans() (resp []datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getAvailablePublicNetworkVlans", nil, &r.Options, &resp)
+	return
+}
+
+// Returns the average disk space usage for all archive repositories.
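+// A hedged usage sketch (assumes datatypes.Time embeds time.Time, as defined
+// in this library's vendored datatypes package; session setup is illustrative):
+//
+//	sess := session.New() // credentials resolved from env vars or config
+//	end := datatypes.Time{Time: time.Now()}
+//	start := datatypes.Time{Time: end.AddDate(0, -1, 0)} // one month back
+//	avg, err := GetAccountService(sess).
+//		GetAverageArchiveUsageMetricDataByDate(&start, &end)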
+func (r Account) GetAverageArchiveUsageMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAverageArchiveUsageMetricDataByDate", params, &r.Options, &resp) + return +} + +// Returns the average disk space usage for all public repositories. +func (r Account) GetAveragePublicUsageMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account", "getAveragePublicUsageMetricDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve The account balance of a SoftLayer customer account. An account's balance is the amount of money owed to SoftLayer by the account holder, returned as a floating point number with two decimal places, measured in US Dollars ($USD). A negative account balance means the account holder has overpaid and is owed money by SoftLayer. +func (r Account) GetBalance() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBalance", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotments for an account. +func (r Account) GetBandwidthAllotments() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBandwidthAllotments", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotments for an account currently over allocation. +func (r Account) GetBandwidthAllotmentsOverAllocation() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBandwidthAllotmentsOverAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotments for an account projected to go over allocation. +func (r Account) GetBandwidthAllotmentsProjectedOverAllocation() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBandwidthAllotmentsProjectedOverAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated bare metal server objects. +func (r Account) GetBareMetalInstances() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBareMetalInstances", nil, &r.Options, &resp) + return +} + +// Retrieve All billing agreements for an account +func (r Account) GetBillingAgreements() (resp []datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBillingAgreements", nil, &r.Options, &resp) + return +} + +// Retrieve An account's billing information. +func (r Account) GetBillingInfo() (resp datatypes.Billing_Info, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBillingInfo", nil, &r.Options, &resp) + return +} + +// Retrieve Private template group objects (parent and children) and the shared template group objects (parent only) for an account. +func (r Account) GetBlockDeviceTemplateGroups() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getBlockDeviceTemplateGroups", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates whether this account requires blue id authentication. 
+func (r Account) GetBlueIdAuthenticationRequiredFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getBlueIdAuthenticationRequiredFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Returns true if this account is linked to IBM Bluemix, false if not.
+func (r Account) GetBluemixLinkedFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getBluemixLinkedFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetBrand() (resp datatypes.Brand, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getBrand", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetBrandAccountFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getBrandAccountFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The brand keyName.
+func (r Account) GetBrandKeyName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getBrandKeyName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates whether this account can order additional VLANs.
+func (r Account) GetCanOrderAdditionalVlansFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getCanOrderAdditionalVlansFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's active carts.
+func (r Account) GetCarts() (resp []datatypes.Billing_Order_Quote, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getCarts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetCatalystEnrollments() (resp []datatypes.Catalyst_Enrollment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getCatalystEnrollments", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated CDN accounts.
+func (r Account) GetCdnAccounts() (resp []datatypes.Network_ContentDelivery_Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getCdnAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All closed tickets associated with an account.
+func (r Account) GetClosedTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getClosedTickets", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns a SoftLayer_Container_Account_Graph_Outputs containing a base64 string PNG image. The optional parameter, detailedGraph, can be passed to get a more detailed graph.
+func (r Account) GetCurrentBackupStatisticsGraph(detailedGraph *bool) (resp datatypes.Container_Account_Graph_Outputs, err error) {
+	params := []interface{}{
+		detailedGraph,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getCurrentBackupStatisticsGraph", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account) GetCurrentTicketStatisticsGraph(detailedGraph *bool) (resp datatypes.Container_Account_Graph_Outputs, err error) {
+	params := []interface{}{
+		detailedGraph,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getCurrentTicketStatisticsGraph", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve the user record of the user calling the SoftLayer API.
+func (r Account) GetCurrentUser() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getCurrentUser", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Datacenters which contain subnets that the account has access to route.
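+//
+// A hedged usage sketch showing an object mask on this getter (the mask
+// string is an assumption chosen for illustration; `sess` is an
+// authenticated *session.Session):
+//
+//	datacenters, err := services.GetAccountService(sess).
+//		Mask("id;name;longName").
+//		GetDatacentersWithSubnetAllocations()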
+func (r Account) GetDatacentersWithSubnetAllocations() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getDatacentersWithSubnetAllocations", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated virtual dedicated host objects.
+func (r Account) GetDedicatedHosts() (resp []datatypes.Virtual_DedicatedHost, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getDedicatedHosts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A flag indicating whether payments are processed for this account.
+func (r Account) GetDisablePaymentProcessingFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getDisablePaymentProcessingFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve disk usage data on a [[SoftLayer_Virtual_Guest|Cloud Computing Instance]] image for the time range you provide from the Metric Tracking Object System and Legacy Data Warehouse. Each data entry object contains ''dateTime'' and ''counter'' properties. The ''dateTime'' property indicates the time that the disk usage data was measured and the ''counter'' property holds the disk usage in bytes.
+func (r Account) GetDiskUsageMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getDiskUsageMetricDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve disk usage data on a [[SoftLayer_Virtual_Guest|Cloud Computing Instance]] image for the time range you provide from the Legacy Data Warehouse. Each data entry object contains ''dateTime'' and ''counter'' properties. The ''dateTime'' property indicates the time that the disk usage data was measured and the ''counter'' property holds the disk usage in bytes.
+func (r Account) GetDiskUsageMetricDataFromLegacyByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getDiskUsageMetricDataFromLegacyByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve disk usage data on a [[SoftLayer_Virtual_Guest|Cloud Computing Instance]] image for the time range you provide from the Metric Tracking Object System. Each data entry object contains ''dateTime'' and ''counter'' properties. The ''dateTime'' property indicates the time that the disk usage data was measured and the ''counter'' property holds the disk usage in bytes.
+func (r Account) GetDiskUsageMetricDataFromMetricTrackingObjectSystemByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getDiskUsageMetricDataFromMetricTrackingObjectSystemByDate", params, &r.Options, &resp)
+	return
+}
+
+// Returns a disk usage image based on disk usage specified by the input parameters.
+func (r Account) GetDiskUsageMetricImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Container_Account_Graph_Outputs, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getDiskUsageMetricImageByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer employees that an account is assigned to.
+func (r Account) GetDisplaySupportRepresentativeAssignments() (resp []datatypes.Account_Attachment_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDisplaySupportRepresentativeAssignments", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetDomainRegistrations() (resp []datatypes.Dns_Domain_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDomainRegistrations", nil, &r.Options, &resp) + return +} + +// Retrieve The DNS domains associated with an account. +func (r Account) GetDomains() (resp []datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDomains", nil, &r.Options, &resp) + return +} + +// Retrieve The DNS domains associated with an account that were not created as a result of a secondary DNS zone transfer. +func (r Account) GetDomainsWithoutSecondaryDnsRecords() (resp []datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getDomainsWithoutSecondaryDnsRecords", nil, &r.Options, &resp) + return +} + +// Retrieve The total capacity of Legacy EVault Volumes on an account, in GB. +func (r Account) GetEvaultCapacityGB() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getEvaultCapacityGB", nil, &r.Options, &resp) + return +} + +// Retrieve An account's master EVault user. This is only used when an account has EVault service. +func (r Account) GetEvaultMasterUsers() (resp []datatypes.Account_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getEvaultMasterUsers", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated EVault storage volumes. +func (r Account) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// This method will return a PDF of the specified report, with the specified period within the start and end dates. The pdfType must be one of 'snapshot', or 'historical'. Possible historicalType parameters are 'monthly', 'yearly', and 'quarterly'. Start and end dates should be in ISO 8601 date format. +func (r Account) GetExecutiveSummaryPdf(pdfType *string, historicalType *string, startDate *string, endDate *string) (resp []byte, err error) { + params := []interface{}{ + pdfType, + historicalType, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getExecutiveSummaryPdf", params, &r.Options, &resp) + return +} + +// Retrieve Stored security certificates that are expired (ie. SSL) +func (r Account) GetExpiredSecurityCertificates() (resp []datatypes.Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getExpiredSecurityCertificates", nil, &r.Options, &resp) + return +} + +// Retrieve Logs of who entered a colocation area which is assigned to this account, or when a user under this account enters a datacenter. +func (r Account) GetFacilityLogs() (resp []datatypes.User_Access_Facility_Log, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getFacilityLogs", nil, &r.Options, &resp) + return +} + +// Retrieve All of the account's current and former Flexible Credit enrollments. 
+func (r Account) GetFlexibleCreditEnrollments() (resp []datatypes.FlexibleCredit_Enrollment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getFlexibleCreditEnrollments", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return a [[SoftLayer_Container_Account_Discount_Program]] object containing the Flexible Credit Program information for this account. To be considered an active participant, the account must have an enrollment record with a monthly credit amount set and the current date must be within the range defined by the enrollment and graduation date. The forNextBillCycle parameter can be set to true to return a SoftLayer_Container_Account_Discount_Program object with information relating to the next bill cycle. The forNextBillCycle parameter defaults to false. Please note that all discount amount entries are reported as pre-tax amounts and the legacy tax fields in the [[SoftLayer_Container_Account_Discount_Program]] are deprecated.
+func (r Account) GetFlexibleCreditProgramInfo(forNextBillCycle *bool) (resp datatypes.Container_Account_Discount_Program, err error) {
+	params := []interface{}{
+		forNextBillCycle,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getFlexibleCreditProgramInfo", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetGlobalIpRecords() (resp []datatypes.Network_Subnet_IpAddress_Global, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getGlobalIpRecords", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetGlobalIpv4Records() (resp []datatypes.Network_Subnet_IpAddress_Global, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getGlobalIpv4Records", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetGlobalIpv6Records() (resp []datatypes.Network_Subnet_IpAddress_Global, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getGlobalIpv6Records", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The global load balancer accounts for a SoftLayer customer account.
+func (r Account) GetGlobalLoadBalancerAccounts() (resp []datatypes.Network_LoadBalancer_Global_Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getGlobalLoadBalancerAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated hardware objects.
+func (r Account) GetHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated hardware objects currently over bandwidth allocation.
+func (r Account) GetHardwareOverBandwidthAllocation() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareOverBandwidthAllocation", nil, &r.Options, &resp)
+	return
+}
+
+// Return a collection of managed hardware pools.
+func (r Account) GetHardwarePools() (resp []datatypes.Container_Hardware_Pool_Details, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwarePools", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated hardware objects projected to go over bandwidth allocation.
+func (r Account) GetHardwareProjectedOverBandwidthAllocation() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareProjectedOverBandwidthAllocation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware associated with an account that has the cPanel web hosting control panel installed.
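+//
+// A hedged usage sketch (illustrative only; fields on datatypes.Hardware are
+// pointers and may be nil, hence the guard before dereferencing):
+//
+//	servers, err := services.GetAccountService(sess).
+//		Mask("id;hostname;domain").
+//		GetHardwareWithCpanel()
+//	for _, h := range servers {
+//		if h.Hostname != nil {
+//			fmt.Println(*h.Hostname)
+//		}
+//	}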
+func (r Account) GetHardwareWithCpanel() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithCpanel", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware associated with an account that has the Helm web hosting control panel installed.
+func (r Account) GetHardwareWithHelm() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithHelm", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware associated with an account that has McAfee Secure software components.
+func (r Account) GetHardwareWithMcafee() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithMcafee", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware associated with an account that has McAfee Secure AntiVirus for Redhat software components.
+func (r Account) GetHardwareWithMcafeeAntivirusRedhat() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithMcafeeAntivirusRedhat", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware associated with an account that has McAfee Secure AntiVirus for Windows software components.
+func (r Account) GetHardwareWithMcafeeAntivirusWindows() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithMcafeeAntivirusWindows", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware associated with an account that has McAfee Secure Intrusion Detection System software components.
+func (r Account) GetHardwareWithMcafeeIntrusionDetectionSystem() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithMcafeeIntrusionDetectionSystem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware associated with an account that has the Plesk web hosting control panel installed.
+func (r Account) GetHardwareWithPlesk() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithPlesk", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware associated with an account that has the QuantaStor storage system installed.
+func (r Account) GetHardwareWithQuantastor() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithQuantastor", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware associated with an account that has the Urchin web traffic analytics package installed.
+func (r Account) GetHardwareWithUrchin() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithUrchin", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware associated with an account that is running a version of the Microsoft Windows operating system.
+func (r Account) GetHardwareWithWindows() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHardwareWithWindows", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Return 1 if any of the account's hardware has the EVault Bare Metal Server Restore Plugin, otherwise 0.
+func (r Account) GetHasEvaultBareMetalRestorePluginFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHasEvaultBareMetalRestorePluginFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Return 1 if any of the account's hardware has an installation of Idera Server Backup, otherwise 0.
+func (r Account) GetHasIderaBareMetalRestorePluginFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHasIderaBareMetalRestorePluginFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The number of orders in a PENDING status for a SoftLayer customer account.
+func (r Account) GetHasPendingOrder() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHasPendingOrder", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Return 1 if any of the account's hardware has an installation of R1Soft CDP, otherwise 0.
+func (r Account) GetHasR1softBareMetalRestorePluginFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getHasR1softBareMetalRestorePluginFlag", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account) GetHistoricalBackupGraph(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Account_Graph_Outputs, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getHistoricalBackupGraph", params, &r.Options, &resp)
+	return
+}
+
+// This method will return a SoftLayer_Container_Account_Graph_Outputs object containing a base64 string PNG image of a line graph of bandwidth statistics given the start and end dates. The start and end dates should be valid ISO 8601 date formatted strings.
+func (r Account) GetHistoricalBandwidthGraph(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Account_Graph_Outputs, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getHistoricalBandwidthGraph", params, &r.Options, &resp)
+	return
+}
+
+// Given the start and end dates, this method will return a pie chart of ticket statistics in the form of a SoftLayer_Container_Account_Graph_Outputs object with a base64 PNG string. If an error occurs, the graphError parameter will be populated. Possible errors include: SoftLayer_Exception_Public Thrown if an invalid start or end date is provided. SoftLayer_Exception Thrown if there is an error connecting to HBase. SoftLayer_Exception Thrown if there is no data available for the specified date range. SoftLayer_Exception Thrown if there is an error retrieving data or generating the graph.
+func (r Account) GetHistoricalTicketGraph(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Account_Graph_Outputs, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getHistoricalTicketGraph", params, &r.Options, &resp)
+	return
+}
+
+// The graph image is returned as a base64 PNG string. Start and end dates should be formatted using the ISO 8601 date standard. If there is an error retrieving graph data or generating the graph string, a graphError attribute will be returned. The graphError attribute may contain any of the following error messages: SoftLayer_Exception_Public Thrown if an invalid start or end date is provided. SoftLayer_Exception Thrown if there is an error connecting to HBase. SoftLayer_Exception Thrown if there is no data available for the specified date range. SoftLayer_Exception Thrown if there is an error retrieving data or generating the graph.
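+//
+// A minimal sketch, assuming a one-week window and that the returned
+// container exposes the decoded PNG bytes through a GraphImage field (the
+// field name is an assumption for illustration; check the datatypes package):
+//
+//	start := datatypes.Time{Time: time.Now().AddDate(0, 0, -7)}
+//	end := datatypes.Time{Time: time.Now()}
+//	graph, err := services.GetAccountService(sess).
+//		GetHistoricalUptimeGraph(&start, &end)
+//	if err == nil && graph.GraphImage != nil {
+//		_ = ioutil.WriteFile("uptime.png", *graph.GraphImage, 0644)
+//	}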
+func (r Account) GetHistoricalUptimeGraph(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Account_Graph_Outputs, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account", "getHistoricalUptimeGraph", params, &r.Options, &resp) + return +} + +// Retrieve An account's associated hourly bare metal server objects. +func (r Account) GetHourlyBareMetalInstances() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHourlyBareMetalInstances", nil, &r.Options, &resp) + return +} + +// Retrieve Hourly service billing items that will be on an account's next invoice. +func (r Account) GetHourlyServiceBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHourlyServiceBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated hourly virtual guest objects. +func (r Account) GetHourlyVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHourlyVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated Virtual Storage volumes. +func (r Account) GetHubNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getHubNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve Unique identifier for a customer used throughout IBM. +func (r Account) GetIbmCustomerNumber() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getIbmCustomerNumber", nil, &r.Options, &resp) + return +} + +// Retrieve Timestamp representing the point in time when an account is required to use IBMid authentication. +func (r Account) GetIbmIdMigrationExpirationTimestamp() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getIbmIdMigrationExpirationTimestamp", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetInternalNotes() (resp []datatypes.Account_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getInternalNotes", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated billing invoices. +func (r Account) GetInvoices() (resp []datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getInvoices", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated iSCSI storage volumes. +func (r Account) GetIscsiNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getIscsiNetworkStorage", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) GetLargestAllowedSubnetCidr(numberOfHosts *int, locationId *int) (resp int, err error) { + params := []interface{}{ + numberOfHosts, + locationId, + } + err = r.Session.DoRequest("SoftLayer_Account", "getLargestAllowedSubnetCidr", params, &r.Options, &resp) + return +} + +// Retrieve The most recently canceled billing item. +func (r Account) GetLastCanceledBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastCanceledBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The most recent cancelled server billing item. 
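+//
+// A hedged usage sketch (single-object getters return a struct whose fields
+// are pointers; an absent record typically comes back with a nil Id):
+//
+//	item, err := services.GetAccountService(sess).
+//		GetLastCancelledServerBillingItem()
+//	if err == nil && item.Id != nil {
+//		fmt.Println("last cancelled server billing item:", *item.Id)
+//	}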
+func (r Account) GetLastCancelledServerBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastCancelledServerBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed abuse tickets associated with an account. +func (r Account) GetLastFiveClosedAbuseTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedAbuseTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed accounting tickets associated with an account. +func (r Account) GetLastFiveClosedAccountingTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedAccountingTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account. +func (r Account) GetLastFiveClosedOtherTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedOtherTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed sales tickets associated with an account. +func (r Account) GetLastFiveClosedSalesTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedSalesTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed support tickets associated with an account. +func (r Account) GetLastFiveClosedSupportTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedSupportTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The five most recently closed tickets associated with an account. +func (r Account) GetLastFiveClosedTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLastFiveClosedTickets", nil, &r.Options, &resp) + return +} + +// Retrieve An account's most recent billing date. +func (r Account) GetLatestBillDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLatestBillDate", nil, &r.Options, &resp) + return +} + +// Retrieve An account's latest recurring invoice. +func (r Account) GetLatestRecurringInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLatestRecurringInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve An account's latest recurring pending invoice. +func (r Account) GetLatestRecurringPendingInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLatestRecurringPendingInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve The legacy bandwidth allotments for an account. +func (r Account) GetLegacyBandwidthAllotments() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLegacyBandwidthAllotments", nil, &r.Options, &resp) + return +} + +// Retrieve The total capacity of Legacy iSCSI Volumes on an account, in GB. +func (r Account) GetLegacyIscsiCapacityGB() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getLegacyIscsiCapacityGB", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated load balancers. 
+func (r Account) GetLoadBalancers() (resp []datatypes.Network_LoadBalancer_VirtualIpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getLoadBalancers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total capacity of Legacy lockbox Volumes on an account, in GB.
+func (r Account) GetLockboxCapacityGB() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getLockboxCapacityGB", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated Lockbox storage volumes.
+func (r Account) GetLockboxNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getLockboxNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetManualPaymentsUnderReview() (resp []datatypes.Billing_Payment_Card_ManualPayment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getManualPaymentsUnderReview", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's master user.
+func (r Account) GetMasterUser() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getMasterUser", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's media transfer service requests.
+func (r Account) GetMediaDataTransferRequests() (resp []datatypes.Account_Media_Data_Transfer_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getMediaDataTransferRequests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated Message Queue accounts.
+func (r Account) GetMessageQueueAccounts() (resp []datatypes.Network_Message_Queue, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getMessageQueueAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated monthly bare metal server objects.
+func (r Account) GetMonthlyBareMetalInstances() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getMonthlyBareMetalInstances", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated monthly virtual guest objects.
+func (r Account) GetMonthlyVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getMonthlyVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated NAS storage volumes.
+func (r Account) GetNasNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNasNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether or not this account can define its own networks.
+func (r Account) GetNetworkCreationFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkCreationFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All network gateway devices on this account.
+func (r Account) GetNetworkGateways() (resp []datatypes.Network_Gateway, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkGateways", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated network hardware.
+func (r Account) GetNetworkHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetNetworkMessageDeliveryAccounts() (resp []datatypes.Network_Message_Delivery, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMessageDeliveryAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Hardware which is currently experiencing a service failure.
+func (r Account) GetNetworkMonitorDownHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorDownHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Virtual guests which are currently experiencing a service failure.
+func (r Account) GetNetworkMonitorDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorDownVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Hardware which is currently recovering from a service failure.
+func (r Account) GetNetworkMonitorRecoveringHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorRecoveringHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Virtual guests which are currently recovering from a service failure.
+func (r Account) GetNetworkMonitorRecoveringVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorRecoveringVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Hardware which is currently online.
+func (r Account) GetNetworkMonitorUpHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorUpHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Virtual guests which are currently online.
+func (r Account) GetNetworkMonitorUpVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkMonitorUpVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated storage volumes. This includes Lockbox, NAS, EVault, and iSCSI volumes.
+func (r Account) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's Network Storage groups.
+func (r Account) GetNetworkStorageGroups() (resp []datatypes.Network_Storage_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkStorageGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve IPSec network tunnels for an account.
+func (r Account) GetNetworkTunnelContexts() (resp []datatypes.Network_Tunnel_Module_Context, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkTunnelContexts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether or not an account has automatic private VLAN spanning enabled.
+func (r Account) GetNetworkVlanSpan() (resp datatypes.Account_Network_Vlan_Span, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkVlanSpan", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All network VLANs assigned to an account.
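+//
+// A hedged usage sketch combining an object mask with an object filter
+// (assumes the github.com/softlayer/softlayer-go/filter package; the mask and
+// VLAN number shown are illustrative):
+//
+//	vlans, err := services.GetAccountService(sess).
+//		Mask("id;vlanNumber;primaryRouter[hostname]").
+//		Filter(filter.Path("networkVlans.vlanNumber").Eq(1234).Build()).
+//		GetNetworkVlans()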
+func (r Account) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNetworkVlans", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers for the next billing cycle. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+func (r Account) GetNextBillingPublicAllotmentHardwareBandwidthDetails() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNextBillingPublicAllotmentHardwareBandwidthDetails", nil, &r.Options, &resp)
+	return
+}
+
+// Return an account's next invoice in Microsoft Excel format. The "next invoice" is what a customer will be billed on their next invoice, assuming no changes are made. Currently this does not include Bandwidth Pooling charges.
+func (r Account) GetNextInvoiceExcel(documentCreateDate *datatypes.Time) (resp []byte, err error) {
+	params := []interface{}{
+		documentCreateDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceExcel", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The pre-tax total amount exempt from incubator credit for the account's next invoice. This field is now deprecated and will soon be removed. Please update all references to instead use nextInvoiceTotalAmount.
+func (r Account) GetNextInvoiceIncubatorExemptTotal() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceIncubatorExemptTotal", nil, &r.Options, &resp)
+	return
+}
+
+// Return an account's next invoice in PDF format. The "next invoice" is what a customer will be billed on their next invoice, assuming no changes are made. Currently this does not include Bandwidth Pooling charges.
+func (r Account) GetNextInvoicePdf(documentCreateDate *datatypes.Time) (resp []byte, err error) {
+	params := []interface{}{
+		documentCreateDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoicePdf", params, &r.Options, &resp)
+	return
+}
+
+// Return an account's next invoice detailed portion in PDF format. The "next invoice" is what a customer will be billed on their next invoice, assuming no changes are made. Currently this does not include Bandwidth Pooling charges.
+func (r Account) GetNextInvoicePdfDetailed(documentCreateDate *datatypes.Time) (resp []byte, err error) {
+	params := []interface{}{
+		documentCreateDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoicePdfDetailed", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The billing items that will be on an account's next invoice.
+func (r Account) GetNextInvoiceTopLevelBillingItems() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTopLevelBillingItems", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The pre-tax total amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing.
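+//
+// A minimal sketch (datatypes.Float64 converts to a plain float64; the
+// formatting below is illustrative):
+//
+//	total, err := services.GetAccountService(sess).GetNextInvoiceTotalAmount()
+//	if err == nil {
+//		fmt.Printf("next invoice total: $%.2f USD\n", float64(total))
+//	}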
+func (r Account) GetNextInvoiceTotalAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total one-time charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total one-time tax amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalRecurringAmountBeforeAccountDiscount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalRecurringAmountBeforeAccountDiscount", nil, &r.Options, &resp) + return +} + +// Retrieve The total recurring tax amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total recurring charge amount of an account's next invoice measured in US Dollars ($USD), assuming no changes or charges occur between now and time of billing. +func (r Account) GetNextInvoiceTotalTaxableRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceTotalTaxableRecurringAmount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) GetNextInvoiceZeroFeeItemCounts() (resp []datatypes.Container_Product_Item_Category_ZeroFee_Count, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNextInvoiceZeroFeeItemCounts", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetNotificationSubscribers() (resp []datatypes.Notification_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Account object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Account service. You can only retrieve the account that your portal user is assigned to. 
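+//
+// A hedged usage sketch (the mask is illustrative; per the note above, only
+// the account the authenticated portal user is assigned to can be retrieved):
+//
+//	account, err := services.GetAccountService(sess).
+//		Mask("id;companyName;email").
+//		GetObject()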
+func (r Account) GetObject() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The open abuse tickets associated with an account. +func (r Account) GetOpenAbuseTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenAbuseTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The open accounting tickets associated with an account. +func (r Account) GetOpenAccountingTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenAccountingTickets", nil, &r.Options, &resp) + return +} + +// Retrieve The open billing tickets associated with an account. +func (r Account) GetOpenBillingTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenBillingTickets", nil, &r.Options, &resp) + return +} + +// Retrieve An open ticket requesting cancellation of this server, if one exists. +func (r Account) GetOpenCancellationRequests() (resp []datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenCancellationRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The open tickets that do not belong to the abuse, accounting, sales, or support groups associated with an account. +func (r Account) GetOpenOtherTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenOtherTickets", nil, &r.Options, &resp) + return +} + +// Retrieve An account's recurring invoices. +func (r Account) GetOpenRecurringInvoices() (resp []datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenRecurringInvoices", nil, &r.Options, &resp) + return +} + +// Retrieve The open sales tickets associated with an account. +func (r Account) GetOpenSalesTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenSalesTickets", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetOpenStackAccountLinks() (resp []datatypes.Account_Link, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenStackAccountLinks", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated Openstack related Object Storage accounts. +func (r Account) GetOpenStackObjectStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenStackObjectStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The open support tickets associated with an account. +func (r Account) GetOpenSupportTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenSupportTickets", nil, &r.Options, &resp) + return +} + +// Retrieve All open tickets associated with an account. +func (r Account) GetOpenTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenTickets", nil, &r.Options, &resp) + return +} + +// Retrieve All open tickets associated with an account last edited by an employee. +func (r Account) GetOpenTicketsWaitingOnCustomer() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOpenTicketsWaitingOnCustomer", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated billing orders excluding upgrades. 
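+//
+// A hedged usage sketch showing result-limit pagination on a list getter
+// (the page size and offset shown are illustrative):
+//
+//	orders, err := services.GetAccountService(sess).
+//		Limit(10).
+//		Offset(0).
+//		GetOrders()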
+func (r Account) GetOrders() (resp []datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOrders", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items that have no parent billing item. These are items that don't necessarily belong to a single server. +func (r Account) GetOrphanBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOrphanBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetOwnedBrands() (resp []datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOwnedBrands", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetOwnedHardwareGenericComponentModels() (resp []datatypes.Hardware_Component_Model_Generic, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getOwnedHardwareGenericComponentModels", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetPaymentProcessors() (resp []datatypes.Billing_Payment_Processor, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPaymentProcessors", nil, &r.Options, &resp) + return +} + +// Before being approved for general use, a credit card must be approved by a SoftLayer agent. Once a credit card change request has been either approved or denied, the change request will no longer appear in the list of pending change requests. This method will return a list of all pending change requests as well as a portion of the data from the original request. +func (r Account) GetPendingCreditCardChangeRequestData() (resp []datatypes.Container_Account_Payment_Method_CreditCard, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingCreditCardChangeRequestData", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetPendingEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingEvents", nil, &r.Options, &resp) + return +} + +// Retrieve An account's latest open (pending) invoice. +func (r Account) GetPendingInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve A list of top-level invoice items that are on an account's currently pending invoice. +func (r Account) GetPendingInvoiceTopLevelItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTopLevelItems", nil, &r.Options, &resp) + return +} + +// Retrieve The total amount of an account's pending invoice, if one exists. +func (r Account) GetPendingInvoiceTotalAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTotalAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total one-time charges for an account's pending invoice, if one exists. In other words, it is the sum of one-time charges, setup fees, and labor fees. It does not include taxes. +func (r Account) GetPendingInvoiceTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the taxes related to one time charges for an account's pending invoice, if one exists. 
+func (r Account) GetPendingInvoiceTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTotalOneTimeTaxAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total recurring amount of an account's pending invoice, if one exists.
+func (r Account) GetPendingInvoiceTotalRecurringAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTotalRecurringAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total amount of the recurring taxes on an account's pending invoice, if one exists.
+func (r Account) GetPendingInvoiceTotalRecurringTaxAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPendingInvoiceTotalRecurringTaxAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's permission groups.
+func (r Account) GetPermissionGroups() (resp []datatypes.User_Permission_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPermissionGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's user roles.
+func (r Account) GetPermissionRoles() (resp []datatypes.User_Permission_Role, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPermissionRoles", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetPortableStorageVolumes() (resp []datatypes.Virtual_Disk_Image, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPortableStorageVolumes", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Customer-specified URIs that are downloaded onto a newly provisioned or reloaded server. If the URI is sent over HTTPS, it will be executed directly on the server.
+func (r Account) GetPostProvisioningHooks() (resp []datatypes.Provisioning_Hook, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPostProvisioningHooks", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated portal users with PPTP VPN access.
+func (r Account) GetPptpVpnUsers() (resp []datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPptpVpnUsers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total recurring amount for an account's previous revenue.
+func (r Account) GetPreviousRecurringRevenue() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPreviousRecurringRevenue", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The item prices that an account is restricted to.
+func (r Account) GetPriceRestrictions() (resp []datatypes.Product_Item_Price_Account_Restriction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPriceRestrictions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All priority one tickets associated with an account.
+func (r Account) GetPriorityOneTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getPriorityOneTickets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The private inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date.
+func (r Account) GetPrivateAllotmentHardwareBandwidthDetails() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPrivateAllotmentHardwareBandwidthDetails", nil, &r.Options, &resp) + return +} + +// Retrieve Private and shared template group objects (parent only) for an account. +func (r Account) GetPrivateBlockDeviceTemplateGroups() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPrivateBlockDeviceTemplateGroups", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetPrivateIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPrivateIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The private network VLANs assigned to an account. +func (r Account) GetPrivateNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPrivateNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve All private subnets associated with an account. +func (r Account) GetPrivateSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPrivateSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve DEPRECATED - This information can be pulled directly through tapping keys now - DEPRECATED. The allotments for this account and their servers. The public inbound and outbound bandwidth is calculated for each server in addition to the daily average network traffic since the last billing date. +func (r Account) GetPublicAllotmentHardwareBandwidthDetails() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPublicAllotmentHardwareBandwidthDetails", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetPublicIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPublicIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The public network VLANs assigned to an account. +func (r Account) GetPublicNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPublicNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve All public network subnets associated with an account. +func (r Account) GetPublicSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getPublicSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve An account's quotes. +func (r Account) GetQuotes() (resp []datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getQuotes", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getRecentEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The Referral Partner for this account, if any. 
+func (r Account) GetReferralPartner() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getReferralPartner", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account) GetReferralPartnerCommissionForecast() (resp []datatypes.Container_Referral_Partner_Commission, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getReferralPartnerCommissionForecast", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account) GetReferralPartnerCommissionHistory() (resp []datatypes.Container_Referral_Partner_Commission, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getReferralPartnerCommissionHistory", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account) GetReferralPartnerCommissionPending() (resp []datatypes.Container_Referral_Partner_Commission, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getReferralPartnerCommissionPending", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve If this account is a referral partner, the accounts this referral partner has referred.
+func (r Account) GetReferredAccounts() (resp []datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getReferredAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetRegulatedWorkloads() (resp []datatypes.Legal_RegulatedWorkload, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getRegulatedWorkloads", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Remote management command requests for an account.
+func (r Account) GetRemoteManagementCommandRequests() (resp []datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getRemoteManagementCommandRequests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Replication events for all Network Storage volumes on an account.
+func (r Account) GetReplicationEvents() (resp []datatypes.Network_Storage_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getReplicationEvents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates whether newly created users under this account will be associated with IBMid via an email requiring a response.
+func (r Account) GetRequireSilentIBMidUserCreation() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getRequireSilentIBMidUserCreation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated top-level resource groups.
+func (r Account) GetResourceGroups() (resp []datatypes.Resource_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getResourceGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All routers that an account's VLANs reside on.
+func (r Account) GetRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's reverse WHOIS data. This data is used when making SWIP requests.
+func (r Account) GetRwhoisData() (resp datatypes.Network_Subnet_Rwhois_Data, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getRwhoisData", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account) GetSalesforceAccountLink() (resp datatypes.Account_Link, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account", "getSalesforceAccountLink", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SAML configuration for this account.
+func (r Account) GetSamlAuthentication() (resp datatypes.Account_Authentication_Saml, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSamlAuthentication", nil, &r.Options, &resp) + return +} + +// Retrieve All scale groups on this account. +func (r Account) GetScaleGroups() (resp []datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getScaleGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The secondary DNS records for a SoftLayer customer account. +func (r Account) GetSecondaryDomains() (resp []datatypes.Dns_Secondary, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSecondaryDomains", nil, &r.Options, &resp) + return +} + +// Retrieve Stored security certificates (ie. SSL) +func (r Account) GetSecurityCertificates() (resp []datatypes.Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSecurityCertificates", nil, &r.Options, &resp) + return +} + +// Retrieve The security groups belonging to this account. +func (r Account) GetSecurityGroups() (resp []datatypes.Network_SecurityGroup, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSecurityGroups", nil, &r.Options, &resp) + return +} + +// Retrieve An account's vulnerability scan requests. +func (r Account) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSecurityScanRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The service billing items that will be on an account's next invoice. +func (r Account) GetServiceBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getServiceBillingItems", nil, &r.Options, &resp) + return +} + +// This method returns the [[SoftLayer_Virtual_Guest_Block_Device_Template_Group]] objects that have been shared with this account +func (r Account) GetSharedBlockDeviceTemplateGroups() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSharedBlockDeviceTemplateGroups", nil, &r.Options, &resp) + return +} + +// Retrieve Shipments that belong to the customer's account. +func (r Account) GetShipments() (resp []datatypes.Account_Shipment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getShipments", nil, &r.Options, &resp) + return +} + +// Retrieve Customer specified SSH keys that can be implemented onto a newly provisioned or reloaded server. +func (r Account) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSshKeys", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated portal users with SSL VPN access. +func (r Account) GetSslVpnUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSslVpnUsers", nil, &r.Options, &resp) + return +} + +// Retrieve An account's virtual guest objects that are hosted on a user provisioned hypervisor. 
+func (r Account) GetStandardPoolVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getStandardPoolVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetSubnetRegistrationDetails() (resp []datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSubnetRegistrationDetails", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetSubnetRegistrations() (resp []datatypes.Network_Subnet_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSubnetRegistrations", nil, &r.Options, &resp) + return +} + +// Retrieve All network subnets associated with an account. +func (r Account) GetSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer employees that an account is assigned to. +func (r Account) GetSupportRepresentatives() (resp []datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSupportRepresentatives", nil, &r.Options, &resp) + return +} + +// Retrieve The active support subscriptions for this account. +func (r Account) GetSupportSubscriptions() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSupportSubscriptions", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetSupportTier() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSupportTier", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating whether invoices are suppressed. +func (r Account) GetSuppressInvoicesFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getSuppressInvoicesFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account) GetTags() (resp []datatypes.Tag, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getTags", nil, &r.Options, &resp) + return +} + +// This method will return a SoftLayer_Container_Account_Discount_Program object containing the Technology Incubator Program information for this account. To be considered an active participant, the account must have an enrollment record with a monthly credit amount set and the current date must be within the range defined by the enrollment and graduation date. The forNextBillCycle parameter can be set to true to return a SoftLayer_Container_Account_Discount_Program object with information relating to the next bill cycle. The forNextBillCycle parameter defaults to false. +func (r Account) GetTechIncubatorProgramInfo(forNextBillCycle *bool) (resp datatypes.Container_Account_Discount_Program, err error) { + params := []interface{}{ + forNextBillCycle, + } + err = r.Session.DoRequest("SoftLayer_Account", "getTechIncubatorProgramInfo", params, &r.Options, &resp) + return +} + +// Returns multiple [[SoftLayer_Container_Policy_Acceptance]] that represent the acceptance status of the applicable third-party policies for this account. +func (r Account) GetThirdPartyPoliciesAcceptanceStatus() (resp []datatypes.Container_Policy_Acceptance, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getThirdPartyPoliciesAcceptanceStatus", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated tickets. 
+func (r Account) GetTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getTickets", nil, &r.Options, &resp) + return +} + +// Retrieve Tickets closed within the last 72 hours or last 10 tickets, whichever is less, associated with an account. +func (r Account) GetTicketsClosedInTheLastThreeDays() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getTicketsClosedInTheLastThreeDays", nil, &r.Options, &resp) + return +} + +// Retrieve Tickets closed today associated with an account. +func (r Account) GetTicketsClosedToday() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getTicketsClosedToday", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated Transcode accounts. +func (r Account) GetTranscodeAccounts() (resp []datatypes.Network_Media_Transcode_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getTranscodeAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated upgrade requests. +func (r Account) GetUpgradeRequests() (resp []datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getUpgradeRequests", nil, &r.Options, &resp) + return +} + +// Retrieve An account's portal users. +func (r Account) GetUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getUsers", nil, &r.Options, &resp) + return +} + +// Retrieve a list of valid (non-expired) security certificates without the sensitive certificate information. This allows non-privileged users to view and select security certificates when configuring associated services. +func (r Account) GetValidSecurityCertificateEntries() (resp []datatypes.Security_Certificate_Entry, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getValidSecurityCertificateEntries", nil, &r.Options, &resp) + return +} + +// Retrieve Stored security certificates that are not expired (ie. SSL) +func (r Account) GetValidSecurityCertificates() (resp []datatypes.Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getValidSecurityCertificates", nil, &r.Options, &resp) + return +} + +// Retrieve Return 0 if VPN updates are currently in progress on this account, otherwise 1. +func (r Account) GetVdrUpdatesInProgressFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVdrUpdatesInProgressFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth pooling for this account. +func (r Account) GetVirtualDedicatedRacks() (resp []datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualDedicatedRacks", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual server virtual disk images. +func (r Account) GetVirtualDiskImages() (resp []datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualDiskImages", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual guest objects. +func (r Account) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual guest objects currently over bandwidth allocation. 
+func (r Account) GetVirtualGuestsOverBandwidthAllocation() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsOverBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual guest objects projected to be over bandwidth allocation. +func (r Account) GetVirtualGuestsProjectedOverBandwidthAllocation() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsProjectedOverBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have the cPanel web hosting control panel installed. +func (r Account) GetVirtualGuestsWithCpanel() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithCpanel", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have McAfee Secure software components. +func (r Account) GetVirtualGuestsWithMcafee() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithMcafee", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have McAfee Secure AntiVirus for Redhat software components. +func (r Account) GetVirtualGuestsWithMcafeeAntivirusRedhat() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithMcafeeAntivirusRedhat", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have McAfee Secure AntiVirus for Windows software components. +func (r Account) GetVirtualGuestsWithMcafeeAntivirusWindows() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithMcafeeAntivirusWindows", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have McAfee Secure Intrusion Detection System software components. +func (r Account) GetVirtualGuestsWithMcafeeIntrusionDetectionSystem() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithMcafeeIntrusionDetectionSystem", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have the Plesk web hosting control panel installed. +func (r Account) GetVirtualGuestsWithPlesk() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithPlesk", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have the QuantaStor storage system installed. +func (r Account) GetVirtualGuestsWithQuantastor() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithQuantastor", nil, &r.Options, &resp) + return +} + +// Retrieve All virtual guests associated with an account that have the Urchin web traffic analytics package installed. +func (r Account) GetVirtualGuestsWithUrchin() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualGuestsWithUrchin", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth pooling for this account. 
+func (r Account) GetVirtualPrivateRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualPrivateRack", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual server archived storage repositories. +func (r Account) GetVirtualStorageArchiveRepositories() (resp []datatypes.Virtual_Storage_Repository, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualStorageArchiveRepositories", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated virtual server public storage repositories. +func (r Account) GetVirtualStoragePublicRepositories() (resp []datatypes.Virtual_Storage_Repository, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVirtualStoragePublicRepositories", nil, &r.Options, &resp) + return +} + +// This returns a collection of active VMware software account license keys. +func (r Account) GetVmWareActiveAccountLicenseKeys() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getVmWareActiveAccountLicenseKeys", nil, &r.Options, &resp) + return +} + +// Retrieve a list of an account's hardware's Windows Update status. This list includes which servers have available updates, which servers require rebooting due to updates, which servers have failed retrieving updates, and which servers have failed to communicate with the SoftLayer private Windows Software Update Services server. +func (r Account) GetWindowsUpdateStatus() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "getWindowsUpdateStatus", nil, &r.Options, &resp) + return +} + +// Determine if an account has an [[SoftLayer_Account_Attribute|attribute]] associated with it. hasAttribute() returns false if the attribute does not exist or if it does not have a value. +func (r Account) HasAttribute(attributeType *string) (resp bool, err error) { + params := []interface{}{ + attributeType, + } + err = r.Session.DoRequest("SoftLayer_Account", "hasAttribute", params, &r.Options, &resp) + return +} + +// This method will return the limit (number) of hourly services the account is allowed to have. +func (r Account) HourlyInstanceLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "hourlyInstanceLimit", nil, &r.Options, &resp) + return +} + +// This method will return the limit (number) of hourly bare metal servers the account is allowed to have. +func (r Account) HourlyServerLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "hourlyServerLimit", nil, &r.Options, &resp) + return +} + +// Returns true if this account is eligible for the local currency program, false otherwise. +func (r Account) IsEligibleForLocalCurrencyProgram() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "isEligibleForLocalCurrencyProgram", nil, &r.Options, &resp) + return +} + +// This method will link this SoftLayer account with the provided external account. 
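+//
+// A minimal usage sketch (hypothetical, not part of the generated API: "sess"
+// is an assumed *session.Session, GetAccountService is assumed to be defined
+// earlier in this package, and the three string values are placeholders):
+//
+//	account := GetAccountService(sess)
+//	err := account.LinkExternalAccount(
+//		sl.String("external-account-id"), // id of the external account to link
+//		sl.String("authorization-token"), // token authorizing the link
+//		sl.String("provider-key"))        // external service provider key
+//	// err is non-nil if the linking request fails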
+func (r Account) LinkExternalAccount(externalAccountId *string, authorizationToken *string, externalServiceProviderKey *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + externalAccountId, + authorizationToken, + externalServiceProviderKey, + } + err = r.Session.DoRequest("SoftLayer_Account", "linkExternalAccount", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) RemoveAlternateCreditCard() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "removeAlternateCreditCard", nil, &r.Options, &resp) + return +} + +// Retrieve the record data associated with the submission of a Credit Card Change Request. SoftLayer customers are permitted to request a change in Credit Card information. Part of the process calls for an attempt by SoftLayer to submit a $1.00 charge to the financial institution backing the credit card as a means of verifying that the information provided in the change request is valid. The data associated with this change request is returned to the calling function. +// +// If the onlyChangeNicknameFlag parameter is set to true, the nickname of the credit card will be changed immediately without requiring approval by an agent. To change the nickname of the active payment method, pass the empty string for paymentRoleName. To change the nickname for the alternate credit card, pass ALTERNATE_CREDIT_CARD as the paymentRoleName. vatId must be set, but the value will not be used and the empty string is acceptable. +func (r Account) RequestCreditCardChange(request *datatypes.Billing_Payment_Card_ChangeRequest, vatId *string, paymentRoleName *string, onlyChangeNicknameFlag *bool) (resp datatypes.Billing_Payment_Card_ChangeRequest, err error) { + params := []interface{}{ + request, + vatId, + paymentRoleName, + onlyChangeNicknameFlag, + } + err = r.Session.DoRequest("SoftLayer_Account", "requestCreditCardChange", params, &r.Options, &resp) + return +} + +// Retrieve the record data associated with the submission of a Manual Payment Request. SoftLayer customers are permitted to request a manual one-time payment at a minimum amount of $2.00. Customers may submit a Credit Card Payment (Mastercard, Visa, American Express) or a PayPal payment. For Credit Card Payments, SoftLayer engages the credit card financial institution to submit the payment request. The financial institution's response and other data associated with the transaction are returned to the calling function. In the case of PayPal Payments, SoftLayer engages the PayPal system to initiate the PayPal payment sequence. The applicable data generated during the request is returned to the calling function. +func (r Account) RequestManualPayment(request *datatypes.Billing_Payment_Card_ManualPayment) (resp datatypes.Billing_Payment_Card_ManualPayment, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account", "requestManualPayment", params, &r.Options, &resp) + return +} + +// Retrieve the record data associated with the submission of a Manual Payment Request for a manual payment using a credit card which is on file and does not require an approval process. SoftLayer customers are permitted to request a manual one-time payment at a minimum amount of $2.00. Customers may use an existing Credit Card on file (Mastercard, Visa, American Express). SoftLayer engages the credit card financial institution to submit the payment request. 
The financial institution's response and other data associated with the transaction are returned to the calling function. The applicable data generated during the request is returned to the calling function. +func (r Account) RequestManualPaymentUsingCreditCardOnFile(amount *string, payWithAlternateCardFlag *bool, note *string) (resp datatypes.Billing_Payment_Card_ManualPayment, err error) { + params := []interface{}{ + amount, + payWithAlternateCardFlag, + note, + } + err = r.Session.DoRequest("SoftLayer_Account", "requestManualPaymentUsingCreditCardOnFile", params, &r.Options, &resp) + return +} + +// Set this account's abuse emails. Takes an array of email addresses as strings. +func (r Account) SetAbuseEmails(emails []string) (resp bool, err error) { + params := []interface{}{ + emails, + } + err = r.Session.DoRequest("SoftLayer_Account", "setAbuseEmails", params, &r.Options, &resp) + return +} + +// Set the flag that enables or disables automatic private network VLAN spanning for a SoftLayer customer account. Enabling VLAN spanning allows an account's servers to talk on the same broadcast domain even if they reside within different private vlans. +func (r Account) SetVlanSpan(enabled *bool) (resp bool, err error) { + params := []interface{}{ + enabled, + } + err = r.Session.DoRequest("SoftLayer_Account", "setVlanSpan", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account) SwapCreditCards() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account", "swapCreditCards", nil, &r.Options, &resp) + return +} + +// Some larger SoftLayer customer accounts may have servers and virtual servers on more subnets than SoftLayer's private network VPN devices can assign routes for. In those cases routes for individual servers and virtual servers may be assigned individually to an account's servers via this method. +// +// Always call this method to enable changes when manually configuring VPN subnet access. +func (r Account) UpdateVpnUsersForResource(objectId *int, objectType *string) (resp bool, err error) { + params := []interface{}{ + objectId, + objectType, + } + err = r.Session.DoRequest("SoftLayer_Account", "updateVpnUsersForResource", params, &r.Options, &resp) + return +} + +// This method will validate the following account fields. Included are the allowed characters for each field. Email Address*: letters, numbers, space, period, dash, parenthesis, exclamation point, at sign, ampersand, colon, comma, underscore, apostrophe, octothorpe. Company Name*: alphabet, numbers, space, period, dash, octothorpe, forward slash, backward slash, comma, colon, at sign, ampersand, underscore, apostrophe, parenthesis, exclamation point. (Note: may not contain an email address) First Name*: alphabet, space, period, dash, comma, apostrophe. Last Name*: alphabet, space, period, dash, comma, apostrophe. Address 1*: alphabet, numbers, space, period, dash, octothorpe, forward slash, backward slash, comma, colon, at sign, ampersand, underscore, apostrophe. Address 2: alphabet, numbers, space, period, dash, octothorpe, forward slash, backward slash, comma, colon, at sign, ampersand, underscore, apostrophe. City*: alphabet, space, period, dash, apostrophe. State*: Required if country is US or Canada. Must be valid two-letter state code for that country. Postal Code*: alphabet, numbers, dash, space. Country*: alphabet, numbers. Office Phone*: alphabet, numbers, space, period, dash, parenthesis, plus sign. 
Alternate Phone: alphabet, numbers, space, period, dash, parenthesis, plus sign. Fax Phone: alphabet, numbers, space, period, dash, parenthesis, plus sign. +// * denotes a required field. +func (r Account) Validate(account *datatypes.Account) (resp []string, err error) { + params := []interface{}{ + account, + } + err = r.Session.DoRequest("SoftLayer_Account", "validate", params, &r.Options, &resp) + return +} + +// This method checks global and account specific requirements and returns true if the dollar amount entered is acceptable for this account and false otherwise. Please note the dollar amount is in USD. +func (r Account) ValidateManualPaymentAmount(amount *string) (resp bool, err error) { + params := []interface{}{ + amount, + } + err = r.Session.DoRequest("SoftLayer_Account", "validateManualPaymentAmount", params, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Address data type contains information on an address associated with a SoftLayer account. +type Account_Address struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAddressService returns an instance of the Account_Address SoftLayer service +func GetAccountAddressService(sess *session.Session) Account_Address { + return Account_Address{Session: sess} +} + +func (r Account_Address) Id(id int) Account_Address { + r.Options.Id = &id + return r +} + +func (r Account_Address) Mask(mask string) Account_Address { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Address) Filter(filter string) Account_Address { + r.Options.Filter = filter + return r +} + +func (r Account_Address) Limit(limit int) Account_Address { + r.Options.Limit = &limit + return r +} + +func (r Account_Address) Offset(offset int) Account_Address { + r.Options.Offset = &offset + return r +} + +// Create a new address record. The ''typeId'', ''accountId'', ''description'', ''address1'', ''city'', ''state'', ''country'', and ''postalCode'' properties in the templateObject parameter are required properties and may not be null or empty. Users will be restricted to creating addresses for their account. +func (r Account_Address) CreateObject(templateObject *datatypes.Account_Address) (resp datatypes.Account_Address, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Address", "createObject", params, &r.Options, &resp) + return +} + +// Edit the properties of an address record by passing in a modified instance of a SoftLayer_Account_Address object. Users will be restricted to modifying addresses for their account. +func (r Account_Address) EditObject(templateObject *datatypes.Account_Address) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Address", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account to which this address belongs. +func (r Account_Address) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve a list of SoftLayer datacenter addresses. 
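+//
+// A hedged sketch of the option-chaining pattern defined above ("sess" is an
+// assumed *session.Session; the mask properties are illustrative). Note that
+// Mask wraps a bare property list in "mask[...]" when the string contains a
+// comma or bracket:
+//
+//	svc := GetAccountAddressService(sess).Mask("id,description").Limit(10).Offset(0)
+//	centers, err := svc.GetAllDataCenters() // request is sent with mask[id,description]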
+func (r Account_Address) GetAllDataCenters() (resp []datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getAllDataCenters", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who created this address. +func (r Account_Address) GetCreateUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getCreateUser", nil, &r.Options, &resp) + return +} + +// Retrieve The location of this address. +func (r Account_Address) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The employee who last modified this address. +func (r Account_Address) GetModifyEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getModifyEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who last modified this address. +func (r Account_Address) GetModifyUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getModifyUser", nil, &r.Options, &resp) + return +} + +// Retrieve a list of SoftLayer datacenter addresses. +func (r Account_Address) GetNetworkAddress(name *string) (resp []datatypes.Account_Address, err error) { + params := []interface{}{ + name, + } + err = r.Session.DoRequest("SoftLayer_Account_Address", "getNetworkAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Address) GetObject() (resp datatypes.Account_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve An account address' type. +func (r Account_Address) GetType() (resp datatypes.Account_Address_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Address_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAddressTypeService returns an instance of the Account_Address_Type SoftLayer service +func GetAccountAddressTypeService(sess *session.Session) Account_Address_Type { + return Account_Address_Type{Session: sess} +} + +func (r Account_Address_Type) Id(id int) Account_Address_Type { + r.Options.Id = &id + return r +} + +func (r Account_Address_Type) Mask(mask string) Account_Address_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Address_Type) Filter(filter string) Account_Address_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Address_Type) Limit(limit int) Account_Address_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Address_Type) Offset(offset int) Account_Address_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Address_Type) GetObject() (resp datatypes.Account_Address_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Address_Type", "getObject", nil, &r.Options, &resp) + return +} + +// This service allows for a unique identifier to be associated to an existing customer account. 
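+//
+// For illustration only (a sketch assuming "sess" is a *session.Session, that
+// the datatype exposes an AffiliateId field, and a placeholder id), an
+// affiliation can be created via CreateObject below:
+//
+//	tmpl := datatypes.Account_Affiliation{AffiliateId: sl.String("my-affiliate-id")}
+//	affiliation, err := GetAccountAffiliationService(sess).CreateObject(&tmpl)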
+type Account_Affiliation struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAffiliationService returns an instance of the Account_Affiliation SoftLayer service +func GetAccountAffiliationService(sess *session.Session) Account_Affiliation { + return Account_Affiliation{Session: sess} +} + +func (r Account_Affiliation) Id(id int) Account_Affiliation { + r.Options.Id = &id + return r +} + +func (r Account_Affiliation) Mask(mask string) Account_Affiliation { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Affiliation) Filter(filter string) Account_Affiliation { + r.Options.Filter = filter + return r +} + +func (r Account_Affiliation) Limit(limit int) Account_Affiliation { + r.Options.Limit = &limit + return r +} + +func (r Account_Affiliation) Offset(offset int) Account_Affiliation { + r.Options.Offset = &offset + return r +} + +// Create a new affiliation to associate with an existing account. +func (r Account_Affiliation) CreateObject(templateObject *datatypes.Account_Affiliation) (resp datatypes.Account_Affiliation, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "createObject", params, &r.Options, &resp) + return +} + +// deleteObject permanently removes an account affiliation +func (r Account_Affiliation) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit an affiliation that is associated to an existing account. +func (r Account_Affiliation) EditObject(templateObject *datatypes.Account_Affiliation) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account that an affiliation belongs to. +func (r Account_Affiliation) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "getAccount", nil, &r.Options, &resp) + return +} + +// Get account affiliation information associated with affiliate id. 
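+//
+// A minimal sketch ("sess" and the affiliate id are illustrative assumptions):
+//
+//	affiliations, err := GetAccountAffiliationService(sess).
+//		GetAccountAffiliationsByAffiliateId(sl.String("my-affiliate-id"))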
+func (r Account_Affiliation) GetAccountAffiliationsByAffiliateId(affiliateId *string) (resp []datatypes.Account_Affiliation, err error) { + params := []interface{}{ + affiliateId, + } + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "getAccountAffiliationsByAffiliateId", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Affiliation) GetObject() (resp datatypes.Account_Affiliation, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Affiliation", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Agreement struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAgreementService returns an instance of the Account_Agreement SoftLayer service +func GetAccountAgreementService(sess *session.Session) Account_Agreement { + return Account_Agreement{Session: sess} +} + +func (r Account_Agreement) Id(id int) Account_Agreement { + r.Options.Id = &id + return r +} + +func (r Account_Agreement) Mask(mask string) Account_Agreement { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Agreement) Filter(filter string) Account_Agreement { + r.Options.Filter = filter + return r +} + +func (r Account_Agreement) Limit(limit int) Account_Agreement { + r.Options.Limit = &limit + return r +} + +func (r Account_Agreement) Offset(offset int) Account_Agreement { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Account_Agreement) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The type of agreement. +func (r Account_Agreement) GetAgreementType() (resp datatypes.Account_Agreement_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getAgreementType", nil, &r.Options, &resp) + return +} + +// Retrieve The files attached to an agreement. +func (r Account_Agreement) GetAttachedBillingAgreementFiles() (resp []datatypes.Account_MasterServiceAgreement, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getAttachedBillingAgreementFiles", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items associated with an agreement. +func (r Account_Agreement) GetBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getBillingItems", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Agreement) GetObject() (resp datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The status of the agreement. +func (r Account_Agreement) GetStatus() (resp datatypes.Account_Agreement_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The top level billing item associated with an agreement. +func (r Account_Agreement) GetTopLevelBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Agreement", "getTopLevelBillingItems", nil, &r.Options, &resp) + return +} + +// Account authentication has many different settings that can be set. This class allows the customer or employee to set these settings. 
+type Account_Authentication_Attribute struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAuthenticationAttributeService returns an instance of the Account_Authentication_Attribute SoftLayer service +func GetAccountAuthenticationAttributeService(sess *session.Session) Account_Authentication_Attribute { + return Account_Authentication_Attribute{Session: sess} +} + +func (r Account_Authentication_Attribute) Id(id int) Account_Authentication_Attribute { + r.Options.Id = &id + return r +} + +func (r Account_Authentication_Attribute) Mask(mask string) Account_Authentication_Attribute { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Authentication_Attribute) Filter(filter string) Account_Authentication_Attribute { + r.Options.Filter = filter + return r +} + +func (r Account_Authentication_Attribute) Limit(limit int) Account_Authentication_Attribute { + r.Options.Limit = &limit + return r +} + +func (r Account_Authentication_Attribute) Offset(offset int) Account_Authentication_Attribute { + r.Options.Offset = &offset + return r +} + +// Retrieve The SoftLayer customer account. +func (r Account_Authentication_Attribute) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer account authentication that has an attribute. +func (r Account_Authentication_Attribute) GetAuthenticationRecord() (resp datatypes.Account_Authentication_Saml, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute", "getAuthenticationRecord", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Authentication_Attribute) GetObject() (resp datatypes.Account_Authentication_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of attribute assigned to a SoftLayer account authentication. +func (r Account_Authentication_Attribute) GetType() (resp datatypes.Account_Authentication_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute", "getType", nil, &r.Options, &resp) + return +} + +// SoftLayer_Account_Authentication_Attribute_Type models the type of attribute that can be assigned to a SoftLayer customer account authentication. 
+type Account_Authentication_Attribute_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAuthenticationAttributeTypeService returns an instance of the Account_Authentication_Attribute_Type SoftLayer service +func GetAccountAuthenticationAttributeTypeService(sess *session.Session) Account_Authentication_Attribute_Type { + return Account_Authentication_Attribute_Type{Session: sess} +} + +func (r Account_Authentication_Attribute_Type) Id(id int) Account_Authentication_Attribute_Type { + r.Options.Id = &id + return r +} + +func (r Account_Authentication_Attribute_Type) Mask(mask string) Account_Authentication_Attribute_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Authentication_Attribute_Type) Filter(filter string) Account_Authentication_Attribute_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Authentication_Attribute_Type) Limit(limit int) Account_Authentication_Attribute_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Authentication_Attribute_Type) Offset(offset int) Account_Authentication_Attribute_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Authentication_Attribute_Type) GetAllObjects() (resp []datatypes.Account_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Authentication_Attribute_Type) GetObject() (resp datatypes.Account_Authentication_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Attribute_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Authentication_Saml struct { + Session *session.Session + Options sl.Options +} + +// GetAccountAuthenticationSamlService returns an instance of the Account_Authentication_Saml SoftLayer service +func GetAccountAuthenticationSamlService(sess *session.Session) Account_Authentication_Saml { + return Account_Authentication_Saml{Session: sess} +} + +func (r Account_Authentication_Saml) Id(id int) Account_Authentication_Saml { + r.Options.Id = &id + return r +} + +func (r Account_Authentication_Saml) Mask(mask string) Account_Authentication_Saml { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Authentication_Saml) Filter(filter string) Account_Authentication_Saml { + r.Options.Filter = filter + return r +} + +func (r Account_Authentication_Saml) Limit(limit int) Account_Authentication_Saml { + r.Options.Limit = &limit + return r +} + +func (r Account_Authentication_Saml) Offset(offset int) Account_Authentication_Saml { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Authentication_Saml) CreateObject(templateObject *datatypes.Account_Authentication_Saml) (resp datatypes.Account_Authentication_Saml, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Authentication_Saml) DeleteObject() (resp bool, err error) { + err = 
r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit the object by passing in a modified instance of the object +func (r Account_Authentication_Saml) EditObject(templateObject *datatypes.Account_Authentication_Saml) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with this saml configuration. +func (r Account_Authentication_Saml) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The saml attribute values for a SoftLayer customer account. +func (r Account_Authentication_Saml) GetAttributes() (resp []datatypes.Account_Authentication_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "getAttributes", nil, &r.Options, &resp) + return +} + +// This method will return the service provider metadata in XML format. +func (r Account_Authentication_Saml) GetMetadata() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "getMetadata", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Authentication_Saml) GetObject() (resp datatypes.Account_Authentication_Saml, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Authentication_Saml", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Contact struct { + Session *session.Session + Options sl.Options +} + +// GetAccountContactService returns an instance of the Account_Contact SoftLayer service +func GetAccountContactService(sess *session.Session) Account_Contact { + return Account_Contact{Session: sess} +} + +func (r Account_Contact) Id(id int) Account_Contact { + r.Options.Id = &id + return r +} + +func (r Account_Contact) Mask(mask string) Account_Contact { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Contact) Filter(filter string) Account_Contact { + r.Options.Filter = filter + return r +} + +func (r Account_Contact) Limit(limit int) Account_Contact { + r.Options.Limit = &limit + return r +} + +func (r Account_Contact) Offset(offset int) Account_Contact { + r.Options.Offset = &offset + return r +} + +// This method creates an account contact. The accountId is fixed, other properties can be set during creation. The typeId indicates the SoftLayer_Account_Contact_Type for the contact. This method returns the SoftLayer_Account_Contact object that is created. +func (r Account_Contact) CreateObject(templateObject *datatypes.Account_Contact) (resp datatypes.Account_Contact, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Contact", "createObject", params, &r.Options, &resp) + return +} + +// deleteObject permanently removes an account contact +func (r Account_Contact) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Contact", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method allows you to modify an account contact. Only master users are permitted to modify an account contact. 
+func (r Account_Contact) EditObject(templateObject *datatypes.Account_Contact) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Contact", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Contact) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Contact", "getAccount", nil, &r.Options, &resp) + return +} + +// This method will return an array of SoftLayer_Account_Contact_Type objects which can be used when creating or editing an account contact. +func (r Account_Contact) GetAllContactTypes() (resp []datatypes.Account_Contact_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Contact", "getAllContactTypes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Contact) GetObject() (resp datatypes.Account_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Contact", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Contact) GetType() (resp datatypes.Account_Contact_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Contact", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Historical_Report struct { + Session *session.Session + Options sl.Options +} + +// GetAccountHistoricalReportService returns an instance of the Account_Historical_Report SoftLayer service +func GetAccountHistoricalReportService(sess *session.Session) Account_Historical_Report { + return Account_Historical_Report{Session: sess} +} + +func (r Account_Historical_Report) Id(id int) Account_Historical_Report { + r.Options.Id = &id + return r +} + +func (r Account_Historical_Report) Mask(mask string) Account_Historical_Report { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Historical_Report) Filter(filter string) Account_Historical_Report { + r.Options.Filter = filter + return r +} + +func (r Account_Historical_Report) Limit(limit int) Account_Historical_Report { + r.Options.Limit = &limit + return r +} + +func (r Account_Historical_Report) Offset(offset int) Account_Historical_Report { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Historical_Report) GetAccountHostUptimeGraphData(startDate *string, endDate *string) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getAccountHostUptimeGraphData", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetAccountHostUptimeSummary(startDateTime *string, endDateTime *string) (resp datatypes.Container_Account_Historical_Summary, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getAccountHostUptimeSummary", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetAccountUrlUptimeGraphData(startDate *string, endDate *string) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getAccountUrlUptimeGraphData", params, &r.Options, &resp) + return +} + +// no documentation yet 
+func (r Account_Historical_Report) GetAccountUrlUptimeSummary(startDateTime *string, endDateTime *string) (resp datatypes.Container_Account_Historical_Summary, err error) { + params := []interface{}{ + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getAccountUrlUptimeSummary", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetHostUptimeDetail(configurationValueId *int, startDateTime *string, endDateTime *string) (resp datatypes.Container_Account_Historical_Summary_Detail, err error) { + params := []interface{}{ + configurationValueId, + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getHostUptimeDetail", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetHostUptimeGraphData(configurationValueId *int, startDate *string, endDate *string) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + configurationValueId, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getHostUptimeGraphData", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetUrlUptimeDetail(configurationValueId *int, startDateTime *string, endDateTime *string) (resp datatypes.Container_Account_Historical_Summary_Detail, err error) { + params := []interface{}{ + configurationValueId, + startDateTime, + endDateTime, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getUrlUptimeDetail", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Historical_Report) GetUrlUptimeGraphData(configurationValueId *int, startDate *string, endDate *string) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + configurationValueId, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Historical_Report", "getUrlUptimeGraphData", params, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Link_Bluemix struct { + Session *session.Session + Options sl.Options +} + +// GetAccountLinkBluemixService returns an instance of the Account_Link_Bluemix SoftLayer service +func GetAccountLinkBluemixService(sess *session.Session) Account_Link_Bluemix { + return Account_Link_Bluemix{Session: sess} +} + +func (r Account_Link_Bluemix) Id(id int) Account_Link_Bluemix { + r.Options.Id = &id + return r +} + +func (r Account_Link_Bluemix) Mask(mask string) Account_Link_Bluemix { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Link_Bluemix) Filter(filter string) Account_Link_Bluemix { + r.Options.Filter = filter + return r +} + +func (r Account_Link_Bluemix) Limit(limit int) Account_Link_Bluemix { + r.Options.Limit = &limit + return r +} + +func (r Account_Link_Bluemix) Offset(offset int) Account_Link_Bluemix { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Link_Bluemix) GetObject() (resp datatypes.Account_Link_Bluemix, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Link_Bluemix", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_Bluemix) GetSupportTierType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Link_Bluemix", "getSupportTierType", 
nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Link_OpenStack struct { + Session *session.Session + Options sl.Options +} + +// GetAccountLinkOpenStackService returns an instance of the Account_Link_OpenStack SoftLayer service +func GetAccountLinkOpenStackService(sess *session.Session) Account_Link_OpenStack { + return Account_Link_OpenStack{Session: sess} +} + +func (r Account_Link_OpenStack) Id(id int) Account_Link_OpenStack { + r.Options.Id = &id + return r +} + +func (r Account_Link_OpenStack) Mask(mask string) Account_Link_OpenStack { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Link_OpenStack) Filter(filter string) Account_Link_OpenStack { + r.Options.Filter = filter + return r +} + +func (r Account_Link_OpenStack) Limit(limit int) Account_Link_OpenStack { + r.Options.Limit = &limit + return r +} + +func (r Account_Link_OpenStack) Offset(offset int) Account_Link_OpenStack { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Link_OpenStack) CreateOSDomain(request *datatypes.Account_Link_OpenStack_LinkRequest) (resp datatypes.Account_Link_OpenStack_DomainCreationDetails, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "createOSDomain", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) CreateOSProject(request *datatypes.Account_Link_OpenStack_LinkRequest) (resp datatypes.Account_Link_OpenStack_ProjectCreationDetails, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "createOSProject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) DeleteOSDomain(domainId *string) (resp bool, err error) { + params := []interface{}{ + domainId, + } + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "deleteOSDomain", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) DeleteOSProject(projectId *string) (resp bool, err error) { + params := []interface{}{ + projectId, + } + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "deleteOSProject", params, &r.Options, &resp) + return +} + +// deleteObject permanently removes an account link and all of its associated Keystone data (including users for the associated project). '''This cannot be undone.''' Be wary of running this method. If you remove an account link in error you will need to re-create it by creating a new SoftLayer_Account_Link_OpenStack object. 
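+//
+// Because this is irreversible, a guarded usage sketch ("sess" and "linkId"
+// are illustrative assumptions):
+//
+//	svc := GetAccountLinkOpenStackService(sess).Id(linkId)
+//	deleted, err := svc.DeleteObject() // permanently removes the link and its Keystone data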
+func (r Account_Link_OpenStack) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) GetOSProject(projectId *string) (resp datatypes.Account_Link_OpenStack_ProjectDetails, err error) { + params := []interface{}{ + projectId, + } + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "getOSProject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) GetObject() (resp datatypes.Account_Link_OpenStack, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Link_OpenStack) ListOSProjects() (resp []datatypes.Account_Link_OpenStack_ProjectDetails, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Link_OpenStack", "listOSProjects", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Lockdown_Request data type holds information on API requests from brand customers. +type Account_Lockdown_Request struct { + Session *session.Session + Options sl.Options +} + +// GetAccountLockdownRequestService returns an instance of the Account_Lockdown_Request SoftLayer service +func GetAccountLockdownRequestService(sess *session.Session) Account_Lockdown_Request { + return Account_Lockdown_Request{Session: sess} +} + +func (r Account_Lockdown_Request) Id(id int) Account_Lockdown_Request { + r.Options.Id = &id + return r +} + +func (r Account_Lockdown_Request) Mask(mask string) Account_Lockdown_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Lockdown_Request) Filter(filter string) Account_Lockdown_Request { + r.Options.Filter = filter + return r +} + +func (r Account_Lockdown_Request) Limit(limit int) Account_Lockdown_Request { + r.Options.Limit = &limit + return r +} + +func (r Account_Lockdown_Request) Offset(offset int) Account_Lockdown_Request { + r.Options.Offset = &offset + return r +} + +// Will cancel a lockdown request scheduled in the future. Once canceled, the lockdown request cannot be reconciled and new requests must be made for subsequent actions on the account. +func (r Account_Lockdown_Request) CancelRequest() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "cancelRequest", nil, &r.Options, &resp) + return +} + +// Takes the original lockdown request ID, and an optional disable date. If no date is passed with the API call, the account will be disabled immediately. Otherwise, the account will be disabled on the date given. All hardware will be reclaimed and all accounts permanently disabled. +func (r Account_Lockdown_Request) DisableLockedAccount(disableDate *string) (resp int, err error) { + params := []interface{}{ + disableDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "disableLockedAccount", params, &r.Options, &resp) + return +} + +// Takes an account ID and an optional disconnect date. If no disconnect date is passed into the API call, the account disconnection will happen immediately. Otherwise, the account disconnection will happen on the date given. A brand account request ID will be returned and will then be updated when the disconnection occurs. 
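+//
+// A minimal sketch ("sess" and the account id are illustrative; passing nil
+// for the date disconnects immediately, per the description above):
+//
+//	requestId, err := GetAccountLockdownRequestService(sess).
+//		DisconnectCompute(sl.Int(123456), nil)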
+func (r Account_Lockdown_Request) DisconnectCompute(accountId *int, disconnectDate *string) (resp int, err error) { + params := []interface{}{ + accountId, + disconnectDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "disconnectCompute", params, &r.Options, &resp) + return +} + +// Provides a history of an account's lockdown requests and their status. +func (r Account_Lockdown_Request) GetAccountHistory(accountId *int) (resp []datatypes.Account_Lockdown_Request, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "getAccountHistory", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Lockdown_Request) GetObject() (resp datatypes.Account_Lockdown_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Takes the original disconnected lockdown event ID, and an optional reconnect date. If no reconnect date is passed with the API call, the account reconnection will happen immediately. Otherwise, the account reconnection will happen on the date given. The associated lockdown event will be unlocked and closed at that time. +func (r Account_Lockdown_Request) ReconnectCompute(reconnectDate *string) (resp int, err error) { + params := []interface{}{ + reconnectDate, + } + err = r.Session.DoRequest("SoftLayer_Account_Lockdown_Request", "reconnectCompute", params, &r.Options, &resp) + return +} + +// no documentation yet +type Account_MasterServiceAgreement struct { + Session *session.Session + Options sl.Options +} + +// GetAccountMasterServiceAgreementService returns an instance of the Account_MasterServiceAgreement SoftLayer service +func GetAccountMasterServiceAgreementService(sess *session.Session) Account_MasterServiceAgreement { + return Account_MasterServiceAgreement{Session: sess} +} + +func (r Account_MasterServiceAgreement) Id(id int) Account_MasterServiceAgreement { + r.Options.Id = &id + return r +} + +func (r Account_MasterServiceAgreement) Mask(mask string) Account_MasterServiceAgreement { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_MasterServiceAgreement) Filter(filter string) Account_MasterServiceAgreement { + r.Options.Filter = filter + return r +} + +func (r Account_MasterServiceAgreement) Limit(limit int) Account_MasterServiceAgreement { + r.Options.Limit = &limit + return r +} + +func (r Account_MasterServiceAgreement) Offset(offset int) Account_MasterServiceAgreement { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Account_MasterServiceAgreement) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_MasterServiceAgreement", "getAccount", nil, &r.Options, &resp) + return +} + +// Gets a File Entity container with the user's account's current MSA PDF. Gets a translation if one is available. Otherwise, gets the master document. 
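+//
+// Sketch of a call (the id is illustrative and sess is an existing session;
+// the returned Container_Utility_File_Entity is assumed to carry the
+// document contents):
+//
+//	msaService := services.GetAccountMasterServiceAgreementService(sess)
+//	file, err := msaService.Id(1).GetFile()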
+func (r Account_MasterServiceAgreement) GetFile() (resp datatypes.Container_Utility_File_Entity, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_MasterServiceAgreement", "getFile", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account_MasterServiceAgreement) GetObject() (resp datatypes.Account_MasterServiceAgreement, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_MasterServiceAgreement", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Account_Media data type contains information on a single piece of media associated with a Data Transfer Service request.
+type Account_Media struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAccountMediaService returns an instance of the Account_Media SoftLayer service
+func GetAccountMediaService(sess *session.Session) Account_Media {
+	return Account_Media{Session: sess}
+}
+
+func (r Account_Media) Id(id int) Account_Media {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Account_Media) Mask(mask string) Account_Media {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Account_Media) Filter(filter string) Account_Media {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Account_Media) Limit(limit int) Account_Media {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Account_Media) Offset(offset int) Account_Media {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Edit the properties of a media record by passing in a modified instance of a SoftLayer_Account_Media object.
+func (r Account_Media) EditObject(templateObject *datatypes.Account_Media) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account_Media", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account to which the media belongs.
+func (r Account_Media) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Media", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a list of supported media types for SoftLayer's Data Transfer Service.
+func (r Account_Media) GetAllMediaTypes() (resp []datatypes.Account_Media_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Media", "getAllMediaTypes", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The customer user who created the media object.
+func (r Account_Media) GetCreateUser() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Media", "getCreateUser", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The datacenter where the media resides.
+func (r Account_Media) GetDatacenter() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Media", "getDatacenter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The employee who last modified the media.
+func (r Account_Media) GetModifyEmployee() (resp datatypes.User_Employee, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Media", "getModifyEmployee", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The customer user who last modified the media.
+func (r Account_Media) GetModifyUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getModifyUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Media) GetObject() (resp datatypes.Account_Media, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The request to which the media belongs. +func (r Account_Media) GetRequest() (resp datatypes.Account_Media_Data_Transfer_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getRequest", nil, &r.Options, &resp) + return +} + +// Retrieve The media's type. +func (r Account_Media) GetType() (resp datatypes.Account_Media_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's associated EVault network storage service account. +func (r Account_Media) GetVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media", "getVolume", nil, &r.Options, &resp) + return +} + +// Remove a media from a SoftLayer account's list of media. The media record is not deleted. +func (r Account_Media) RemoveMediaFromList(mediaTemplate *datatypes.Account_Media) (resp int, err error) { + params := []interface{}{ + mediaTemplate, + } + err = r.Session.DoRequest("SoftLayer_Account_Media", "removeMediaFromList", params, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Media_Data_Transfer_Request data type contains information on a single Data Transfer Service request. Creation of these requests is limited to SoftLayer customers through the SoftLayer Customer Portal. +type Account_Media_Data_Transfer_Request struct { + Session *session.Session + Options sl.Options +} + +// GetAccountMediaDataTransferRequestService returns an instance of the Account_Media_Data_Transfer_Request SoftLayer service +func GetAccountMediaDataTransferRequestService(sess *session.Session) Account_Media_Data_Transfer_Request { + return Account_Media_Data_Transfer_Request{Session: sess} +} + +func (r Account_Media_Data_Transfer_Request) Id(id int) Account_Media_Data_Transfer_Request { + r.Options.Id = &id + return r +} + +func (r Account_Media_Data_Transfer_Request) Mask(mask string) Account_Media_Data_Transfer_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Media_Data_Transfer_Request) Filter(filter string) Account_Media_Data_Transfer_Request { + r.Options.Filter = filter + return r +} + +func (r Account_Media_Data_Transfer_Request) Limit(limit int) Account_Media_Data_Transfer_Request { + r.Options.Limit = &limit + return r +} + +func (r Account_Media_Data_Transfer_Request) Offset(offset int) Account_Media_Data_Transfer_Request { + r.Options.Offset = &offset + return r +} + +// Edit the properties of a data transfer request record by passing in a modified instance of a SoftLayer_Account_Media_Data_Transfer_Request object. +func (r Account_Media_Data_Transfer_Request) EditObject(templateObject *datatypes.Account_Media_Data_Transfer_Request) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account to which the request belongs. 
+func (r Account_Media_Data_Transfer_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The active tickets that are attached to the data transfer request. +func (r Account_Media_Data_Transfer_Request) GetActiveTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getActiveTickets", nil, &r.Options, &resp) + return +} + +// Retrieves a list of all the possible statuses to which a request may be set. +func (r Account_Media_Data_Transfer_Request) GetAllRequestStatuses() (resp []datatypes.Account_Media_Data_Transfer_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getAllRequestStatuses", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for the original request. +func (r Account_Media_Data_Transfer_Request) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who created the request. +func (r Account_Media_Data_Transfer_Request) GetCreateUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getCreateUser", nil, &r.Options, &resp) + return +} + +// Retrieve The media of the request. +func (r Account_Media_Data_Transfer_Request) GetMedia() (resp datatypes.Account_Media, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getMedia", nil, &r.Options, &resp) + return +} + +// Retrieve The employee who last modified the request. +func (r Account_Media_Data_Transfer_Request) GetModifyEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getModifyEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who last modified the request. +func (r Account_Media_Data_Transfer_Request) GetModifyUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getModifyUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Media_Data_Transfer_Request) GetObject() (resp datatypes.Account_Media_Data_Transfer_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The shipments of the request. +func (r Account_Media_Data_Transfer_Request) GetShipments() (resp []datatypes.Account_Shipment, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getShipments", nil, &r.Options, &resp) + return +} + +// Retrieve The status of the request. +func (r Account_Media_Data_Transfer_Request) GetStatus() (resp datatypes.Account_Media_Data_Transfer_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve All tickets that are attached to the data transfer request. 
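+//
+// The shared option helpers trim the result set; note that Mask wraps any
+// mask string containing ',' or '[' in "mask[...]" automatically. A sketch
+// (object id and mask are illustrative), with requestService obtained from
+// GetAccountMediaDataTransferRequestService:
+//
+//	tickets, err := requestService.Id(42).
+//		Mask("id;title;createDate").
+//		Limit(10).
+//		GetTickets()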
+func (r Account_Media_Data_Transfer_Request) GetTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Media_Data_Transfer_Request", "getTickets", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Note struct { + Session *session.Session + Options sl.Options +} + +// GetAccountNoteService returns an instance of the Account_Note SoftLayer service +func GetAccountNoteService(sess *session.Session) Account_Note { + return Account_Note{Session: sess} +} + +func (r Account_Note) Id(id int) Account_Note { + r.Options.Id = &id + return r +} + +func (r Account_Note) Mask(mask string) Account_Note { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Note) Filter(filter string) Account_Note { + r.Options.Filter = filter + return r +} + +func (r Account_Note) Limit(limit int) Account_Note { + r.Options.Limit = &limit + return r +} + +func (r Account_Note) Offset(offset int) Account_Note { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Note) CreateObject(templateObject *datatypes.Account_Note) (resp datatypes.Account_Note, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Note", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note) EditObject(templateObject *datatypes.Account_Note) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Note", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Note) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Note) GetCustomer() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "getCustomer", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Note) GetNoteHistory() (resp []datatypes.Account_Note_History, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "getNoteHistory", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Note) GetNoteType() (resp datatypes.Account_Note_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "getNoteType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note) GetObject() (resp datatypes.Account_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Note_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountNoteTypeService returns an instance of the Account_Note_Type SoftLayer service +func GetAccountNoteTypeService(sess *session.Session) Account_Note_Type { + return Account_Note_Type{Session: sess} +} + +func (r Account_Note_Type) Id(id int) Account_Note_Type { + r.Options.Id = &id + return r +} + +func (r Account_Note_Type) Mask(mask string) Account_Note_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, 
",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Note_Type) Filter(filter string) Account_Note_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Note_Type) Limit(limit int) Account_Note_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Note_Type) Offset(offset int) Account_Note_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Note_Type) CreateObject(templateObject *datatypes.Account_Note_Type) (resp datatypes.Account_Note_Type, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Note_Type", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note_Type) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note_Type", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note_Type) EditObject(templateObject *datatypes.Account_Note_Type) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Note_Type", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note_Type) GetAllObjects() (resp []datatypes.Account_Note_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Note_Type) GetObject() (resp datatypes.Account_Note_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Note_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Partner_Referral_Prospect struct { + Session *session.Session + Options sl.Options +} + +// GetAccountPartnerReferralProspectService returns an instance of the Account_Partner_Referral_Prospect SoftLayer service +func GetAccountPartnerReferralProspectService(sess *session.Session) Account_Partner_Referral_Prospect { + return Account_Partner_Referral_Prospect{Session: sess} +} + +func (r Account_Partner_Referral_Prospect) Id(id int) Account_Partner_Referral_Prospect { + r.Options.Id = &id + return r +} + +func (r Account_Partner_Referral_Prospect) Mask(mask string) Account_Partner_Referral_Prospect { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Partner_Referral_Prospect) Filter(filter string) Account_Partner_Referral_Prospect { + r.Options.Filter = filter + return r +} + +func (r Account_Partner_Referral_Prospect) Limit(limit int) Account_Partner_Referral_Prospect { + r.Options.Limit = &limit + return r +} + +func (r Account_Partner_Referral_Prospect) Offset(offset int) Account_Partner_Referral_Prospect { + r.Options.Offset = &offset + return r +} + +// Create a new Referral Partner Prospect +func (r Account_Partner_Referral_Prospect) CreateProspect(templateObject *datatypes.Container_Referral_Partner_Prospect, commit *bool) (resp datatypes.Account_Partner_Referral_Prospect, err error) { + params := []interface{}{ + templateObject, + commit, + } + err = r.Session.DoRequest("SoftLayer_Account_Partner_Referral_Prospect", "createProspect", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Partner_Referral_Prospect) GetObject() (resp datatypes.Account_Partner_Referral_Prospect, err error) 
{
+	err = r.Session.DoRequest("SoftLayer_Account_Partner_Referral_Prospect", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves Questions for a Referral Partner Survey
+func (r Account_Partner_Referral_Prospect) GetSurveyQuestions() (resp []datatypes.Survey_Question, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Partner_Referral_Prospect", "getSurveyQuestions", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Account_Password data type contains the username, password, and notes for services that may be required by external applications, such as the Webcc interface for the EVault Storage service.
+type Account_Password struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAccountPasswordService returns an instance of the Account_Password SoftLayer service
+func GetAccountPasswordService(sess *session.Session) Account_Password {
+	return Account_Password{Session: sess}
+}
+
+func (r Account_Password) Id(id int) Account_Password {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Account_Password) Mask(mask string) Account_Password {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Account_Password) Filter(filter string) Account_Password {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Account_Password) Limit(limit int) Account_Password {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Account_Password) Offset(offset int) Account_Password {
+	r.Options.Offset = &offset
+	return r
+}
+
+// The password and/or notes may be modified. Modifying the EVault passwords here will also update the password the Webcc interface will use.
+func (r Account_Password) EditObject(templateObject *datatypes.Account_Password) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account_Password", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Account_Password) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Password", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Account_Password object whose ID corresponds to the ID number of the init parameter passed to the SoftLayer_Account_Password service.
+func (r Account_Password) GetObject() (resp datatypes.Account_Password, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Password", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The service that an account/password combination is tied to.
+func (r Account_Password) GetType() (resp datatypes.Account_Password_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Password", "getType", nil, &r.Options, &resp) + return +} + +// +// +// +// +// +type Account_Regional_Registry_Detail struct { + Session *session.Session + Options sl.Options +} + +// GetAccountRegionalRegistryDetailService returns an instance of the Account_Regional_Registry_Detail SoftLayer service +func GetAccountRegionalRegistryDetailService(sess *session.Session) Account_Regional_Registry_Detail { + return Account_Regional_Registry_Detail{Session: sess} +} + +func (r Account_Regional_Registry_Detail) Id(id int) Account_Regional_Registry_Detail { + r.Options.Id = &id + return r +} + +func (r Account_Regional_Registry_Detail) Mask(mask string) Account_Regional_Registry_Detail { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Regional_Registry_Detail) Filter(filter string) Account_Regional_Registry_Detail { + r.Options.Filter = filter + return r +} + +func (r Account_Regional_Registry_Detail) Limit(limit int) Account_Regional_Registry_Detail { + r.Options.Limit = &limit + return r +} + +func (r Account_Regional_Registry_Detail) Offset(offset int) Account_Regional_Registry_Detail { + r.Options.Offset = &offset + return r +} + +// This method will create a new SoftLayer_Account_Regional_Registry_Detail object. +// +// Input - [[SoftLayer_Account_Regional_Registry_Detail (type)|SoftLayer_Account_Regional_Registry_Detail]]
+//     • detailTypeId
+//       The [[SoftLayer_Account_Regional_Registry_Detail_Type|type id]] of this detail object
+//       • Required
+//       • Type - integer
+//     • regionalInternetRegistryHandleId
+//       The id of the [[SoftLayer_Account_Rwhois_Handle|RWhois handle]] object. This is only to be used for detailed registrations, where a subnet is registered to an organization. The associated handle will be required to be a valid organization object id at the relevant registry. In this case, the detail object will only be valid for the registry the organization belongs to.
+//       • Optional
+//       • Type - integer
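+//
+// A sketch of building the template (assuming, as the parameter names above
+// suggest, that datatypes.Account_Regional_Registry_Detail exposes matching
+// exported pointer fields; the id values are illustrative), with
+// detailService obtained from GetAccountRegionalRegistryDetailService:
+//
+//	detailTypeId := 3
+//	template := datatypes.Account_Regional_Registry_Detail{
+//		DetailTypeId: &detailTypeId,
+//	}
+//	created, err := detailService.CreateObject(&template)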
    +func (r Account_Regional_Registry_Detail) CreateObject(templateObject *datatypes.Account_Regional_Registry_Detail) (resp datatypes.Account_Regional_Registry_Detail, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "createObject", params, &r.Options, &resp) + return +} + +// This method will delete an existing SoftLayer_Account_Regional_Registry_Detail object. +func (r Account_Regional_Registry_Detail) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method will edit an existing SoftLayer_Account_Regional_Registry_Detail object. For more detail, see [[SoftLayer_Account_Regional_Registry_Detail::createObject|createObject]]. +func (r Account_Regional_Registry_Detail) EditObject(templateObject *datatypes.Account_Regional_Registry_Detail) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account that this detail object belongs to. +func (r Account_Regional_Registry_Detail) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The associated type of this detail object. +func (r Account_Regional_Registry_Detail) GetDetailType() (resp datatypes.Account_Regional_Registry_Detail_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getDetailType", nil, &r.Options, &resp) + return +} + +// Retrieve References to the [[SoftLayer_Network_Subnet_Registration|registration objects]] that consume this detail object. +func (r Account_Regional_Registry_Detail) GetDetails() (resp []datatypes.Network_Subnet_Registration_Details, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Regional_Registry_Detail) GetObject() (resp datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The individual properties that define this detail object's values. +func (r Account_Regional_Registry_Detail) GetProperties() (resp []datatypes.Account_Regional_Registry_Detail_Property, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getProperties", nil, &r.Options, &resp) + return +} + +// Retrieve The associated RWhois handle of this detail object. Used only when detailed reassignments are necessary. +func (r Account_Regional_Registry_Detail) GetRegionalInternetRegistryHandle() (resp datatypes.Account_Rwhois_Handle, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "getRegionalInternetRegistryHandle", nil, &r.Options, &resp) + return +} + +// This method will create a bulk transaction to update any registrations that reference this detail object. It should only be called from a child class such as [[SoftLayer_Account_Regional_Registry_Detail_Person]] or [[SoftLayer_Account_Regional_Registry_Detail_Network]]. The registrations should be in the Open or Registration_Complete status. 
+func (r Account_Regional_Registry_Detail) UpdateReferencedRegistrations() (resp datatypes.Container_Network_Subnet_Registration_TransactionDetails, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail", "updateReferencedRegistrations", nil, &r.Options, &resp) + return +} + +// Subnet registration properties are used to define various attributes of the [[SoftLayer_Account_Regional_Registry_Detail|detail objects]]. These properties are defined by the [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] objects, which describe the available value formats. +type Account_Regional_Registry_Detail_Property struct { + Session *session.Session + Options sl.Options +} + +// GetAccountRegionalRegistryDetailPropertyService returns an instance of the Account_Regional_Registry_Detail_Property SoftLayer service +func GetAccountRegionalRegistryDetailPropertyService(sess *session.Session) Account_Regional_Registry_Detail_Property { + return Account_Regional_Registry_Detail_Property{Session: sess} +} + +func (r Account_Regional_Registry_Detail_Property) Id(id int) Account_Regional_Registry_Detail_Property { + r.Options.Id = &id + return r +} + +func (r Account_Regional_Registry_Detail_Property) Mask(mask string) Account_Regional_Registry_Detail_Property { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Regional_Registry_Detail_Property) Filter(filter string) Account_Regional_Registry_Detail_Property { + r.Options.Filter = filter + return r +} + +func (r Account_Regional_Registry_Detail_Property) Limit(limit int) Account_Regional_Registry_Detail_Property { + r.Options.Limit = &limit + return r +} + +func (r Account_Regional_Registry_Detail_Property) Offset(offset int) Account_Regional_Registry_Detail_Property { + r.Options.Offset = &offset + return r +} + +// This method will create a new SoftLayer_Account_Regional_Registry_Detail_Property object. +// +// Input - [[SoftLayer_Account_Regional_Registry_Detail_Property (type)|SoftLayer_Account_Regional_Registry_Detail_Property]]
+//     • registrationDetailId
+//       The numeric ID of the [[SoftLayer_Account_Regional_Registry_Detail|detail object]] this property belongs to
+//       • Required
+//       • Type - integer
+//     • propertyTypeId
+//       The numeric ID of the associated [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] object
+//       • Required
+//       • Type - integer
+//     • sequencePosition
+//       When more than one property of the same type exists on a detail object, this value determines the position in that collection. This can be thought of more as a sort order.
+//       • Required
+//       • Type - integer
+//     • value
+//       The actual value of the property.
+//       • Required
+//       • Type - string
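+//
+// A sketch of a property template (assuming matching exported pointer fields
+// on datatypes.Account_Regional_Registry_Detail_Property; all values are
+// illustrative), with propertyService obtained from
+// GetAccountRegionalRegistryDetailPropertyService:
+//
+//	registrationDetailId, propertyTypeId, sequencePosition := 100, 2, 0
+//	value := "example-value"
+//	template := datatypes.Account_Regional_Registry_Detail_Property{
+//		RegistrationDetailId: &registrationDetailId,
+//		PropertyTypeId:       &propertyTypeId,
+//		SequencePosition:     &sequencePosition,
+//		Value:                &value,
+//	}
+//	created, err := propertyService.CreateObject(&template)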
+func (r Account_Regional_Registry_Detail_Property) CreateObject(templateObject *datatypes.Account_Regional_Registry_Detail_Property) (resp datatypes.Account_Regional_Registry_Detail_Property, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Create multiple [[SoftLayer_Account_Regional_Registry_Detail_Property]] objects.
+func (r Account_Regional_Registry_Detail_Property) CreateObjects(templateObjects []datatypes.Account_Regional_Registry_Detail_Property) (resp []datatypes.Account_Regional_Registry_Detail_Property, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "createObjects", params, &r.Options, &resp)
+	return
+}
+
+// This method will delete an existing SoftLayer_Account_Regional_Registry_Detail_Property object.
+func (r Account_Regional_Registry_Detail_Property) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// This method will edit an existing SoftLayer_Account_Regional_Registry_Detail_Property object. For more detail, see [[SoftLayer_Account_Regional_Registry_Detail_Property::createObject|createObject]].
+func (r Account_Regional_Registry_Detail_Property) EditObject(templateObject *datatypes.Account_Regional_Registry_Detail_Property) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Edit multiple [[SoftLayer_Account_Regional_Registry_Detail_Property]] objects.
+func (r Account_Regional_Registry_Detail_Property) EditObjects(templateObjects []datatypes.Account_Regional_Registry_Detail_Property) (resp bool, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "editObjects", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The [[SoftLayer_Account_Regional_Registry_Detail]] object this property belongs to
+func (r Account_Regional_Registry_Detail_Property) GetDetail() (resp datatypes.Account_Regional_Registry_Detail, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "getDetail", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account_Regional_Registry_Detail_Property) GetObject() (resp datatypes.Account_Regional_Registry_Detail_Property, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The [[SoftLayer_Account_Regional_Registry_Detail_Property_Type]] object this property belongs to
+func (r Account_Regional_Registry_Detail_Property) GetPropertyType() (resp datatypes.Account_Regional_Registry_Detail_Property_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property", "getPropertyType", nil, &r.Options, &resp)
+	return
+}
+
+// Subnet Registration Detail Property Type objects describe the nature of a [[SoftLayer_Account_Regional_Registry_Detail_Property]] object. These types use [http://php.net/pcre.pattern.php Perl-Compatible Regular Expressions] to validate the value of a property object.
+type Account_Regional_Registry_Detail_Property_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountRegionalRegistryDetailPropertyTypeService returns an instance of the Account_Regional_Registry_Detail_Property_Type SoftLayer service +func GetAccountRegionalRegistryDetailPropertyTypeService(sess *session.Session) Account_Regional_Registry_Detail_Property_Type { + return Account_Regional_Registry_Detail_Property_Type{Session: sess} +} + +func (r Account_Regional_Registry_Detail_Property_Type) Id(id int) Account_Regional_Registry_Detail_Property_Type { + r.Options.Id = &id + return r +} + +func (r Account_Regional_Registry_Detail_Property_Type) Mask(mask string) Account_Regional_Registry_Detail_Property_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Regional_Registry_Detail_Property_Type) Filter(filter string) Account_Regional_Registry_Detail_Property_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Regional_Registry_Detail_Property_Type) Limit(limit int) Account_Regional_Registry_Detail_Property_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Regional_Registry_Detail_Property_Type) Offset(offset int) Account_Regional_Registry_Detail_Property_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Regional_Registry_Detail_Property_Type) GetAllObjects() (resp []datatypes.Account_Regional_Registry_Detail_Property_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Regional_Registry_Detail_Property_Type) GetObject() (resp datatypes.Account_Regional_Registry_Detail_Property_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Property_Type", "getObject", nil, &r.Options, &resp) + return +} + +// Subnet Registration Detail Type objects describe the nature of a [[SoftLayer_Account_Regional_Registry_Detail]] object. +// +// The standard values for these objects are as follows:
+//     • NETWORK - The detail object represents the information for a [[SoftLayer_Network_Subnet|subnet]]
+//     • NETWORK6 - The detail object represents the information for an [[SoftLayer_Network_Subnet_Version6|IPv6 subnet]]
+//     • PERSON - The detail object represents the information for a customer with the RIR
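+//
+// The available types can be enumerated through the service (a sketch;
+// KeyName and Id are assumed to be the exported pointer fields carrying the
+// values above), with typeService obtained from
+// GetAccountRegionalRegistryDetailTypeService:
+//
+//	detailTypes, err := typeService.GetAllObjects()
+//	for _, t := range detailTypes {
+//		if t.KeyName != nil && *t.KeyName == "PERSON" {
+//			// use *t.Id as the detailTypeId for createObject
+//		}
+//	}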
    +type Account_Regional_Registry_Detail_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountRegionalRegistryDetailTypeService returns an instance of the Account_Regional_Registry_Detail_Type SoftLayer service +func GetAccountRegionalRegistryDetailTypeService(sess *session.Session) Account_Regional_Registry_Detail_Type { + return Account_Regional_Registry_Detail_Type{Session: sess} +} + +func (r Account_Regional_Registry_Detail_Type) Id(id int) Account_Regional_Registry_Detail_Type { + r.Options.Id = &id + return r +} + +func (r Account_Regional_Registry_Detail_Type) Mask(mask string) Account_Regional_Registry_Detail_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Regional_Registry_Detail_Type) Filter(filter string) Account_Regional_Registry_Detail_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Regional_Registry_Detail_Type) Limit(limit int) Account_Regional_Registry_Detail_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Regional_Registry_Detail_Type) Offset(offset int) Account_Regional_Registry_Detail_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Regional_Registry_Detail_Type) GetAllObjects() (resp []datatypes.Account_Regional_Registry_Detail_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Regional_Registry_Detail_Type) GetObject() (resp datatypes.Account_Regional_Registry_Detail_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Regional_Registry_Detail_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Reports_Request struct { + Session *session.Session + Options sl.Options +} + +// GetAccountReportsRequestService returns an instance of the Account_Reports_Request SoftLayer service +func GetAccountReportsRequestService(sess *session.Session) Account_Reports_Request { + return Account_Reports_Request{Session: sess} +} + +func (r Account_Reports_Request) Id(id int) Account_Reports_Request { + r.Options.Id = &id + return r +} + +func (r Account_Reports_Request) Mask(mask string) Account_Reports_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Reports_Request) Filter(filter string) Account_Reports_Request { + r.Options.Filter = filter + return r +} + +func (r Account_Reports_Request) Limit(limit int) Account_Reports_Request { + r.Options.Limit = &limit + return r +} + +func (r Account_Reports_Request) Offset(offset int) Account_Reports_Request { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Reports_Request) CreateRequest(contact *datatypes.Account_Contact, reason *string, reportType *string) (resp datatypes.Account_Reports_Request, err error) { + params := []interface{}{ + contact, + reason, + reportType, + } + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "createRequest", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Reports_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", 
"getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A request's corresponding external contact, if one exists. +func (r Account_Reports_Request) GetAccountContact() (resp datatypes.Account_Contact, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getAccountContact", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Reports_Request) GetAllObjects() (resp datatypes.Account_Reports_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Reports_Request) GetObject() (resp datatypes.Account_Reports_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Type of the report customer is requesting for. +func (r Account_Reports_Request) GetReportType() (resp datatypes.Compliance_Report_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getReportType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Reports_Request) GetRequestByRequestKey(requestKey *string) (resp datatypes.Account_Reports_Request, err error) { + params := []interface{}{ + requestKey, + } + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getRequestByRequestKey", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Account_Reports_Request) GetTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getTicket", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user that initiated a report request. +func (r Account_Reports_Request) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "getUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Reports_Request) SendReportEmail(request *datatypes.Account_Reports_Request) (resp bool, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "sendReportEmail", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Reports_Request) UpdateTicketOnDecline(request *datatypes.Account_Reports_Request) (resp bool, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_Account_Reports_Request", "updateTicketOnDecline", params, &r.Options, &resp) + return +} + +// The SoftLayer_Account_Shipment data type contains information relating to a shipment. Basic information such as addresses, the shipment courier, and any tracking information for as shipment is accessible with this data type. 
+type Account_Shipment struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAccountShipmentService returns an instance of the Account_Shipment SoftLayer service
+func GetAccountShipmentService(sess *session.Session) Account_Shipment {
+	return Account_Shipment{Session: sess}
+}
+
+func (r Account_Shipment) Id(id int) Account_Shipment {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Account_Shipment) Mask(mask string) Account_Shipment {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Account_Shipment) Filter(filter string) Account_Shipment {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Account_Shipment) Limit(limit int) Account_Shipment {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Account_Shipment) Offset(offset int) Account_Shipment {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Edit the properties of a shipment record by passing in a modified instance of a SoftLayer_Account_Shipment object.
+func (r Account_Shipment) EditObject(templateObject *datatypes.Account_Shipment) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account to which the shipment belongs.
+func (r Account_Shipment) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a list of available shipping couriers.
+func (r Account_Shipment) GetAllCouriers() (resp []datatypes.Auxiliary_Shipping_Courier, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getAllCouriers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a list of available shipping couriers for a given courier type.
+func (r Account_Shipment) GetAllCouriersByType(courierTypeKeyName *string) (resp []datatypes.Auxiliary_Shipping_Courier, err error) {
+	params := []interface{}{
+		courierTypeKeyName,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getAllCouriersByType", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a list of shipment statuses.
+func (r Account_Shipment) GetAllShipmentStatuses() (resp []datatypes.Account_Shipment_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getAllShipmentStatuses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a list of shipment types.
+func (r Account_Shipment) GetAllShipmentTypes() (resp []datatypes.Account_Shipment_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getAllShipmentTypes", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The courier handling the shipment.
+func (r Account_Shipment) GetCourier() (resp datatypes.Auxiliary_Shipping_Courier, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getCourier", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The employee who created the shipment.
+func (r Account_Shipment) GetCreateEmployee() (resp datatypes.User_Employee, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getCreateEmployee", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The customer user who created the shipment.
+func (r Account_Shipment) GetCreateUser() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getCreateUser", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The address at which the shipment is received.
+func (r Account_Shipment) GetDestinationAddress() (resp datatypes.Account_Address, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getDestinationAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The employee who last modified the shipment.
+func (r Account_Shipment) GetModifyEmployee() (resp datatypes.User_Employee, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getModifyEmployee", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The customer user who last modified the shipment.
+func (r Account_Shipment) GetModifyUser() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getModifyUser", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account_Shipment) GetObject() (resp datatypes.Account_Shipment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The address from which the shipment is sent.
+func (r Account_Shipment) GetOriginationAddress() (resp datatypes.Account_Address, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getOriginationAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The items in the shipment.
+func (r Account_Shipment) GetShipmentItems() (resp []datatypes.Account_Shipment_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getShipmentItems", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The status of the shipment.
+func (r Account_Shipment) GetStatus() (resp datatypes.Account_Shipment_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The tracking data for the shipment.
+func (r Account_Shipment) GetTrackingData() (resp []datatypes.Account_Shipment_Tracking_Data, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getTrackingData", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of shipment (e.g. for Data Transfer Service or Colocation Service).
+func (r Account_Shipment) GetType() (resp datatypes.Account_Shipment_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Account_Shipment_Item data type contains information relating to a shipment's item. Basic information such as addresses, the shipment courier, and any tracking information for a shipment is accessible with this data type.
+type Account_Shipment_Item struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAccountShipmentItemService returns an instance of the Account_Shipment_Item SoftLayer service
+func GetAccountShipmentItemService(sess *session.Session) Account_Shipment_Item {
+	return Account_Shipment_Item{Session: sess}
+}
+
+func (r Account_Shipment_Item) Id(id int) Account_Shipment_Item {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Account_Shipment_Item) Mask(mask string) Account_Shipment_Item {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Account_Shipment_Item) Filter(filter string) Account_Shipment_Item {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Account_Shipment_Item) Limit(limit int) Account_Shipment_Item {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Account_Shipment_Item) Offset(offset int) Account_Shipment_Item {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Edit the properties of a shipment item record by passing in a modified instance of a SoftLayer_Account_Shipment_Item object.
+func (r Account_Shipment_Item) EditObject(templateObject *datatypes.Account_Shipment_Item) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment_Item", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Account_Shipment_Item) GetObject() (resp datatypes.Account_Shipment_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment_Item", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The shipment to which this item belongs.
+func (r Account_Shipment_Item) GetShipment() (resp datatypes.Account_Shipment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment_Item", "getShipment", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of this shipment item.
+func (r Account_Shipment_Item) GetShipmentItemType() (resp datatypes.Account_Shipment_Item_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Item", "getShipmentItemType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Shipment_Item_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentItemTypeService returns an instance of the Account_Shipment_Item_Type SoftLayer service +func GetAccountShipmentItemTypeService(sess *session.Session) Account_Shipment_Item_Type { + return Account_Shipment_Item_Type{Session: sess} +} + +func (r Account_Shipment_Item_Type) Id(id int) Account_Shipment_Item_Type { + r.Options.Id = &id + return r +} + +func (r Account_Shipment_Item_Type) Mask(mask string) Account_Shipment_Item_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Shipment_Item_Type) Filter(filter string) Account_Shipment_Item_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Shipment_Item_Type) Limit(limit int) Account_Shipment_Item_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Shipment_Item_Type) Offset(offset int) Account_Shipment_Item_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Shipment_Item_Type) GetObject() (resp datatypes.Account_Shipment_Item_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Item_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Shipment_Resource_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentResourceTypeService returns an instance of the Account_Shipment_Resource_Type SoftLayer service +func GetAccountShipmentResourceTypeService(sess *session.Session) Account_Shipment_Resource_Type { + return Account_Shipment_Resource_Type{Session: sess} +} + +func (r Account_Shipment_Resource_Type) Id(id int) Account_Shipment_Resource_Type { + r.Options.Id = &id + return r +} + +func (r Account_Shipment_Resource_Type) Mask(mask string) Account_Shipment_Resource_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Shipment_Resource_Type) Filter(filter string) Account_Shipment_Resource_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Shipment_Resource_Type) Limit(limit int) Account_Shipment_Resource_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Shipment_Resource_Type) Offset(offset int) Account_Shipment_Resource_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Shipment_Resource_Type) GetObject() (resp datatypes.Account_Shipment_Resource_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Resource_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Shipment_Status struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentStatusService returns an instance of the Account_Shipment_Status SoftLayer service +func GetAccountShipmentStatusService(sess *session.Session) Account_Shipment_Status { + return Account_Shipment_Status{Session: sess} +} + +func (r Account_Shipment_Status) Id(id int) Account_Shipment_Status { + 
r.Options.Id = &id
+	return r
+}
+
+func (r Account_Shipment_Status) Mask(mask string) Account_Shipment_Status {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Account_Shipment_Status) Filter(filter string) Account_Shipment_Status {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Account_Shipment_Status) Limit(limit int) Account_Shipment_Status {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Account_Shipment_Status) Offset(offset int) Account_Shipment_Status {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Account_Shipment_Status) GetObject() (resp datatypes.Account_Shipment_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment_Status", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Account_Shipment_Tracking_Data data type contains information on a single piece of tracking information pertaining to a shipment. This tracking information includes the tracking numbers by which the shipment may be tracked through the shipping courier.
+type Account_Shipment_Tracking_Data struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAccountShipmentTrackingDataService returns an instance of the Account_Shipment_Tracking_Data SoftLayer service
+func GetAccountShipmentTrackingDataService(sess *session.Session) Account_Shipment_Tracking_Data {
+	return Account_Shipment_Tracking_Data{Session: sess}
+}
+
+func (r Account_Shipment_Tracking_Data) Id(id int) Account_Shipment_Tracking_Data {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Account_Shipment_Tracking_Data) Mask(mask string) Account_Shipment_Tracking_Data {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Account_Shipment_Tracking_Data) Filter(filter string) Account_Shipment_Tracking_Data {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Account_Shipment_Tracking_Data) Limit(limit int) Account_Shipment_Tracking_Data {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Account_Shipment_Tracking_Data) Offset(offset int) Account_Shipment_Tracking_Data {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Create a new shipment tracking data record. The ''shipmentId'', ''sequence'', and ''trackingData'' properties in the templateObject parameter are required parameters to create a tracking data record.
+func (r Account_Shipment_Tracking_Data) CreateObject(templateObject *datatypes.Account_Shipment_Tracking_Data) (resp datatypes.Account_Shipment_Tracking_Data, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Create new shipment tracking data records. The ''shipmentId'', ''sequence'', and ''trackingData'' properties of each templateObject in the templateObjects array are required parameters to create a tracking data record.
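+//
+// A sketch of one template element (assuming matching exported pointer
+// fields on datatypes.Account_Shipment_Tracking_Data; all values are
+// illustrative), with trackingService obtained from
+// GetAccountShipmentTrackingDataService:
+//
+//	shipmentId, sequence := 789, 1
+//	trackingData := "1Z9999999999999999"
+//	template := datatypes.Account_Shipment_Tracking_Data{
+//		ShipmentId:   &shipmentId,
+//		Sequence:     &sequence,
+//		TrackingData: &trackingData,
+//	}
+//	created, err := trackingService.CreateObjects([]datatypes.Account_Shipment_Tracking_Data{template})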
+func (r Account_Shipment_Tracking_Data) CreateObjects(templateObjects []datatypes.Account_Shipment_Tracking_Data) (resp []datatypes.Account_Shipment_Tracking_Data, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "createObjects", params, &r.Options, &resp) + return +} + +// deleteObject permanently removes a shipment tracking datum (number) +func (r Account_Shipment_Tracking_Data) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit the properties of a tracking data record by passing in a modified instance of a SoftLayer_Account_Shipment_Tracking_Data object. +func (r Account_Shipment_Tracking_Data) EditObject(templateObject *datatypes.Account_Shipment_Tracking_Data) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The employee who created the tracking datum. +func (r Account_Shipment_Tracking_Data) GetCreateEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getCreateEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who created the tracking datum. +func (r Account_Shipment_Tracking_Data) GetCreateUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getCreateUser", nil, &r.Options, &resp) + return +} + +// Retrieve The employee who last modified the tracking datum. +func (r Account_Shipment_Tracking_Data) GetModifyEmployee() (resp datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getModifyEmployee", nil, &r.Options, &resp) + return +} + +// Retrieve The customer user who last modified the tracking datum. +func (r Account_Shipment_Tracking_Data) GetModifyUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getModifyUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Account_Shipment_Tracking_Data) GetObject() (resp datatypes.Account_Shipment_Tracking_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The shipment of the tracking datum. 
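+//
+// A sketch of targeting a specific tracking datum by id before the call
+// (the id value is a placeholder, and sess is assumed configured as above):
+//
+//	svc := services.GetAccountShipmentTrackingDataService(sess)
+//	shipment, err := svc.Id(5678).GetShipment()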
+func (r Account_Shipment_Tracking_Data) GetShipment() (resp datatypes.Account_Shipment, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Tracking_Data", "getShipment", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Account_Shipment_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAccountShipmentTypeService returns an instance of the Account_Shipment_Type SoftLayer service +func GetAccountShipmentTypeService(sess *session.Session) Account_Shipment_Type { + return Account_Shipment_Type{Session: sess} +} + +func (r Account_Shipment_Type) Id(id int) Account_Shipment_Type { + r.Options.Id = &id + return r +} + +func (r Account_Shipment_Type) Mask(mask string) Account_Shipment_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Account_Shipment_Type) Filter(filter string) Account_Shipment_Type { + r.Options.Filter = filter + return r +} + +func (r Account_Shipment_Type) Limit(limit int) Account_Shipment_Type { + r.Options.Limit = &limit + return r +} + +func (r Account_Shipment_Type) Offset(offset int) Account_Shipment_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Account_Shipment_Type) GetObject() (resp datatypes.Account_Shipment_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Account_Shipment_Type", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/auxiliary.go b/vendor/github.com/softlayer/softlayer-go/services/auxiliary.go new file mode 100644 index 0000000000..59c183e901 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/auxiliary.go @@ -0,0 +1,729 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Auxiliary_Marketing_Event struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryMarketingEventService returns an instance of the Auxiliary_Marketing_Event SoftLayer service +func GetAuxiliaryMarketingEventService(sess *session.Session) Auxiliary_Marketing_Event { + return Auxiliary_Marketing_Event{Session: sess} +} + +func (r Auxiliary_Marketing_Event) Id(id int) Auxiliary_Marketing_Event { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Marketing_Event) Mask(mask string) Auxiliary_Marketing_Event { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Marketing_Event) Filter(filter string) Auxiliary_Marketing_Event { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Marketing_Event) Limit(limit int) Auxiliary_Marketing_Event { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Marketing_Event) Offset(offset int) Auxiliary_Marketing_Event { + r.Options.Offset = &offset + return r +} + +// This method will return a collection of SoftLayer_Auxiliary_Marketing_Event objects ordered in ascending order by start date. +func (r Auxiliary_Marketing_Event) GetMarketingEvents() (resp []datatypes.Auxiliary_Marketing_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Marketing_Event", "getMarketingEvents", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Auxiliary_Marketing_Event) GetObject() (resp datatypes.Auxiliary_Marketing_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Marketing_Event", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Network_Status struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryNetworkStatusService returns an instance of the Auxiliary_Network_Status SoftLayer service +func GetAuxiliaryNetworkStatusService(sess *session.Session) Auxiliary_Network_Status { + return Auxiliary_Network_Status{Session: sess} +} + +func (r Auxiliary_Network_Status) Id(id int) Auxiliary_Network_Status { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Network_Status) Mask(mask string) Auxiliary_Network_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Network_Status) Filter(filter string) Auxiliary_Network_Status { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Network_Status) Limit(limit int) Auxiliary_Network_Status { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Network_Status) Offset(offset int) Auxiliary_Network_Status { + r.Options.Offset = &offset + return r +} + +// Return the current network status of and latency information for a given target from numerous points around the world. 
Valid Targets:
+// * ALL
+// * NETWORK_DALLAS
+// * NETWORK_SEATTLE
+// * NETWORK_PUBLIC
+// * NETWORK_PUBLIC_DALLAS
+// * NETWORK_PUBLIC_SEATTLE
+// * NETWORK_PUBLIC_WDC
+// * NETWORK_PRIVATE
+// * NETWORK_PRIVATE_DALLAS
+// * NETWORK_PRIVATE_SEATTLE
+// * NETWORK_PRIVATE_WDC
+func (r Auxiliary_Network_Status) GetNetworkStatus(target *string) (resp []datatypes.Container_Auxiliary_Network_Status_Reading, err error) {
+	params := []interface{}{
+		target,
+	}
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Network_Status", "getNetworkStatus", params, &r.Options, &resp)
+	return
+}
+
+// A SoftLayer_Auxiliary_Notification_Emergency data object represents a notification event being broadcast to the SoftLayer customer base. It is used to provide information regarding outages or current known issues.
+type Auxiliary_Notification_Emergency struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAuxiliaryNotificationEmergencyService returns an instance of the Auxiliary_Notification_Emergency SoftLayer service
+func GetAuxiliaryNotificationEmergencyService(sess *session.Session) Auxiliary_Notification_Emergency {
+	return Auxiliary_Notification_Emergency{Session: sess}
+}
+
+func (r Auxiliary_Notification_Emergency) Id(id int) Auxiliary_Notification_Emergency {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Auxiliary_Notification_Emergency) Mask(mask string) Auxiliary_Notification_Emergency {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Auxiliary_Notification_Emergency) Filter(filter string) Auxiliary_Notification_Emergency {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Auxiliary_Notification_Emergency) Limit(limit int) Auxiliary_Notification_Emergency {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Auxiliary_Notification_Emergency) Offset(offset int) Auxiliary_Notification_Emergency {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve an array of SoftLayer_Auxiliary_Notification_Emergency data types, which contain all notification events regardless of status.
+func (r Auxiliary_Notification_Emergency) GetAllObjects() (resp []datatypes.Auxiliary_Notification_Emergency, err error) {
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Notification_Emergency", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve an array of SoftLayer_Auxiliary_Notification_Emergency data types, which contain all current notification events.
+func (r Auxiliary_Notification_Emergency) GetCurrentNotifications() (resp []datatypes.Auxiliary_Notification_Emergency, err error) {
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Notification_Emergency", "getCurrentNotifications", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Auxiliary_Notification_Emergency object; it can be used to check for current notifications being broadcast by SoftLayer.
+func (r Auxiliary_Notification_Emergency) GetObject() (resp datatypes.Auxiliary_Notification_Emergency, err error) {
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Notification_Emergency", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The signature of the SoftLayer employee department associated with this notification.
+func (r Auxiliary_Notification_Emergency) GetSignature() (resp datatypes.Auxiliary_Notification_Emergency_Signature, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Notification_Emergency", "getSignature", nil, &r.Options, &resp) + return +} + +// Retrieve The status of this notification. +func (r Auxiliary_Notification_Emergency) GetStatus() (resp datatypes.Auxiliary_Notification_Emergency_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Notification_Emergency", "getStatus", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseService returns an instance of the Auxiliary_Press_Release SoftLayer service +func GetAuxiliaryPressReleaseService(sess *session.Session) Auxiliary_Press_Release { + return Auxiliary_Press_Release{Session: sess} +} + +func (r Auxiliary_Press_Release) Id(id int) Auxiliary_Press_Release { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release) Mask(mask string) Auxiliary_Press_Release { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release) Filter(filter string) Auxiliary_Press_Release { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release) Limit(limit int) Auxiliary_Press_Release { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release) Offset(offset int) Auxiliary_Press_Release { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Auxiliary_Press_Release) GetAbout() (resp []datatypes.Auxiliary_Press_Release_About_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getAbout", nil, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Auxiliary_Press_Release data types, which contain all press releases. +func (r Auxiliary_Press_Release) GetAllObjects() (resp []datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release) GetContacts() (resp []datatypes.Auxiliary_Press_Release_Contact_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getContacts", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release) GetMediaPartners() (resp []datatypes.Auxiliary_Press_Release_Media_Partner_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getMediaPartners", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. +func (r Auxiliary_Press_Release) GetObject() (resp datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release) GetPressReleaseContent() (resp datatypes.Auxiliary_Press_Release_Content, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getPressReleaseContent", nil, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Auxiliary_Press_Release data types, which contain all press releases. 
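+//
+// A sketch, assuming a configured *session.Session named sess; Limit and
+// Offset page through the rendered result set:
+//
+//	svc := services.GetAuxiliaryPressReleaseService(sess)
+//	releases, err := svc.Limit(10).Offset(0).GetRenderedPressRelease()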
+func (r Auxiliary_Press_Release) GetRenderedPressRelease() (resp []datatypes.Auxiliary_Press_Release, err error) {
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getRenderedPressRelease", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve an array of SoftLayer_Auxiliary_Press_Release data types, which contain all press releases for a given year and/or result limit.
+func (r Auxiliary_Press_Release) GetRenderedPressReleases(resultLimit *string, year *string) (resp []datatypes.Auxiliary_Press_Release, err error) {
+	params := []interface{}{
+		resultLimit,
+		year,
+	}
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getRenderedPressReleases", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve an array of SoftLayer_Auxiliary_Press_Release data types, which have the website highlight flag set.
+func (r Auxiliary_Press_Release) GetWebsiteHighlightPressReleases() (resp []datatypes.Auxiliary_Press_Release, err error) {
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release", "getWebsiteHighlightPressReleases", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_About struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAuxiliaryPressReleaseAboutService returns an instance of the Auxiliary_Press_Release_About SoftLayer service
+func GetAuxiliaryPressReleaseAboutService(sess *session.Session) Auxiliary_Press_Release_About {
+	return Auxiliary_Press_Release_About{Session: sess}
+}
+
+func (r Auxiliary_Press_Release_About) Id(id int) Auxiliary_Press_Release_About {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Auxiliary_Press_Release_About) Mask(mask string) Auxiliary_Press_Release_About {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Auxiliary_Press_Release_About) Filter(filter string) Auxiliary_Press_Release_About {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Auxiliary_Press_Release_About) Limit(limit int) Auxiliary_Press_Release_About {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Auxiliary_Press_Release_About) Offset(offset int) Auxiliary_Press_Release_About {
+	r.Options.Offset = &offset
+	return r
+}
+
+// getObject retrieves the SoftLayer_Auxiliary_Press_Release_About object whose about id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service.
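+//
+// A sketch of the init-parameter pattern (the id is a placeholder):
+//
+//	about, err := services.GetAuxiliaryPressReleaseAboutService(sess).Id(42).GetObject()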
+func (r Auxiliary_Press_Release_About) GetObject() (resp datatypes.Auxiliary_Press_Release_About, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_About", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release_About_Press_Release struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseAboutPressReleaseService returns an instance of the Auxiliary_Press_Release_About_Press_Release SoftLayer service +func GetAuxiliaryPressReleaseAboutPressReleaseService(sess *session.Session) Auxiliary_Press_Release_About_Press_Release { + return Auxiliary_Press_Release_About_Press_Release{Session: sess} +} + +func (r Auxiliary_Press_Release_About_Press_Release) Id(id int) Auxiliary_Press_Release_About_Press_Release { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release_About_Press_Release) Mask(mask string) Auxiliary_Press_Release_About_Press_Release { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release_About_Press_Release) Filter(filter string) Auxiliary_Press_Release_About_Press_Release { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release_About_Press_Release) Limit(limit int) Auxiliary_Press_Release_About_Press_Release { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release_About_Press_Release) Offset(offset int) Auxiliary_Press_Release_About_Press_Release { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Auxiliary_Press_Release_About_Press_Release) GetAboutParagraphs() (resp []datatypes.Auxiliary_Press_Release_About, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_About_Press_Release", "getAboutParagraphs", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_About_Press_Release object whose contact id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. 
+func (r Auxiliary_Press_Release_About_Press_Release) GetObject() (resp datatypes.Auxiliary_Press_Release_About_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_About_Press_Release", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release_About_Press_Release) GetPressReleases() (resp []datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_About_Press_Release", "getPressReleases", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release_Contact struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseContactService returns an instance of the Auxiliary_Press_Release_Contact SoftLayer service +func GetAuxiliaryPressReleaseContactService(sess *session.Session) Auxiliary_Press_Release_Contact { + return Auxiliary_Press_Release_Contact{Session: sess} +} + +func (r Auxiliary_Press_Release_Contact) Id(id int) Auxiliary_Press_Release_Contact { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release_Contact) Mask(mask string) Auxiliary_Press_Release_Contact { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release_Contact) Filter(filter string) Auxiliary_Press_Release_Contact { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release_Contact) Limit(limit int) Auxiliary_Press_Release_Contact { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release_Contact) Offset(offset int) Auxiliary_Press_Release_Contact { + r.Options.Offset = &offset + return r +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_Contact object whose contact id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. 
+func (r Auxiliary_Press_Release_Contact) GetObject() (resp datatypes.Auxiliary_Press_Release_Contact, err error) {
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Contact", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Contact_Press_Release struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAuxiliaryPressReleaseContactPressReleaseService returns an instance of the Auxiliary_Press_Release_Contact_Press_Release SoftLayer service
+func GetAuxiliaryPressReleaseContactPressReleaseService(sess *session.Session) Auxiliary_Press_Release_Contact_Press_Release {
+	return Auxiliary_Press_Release_Contact_Press_Release{Session: sess}
+}
+
+func (r Auxiliary_Press_Release_Contact_Press_Release) Id(id int) Auxiliary_Press_Release_Contact_Press_Release {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Auxiliary_Press_Release_Contact_Press_Release) Mask(mask string) Auxiliary_Press_Release_Contact_Press_Release {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Auxiliary_Press_Release_Contact_Press_Release) Filter(filter string) Auxiliary_Press_Release_Contact_Press_Release {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Auxiliary_Press_Release_Contact_Press_Release) Limit(limit int) Auxiliary_Press_Release_Contact_Press_Release {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Auxiliary_Press_Release_Contact_Press_Release) Offset(offset int) Auxiliary_Press_Release_Contact_Press_Release {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve
+func (r Auxiliary_Press_Release_Contact_Press_Release) GetContacts() (resp []datatypes.Auxiliary_Press_Release_Contact, err error) {
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Contact_Press_Release", "getContacts", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Auxiliary_Press_Release_Contact_Press_Release object whose contact id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service.
+func (r Auxiliary_Press_Release_Contact_Press_Release) GetObject() (resp datatypes.Auxiliary_Press_Release_Contact_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Contact_Press_Release", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release_Contact_Press_Release) GetPressReleases() (resp []datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Contact_Press_Release", "getPressReleases", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Press_Release_Content struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryPressReleaseContentService returns an instance of the Auxiliary_Press_Release_Content SoftLayer service +func GetAuxiliaryPressReleaseContentService(sess *session.Session) Auxiliary_Press_Release_Content { + return Auxiliary_Press_Release_Content{Session: sess} +} + +func (r Auxiliary_Press_Release_Content) Id(id int) Auxiliary_Press_Release_Content { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Press_Release_Content) Mask(mask string) Auxiliary_Press_Release_Content { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Press_Release_Content) Filter(filter string) Auxiliary_Press_Release_Content { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Press_Release_Content) Limit(limit int) Auxiliary_Press_Release_Content { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Press_Release_Content) Offset(offset int) Auxiliary_Press_Release_Content { + r.Options.Offset = &offset + return r +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_Content object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. 
+func (r Auxiliary_Press_Release_Content) GetObject() (resp datatypes.Auxiliary_Press_Release_Content, err error) {
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Content", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Media_Partner struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAuxiliaryPressReleaseMediaPartnerService returns an instance of the Auxiliary_Press_Release_Media_Partner SoftLayer service
+func GetAuxiliaryPressReleaseMediaPartnerService(sess *session.Session) Auxiliary_Press_Release_Media_Partner {
+	return Auxiliary_Press_Release_Media_Partner{Session: sess}
+}
+
+func (r Auxiliary_Press_Release_Media_Partner) Id(id int) Auxiliary_Press_Release_Media_Partner {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Auxiliary_Press_Release_Media_Partner) Mask(mask string) Auxiliary_Press_Release_Media_Partner {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Auxiliary_Press_Release_Media_Partner) Filter(filter string) Auxiliary_Press_Release_Media_Partner {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Auxiliary_Press_Release_Media_Partner) Limit(limit int) Auxiliary_Press_Release_Media_Partner {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Auxiliary_Press_Release_Media_Partner) Offset(offset int) Auxiliary_Press_Release_Media_Partner {
+	r.Options.Offset = &offset
+	return r
+}
+
+// getObject retrieves the SoftLayer_Auxiliary_Press_Release_Media_Partner object whose media partner id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service.
+func (r Auxiliary_Press_Release_Media_Partner) GetObject() (resp datatypes.Auxiliary_Press_Release_Media_Partner, err error) {
+	err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Media_Partner", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Auxiliary_Press_Release_Media_Partner_Press_Release struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetAuxiliaryPressReleaseMediaPartnerPressReleaseService returns an instance of the Auxiliary_Press_Release_Media_Partner_Press_Release SoftLayer service
+func GetAuxiliaryPressReleaseMediaPartnerPressReleaseService(sess *session.Session) Auxiliary_Press_Release_Media_Partner_Press_Release {
+	return Auxiliary_Press_Release_Media_Partner_Press_Release{Session: sess}
+}
+
+func (r Auxiliary_Press_Release_Media_Partner_Press_Release) Id(id int) Auxiliary_Press_Release_Media_Partner_Press_Release {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Auxiliary_Press_Release_Media_Partner_Press_Release) Mask(mask string) Auxiliary_Press_Release_Media_Partner_Press_Release {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Auxiliary_Press_Release_Media_Partner_Press_Release) Filter(filter string) Auxiliary_Press_Release_Media_Partner_Press_Release {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Auxiliary_Press_Release_Media_Partner_Press_Release) Limit(limit int) Auxiliary_Press_Release_Media_Partner_Press_Release {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Auxiliary_Press_Release_Media_Partner_Press_Release) Offset(offset int) Auxiliary_Press_Release_Media_Partner_Press_Release {
r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) GetMediaPartners() (resp []datatypes.Auxiliary_Press_Release_Media_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Media_Partner_Press_Release", "getMediaPartners", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Auxiliary_Press_Release_Media_Partner_Press_Release object whose media partner id number corresponds to the ID number of the init parameter passed to the SoftLayer_Auxiliary_Press_Release service. +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) GetObject() (resp datatypes.Auxiliary_Press_Release_Media_Partner_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Media_Partner_Press_Release", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Auxiliary_Press_Release_Media_Partner_Press_Release) GetPressReleases() (resp []datatypes.Auxiliary_Press_Release, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Press_Release_Media_Partner_Press_Release", "getPressReleases", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Auxiliary_Shipping_Courier_Type struct { + Session *session.Session + Options sl.Options +} + +// GetAuxiliaryShippingCourierTypeService returns an instance of the Auxiliary_Shipping_Courier_Type SoftLayer service +func GetAuxiliaryShippingCourierTypeService(sess *session.Session) Auxiliary_Shipping_Courier_Type { + return Auxiliary_Shipping_Courier_Type{Session: sess} +} + +func (r Auxiliary_Shipping_Courier_Type) Id(id int) Auxiliary_Shipping_Courier_Type { + r.Options.Id = &id + return r +} + +func (r Auxiliary_Shipping_Courier_Type) Mask(mask string) Auxiliary_Shipping_Courier_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Auxiliary_Shipping_Courier_Type) Filter(filter string) Auxiliary_Shipping_Courier_Type { + r.Options.Filter = filter + return r +} + +func (r Auxiliary_Shipping_Courier_Type) Limit(limit int) Auxiliary_Shipping_Courier_Type { + r.Options.Limit = &limit + return r +} + +func (r Auxiliary_Shipping_Courier_Type) Offset(offset int) Auxiliary_Shipping_Courier_Type { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Auxiliary_Shipping_Courier_Type) GetCourier() (resp []datatypes.Auxiliary_Shipping_Courier, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Shipping_Courier_Type", "getCourier", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Auxiliary_Shipping_Courier_Type) GetObject() (resp datatypes.Auxiliary_Shipping_Courier_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Auxiliary_Shipping_Courier_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Auxiliary_Shipping_Courier_Type) GetTypeByKeyName(keyName *string) (resp datatypes.Auxiliary_Shipping_Courier_Type, err error) { + params := []interface{}{ + keyName, + } + err = r.Session.DoRequest("SoftLayer_Auxiliary_Shipping_Courier_Type", "getTypeByKeyName", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/billing.go b/vendor/github.com/softlayer/softlayer-go/services/billing.go new file mode 100644 index 0000000000..7f1fa03f6d --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/billing.go @@ 
-0,0 +1,2744 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Billing_Currency struct { + Session *session.Session + Options sl.Options +} + +// GetBillingCurrencyService returns an instance of the Billing_Currency SoftLayer service +func GetBillingCurrencyService(sess *session.Session) Billing_Currency { + return Billing_Currency{Session: sess} +} + +func (r Billing_Currency) Id(id int) Billing_Currency { + r.Options.Id = &id + return r +} + +func (r Billing_Currency) Mask(mask string) Billing_Currency { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Currency) Filter(filter string) Billing_Currency { + r.Options.Filter = filter + return r +} + +func (r Billing_Currency) Limit(limit int) Billing_Currency { + r.Options.Limit = &limit + return r +} + +func (r Billing_Currency) Offset(offset int) Billing_Currency { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Billing_Currency) GetAllObjects() (resp []datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency) GetObject() (resp datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency) GetPrice(price *datatypes.Float64, formatOptions *datatypes.Container_Billing_Currency_Format) (resp string, err error) { + params := []interface{}{ + price, + formatOptions, + } + err = r.Session.DoRequest("SoftLayer_Billing_Currency", "getPrice", params, &r.Options, &resp) + return +} + +// The SoftLayer_Billing_Currency_Country data type maps what currencies are valid for specific countries. US Dollars are valid from any country, but other currencies are only available to customers in certain countries. 
+type Billing_Currency_Country struct { + Session *session.Session + Options sl.Options +} + +// GetBillingCurrencyCountryService returns an instance of the Billing_Currency_Country SoftLayer service +func GetBillingCurrencyCountryService(sess *session.Session) Billing_Currency_Country { + return Billing_Currency_Country{Session: sess} +} + +func (r Billing_Currency_Country) Id(id int) Billing_Currency_Country { + r.Options.Id = &id + return r +} + +func (r Billing_Currency_Country) Mask(mask string) Billing_Currency_Country { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Currency_Country) Filter(filter string) Billing_Currency_Country { + r.Options.Filter = filter + return r +} + +func (r Billing_Currency_Country) Limit(limit int) Billing_Currency_Country { + r.Options.Limit = &limit + return r +} + +func (r Billing_Currency_Country) Offset(offset int) Billing_Currency_Country { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Billing_Currency_Country) GetCountriesWithListOfEligibleCurrencies() (resp []datatypes.Container_Billing_Currency_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_Country", "getCountriesWithListOfEligibleCurrencies", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency_Country) GetObject() (resp datatypes.Billing_Currency_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_Country", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Billing_Currency_ExchangeRate struct { + Session *session.Session + Options sl.Options +} + +// GetBillingCurrencyExchangeRateService returns an instance of the Billing_Currency_ExchangeRate SoftLayer service +func GetBillingCurrencyExchangeRateService(sess *session.Session) Billing_Currency_ExchangeRate { + return Billing_Currency_ExchangeRate{Session: sess} +} + +func (r Billing_Currency_ExchangeRate) Id(id int) Billing_Currency_ExchangeRate { + r.Options.Id = &id + return r +} + +func (r Billing_Currency_ExchangeRate) Mask(mask string) Billing_Currency_ExchangeRate { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Currency_ExchangeRate) Filter(filter string) Billing_Currency_ExchangeRate { + r.Options.Filter = filter + return r +} + +func (r Billing_Currency_ExchangeRate) Limit(limit int) Billing_Currency_ExchangeRate { + r.Options.Limit = &limit + return r +} + +func (r Billing_Currency_ExchangeRate) Offset(offset int) Billing_Currency_ExchangeRate { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Billing_Currency_ExchangeRate) GetAllCurrencyExchangeRates(stringDate *string) (resp []datatypes.Billing_Currency_ExchangeRate, err error) { + params := []interface{}{ + stringDate, + } + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getAllCurrencyExchangeRates", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency_ExchangeRate) GetCurrencies() (resp []datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getCurrencies", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency_ExchangeRate) 
GetExchangeRate(to *string, from *string, effectiveDate *datatypes.Time) (resp datatypes.Billing_Currency_ExchangeRate, err error) { + params := []interface{}{ + to, + from, + effectiveDate, + } + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getExchangeRate", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Currency_ExchangeRate) GetFundingCurrency() (resp datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getFundingCurrency", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Currency_ExchangeRate) GetLocalCurrency() (resp datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getLocalCurrency", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency_ExchangeRate) GetObject() (resp datatypes.Billing_Currency_ExchangeRate, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Currency_ExchangeRate) GetPrice(price *datatypes.Float64, formatOptions *datatypes.Container_Billing_Currency_Format) (resp string, err error) { + params := []interface{}{ + price, + formatOptions, + } + err = r.Session.DoRequest("SoftLayer_Billing_Currency_ExchangeRate", "getPrice", params, &r.Options, &resp) + return +} + +// Every SoftLayer customer account has billing specific information which is kept in the SoftLayer_Billing_Info data type. This information is used by the SoftLayer accounting group when sending invoices and making billing inquiries. +type Billing_Info struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInfoService returns an instance of the Billing_Info SoftLayer service +func GetBillingInfoService(sess *session.Session) Billing_Info { + return Billing_Info{Session: sess} +} + +func (r Billing_Info) Id(id int) Billing_Info { + r.Options.Id = &id + return r +} + +func (r Billing_Info) Mask(mask string) Billing_Info { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Info) Filter(filter string) Billing_Info { + r.Options.Filter = filter + return r +} + +func (r Billing_Info) Limit(limit int) Billing_Info { + r.Options.Limit = &limit + return r +} + +func (r Billing_Info) Offset(offset int) Billing_Info { + r.Options.Offset = &offset + return r +} + +// Retrieve The SoftLayer customer account associated with this billing information. +func (r Billing_Info) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Info) GetAchInformation() (resp []datatypes.Billing_Info_Ach, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getAchInformation", nil, &r.Options, &resp) + return +} + +// Retrieve Currency to be used by this customer account. +func (r Billing_Info) GetCurrency() (resp datatypes.Billing_Currency, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getCurrency", nil, &r.Options, &resp) + return +} + +// Retrieve Information related to an account's current and previous billing cycles. 
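+//
+// A sketch, assuming a configured sess; SoftLayer_Billing_Info is scoped to
+// the authenticated account, so no Id is required:
+//
+//	info := services.GetBillingInfoService(sess)
+//	cycle, err := info.GetCurrentBillingCycle()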
+func (r Billing_Info) GetCurrentBillingCycle() (resp datatypes.Billing_Info_Cycle, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getCurrentBillingCycle", nil, &r.Options, &resp) + return +} + +// Retrieve The date on which an account was last billed. +func (r Billing_Info) GetLastBillDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getLastBillDate", nil, &r.Options, &resp) + return +} + +// Retrieve The date on which an account will be billed next. +func (r Billing_Info) GetNextBillDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getNextBillDate", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Info object whose data corresponds to the account to which your portal user is tied. +func (r Billing_Info) GetObject() (resp datatypes.Billing_Info, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Info", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Billing_Invoice data type contains general information relating to an individual invoice applied to a SoftLayer customer account. Personal information in this type such as names, addresses, and phone numbers are taken from the account's contact information at the time the invoice is generated. +type Billing_Invoice struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInvoiceService returns an instance of the Billing_Invoice SoftLayer service +func GetBillingInvoiceService(sess *session.Session) Billing_Invoice { + return Billing_Invoice{Session: sess} +} + +func (r Billing_Invoice) Id(id int) Billing_Invoice { + r.Options.Id = &id + return r +} + +func (r Billing_Invoice) Mask(mask string) Billing_Invoice { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Invoice) Filter(filter string) Billing_Invoice { + r.Options.Filter = filter + return r +} + +func (r Billing_Invoice) Limit(limit int) Billing_Invoice { + r.Options.Limit = &limit + return r +} + +func (r Billing_Invoice) Offset(offset int) Billing_Invoice { + r.Options.Offset = &offset + return r +} + +// Create a transaction to email PDF and/or Excel invoice links to the requesting user's email address. You must have a PDF reader installed in order to view these files. +func (r Billing_Invoice) EmailInvoices(options *datatypes.Container_Billing_Invoice_Email) (err error) { + var resp datatypes.Void + params := []interface{}{ + options, + } + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "emailInvoices", params, &r.Options, &resp) + return +} + +// Retrieve The account that an invoice belongs to. +func (r Billing_Invoice) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve This is the amount of this invoice. +func (r Billing_Invoice) GetAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getAmount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Invoice) GetBrandAtInvoiceCreation() (resp datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getBrandAtInvoiceCreation", nil, &r.Options, &resp) + return +} + +// Retrieve A flag that will reflect whether the detailed version of the pdf has been generated. 
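+//
+// A sketch of checking this flag before requesting the detailed PDF via
+// GetPdfDetailed below (the invoice id is a placeholder):
+//
+//	svc := services.GetBillingInvoiceService(sess).Id(987654)
+//	if ok, err := svc.GetDetailedPdfGeneratedFlag(); err == nil && ok {
+//		pdf, _ := svc.GetPdfDetailed()
+//		_ = pdf // raw PDF bytes
+//	}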
+func (r Billing_Invoice) GetDetailedPdfGeneratedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getDetailedPdfGeneratedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve a Microsoft Excel spreadsheet of a SoftLayer invoice. You must have a Microsoft Excel reader installed in order to view these invoice files. +func (r Billing_Invoice) GetExcel() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getExcel", nil, &r.Options, &resp) + return +} + +// Retrieve A list of top-level invoice items that are on the currently pending invoice. +func (r Billing_Invoice) GetInvoiceTopLevelItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTopLevelItems", nil, &r.Options, &resp) + return +} + +// Retrieve The total amount of this invoice. +func (r Billing_Invoice) GetInvoiceTotalAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total one-time charges for this invoice. This is the sum of one-time charges + setup fees + labor fees. This does not include taxes. +func (r Billing_Invoice) GetInvoiceTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A sum of all the taxes related to one time charges for this invoice. +func (r Billing_Invoice) GetInvoiceTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total amount of this invoice. This does not include taxes. +func (r Billing_Invoice) GetInvoiceTotalPreTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalPreTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total Recurring amount of this invoice. This amount does not include taxes or one time charges. +func (r Billing_Invoice) GetInvoiceTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total amount of the recurring taxes on this invoice. +func (r Billing_Invoice) GetInvoiceTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getInvoiceTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The items that belong to this invoice. +func (r Billing_Invoice) GetItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getItems", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Invoice object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Invoice service. You can only retrieve invoices that are assigned to your portal user's account. +func (r Billing_Invoice) GetObject() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve This is the total payment made on this invoice. 
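+//
+// A sketch (placeholder invoice id):
+//
+//	paid, err := services.GetBillingInvoiceService(sess).Id(987654).GetPayment()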
+func (r Billing_Invoice) GetPayment() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPayment", nil, &r.Options, &resp) + return +} + +// Retrieve The payments for the invoice. +func (r Billing_Invoice) GetPayments() (resp []datatypes.Billing_Invoice_Receivable_Payment, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPayments", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF record of a SoftLayer invoice. SoftLayer keeps PDF records of all closed invoices for customer retrieval from the portal and API. You must have a PDF reader installed in order to view these invoice files. +func (r Billing_Invoice) GetPdf() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPdf", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF record of a SoftLayer detailed invoice summary. SoftLayer keeps PDF records of all closed invoices for customer retrieval from the portal and API. You must have a PDF reader installed in order to view these files. +func (r Billing_Invoice) GetPdfDetailed() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPdfDetailed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice) GetPdfDetailedFilename() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPdfDetailedFilename", nil, &r.Options, &resp) + return +} + +// Retrieve the size of a PDF record of a SoftLayer invoice. SoftLayer keeps PDF records of all closed invoices for customer retrieval from the portal and API. +func (r Billing_Invoice) GetPdfFileSize() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPdfFileSize", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice) GetPdfFilename() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPdfFilename", nil, &r.Options, &resp) + return +} + +// Retrieve a Microsoft Excel record of a SoftLayer invoice. SoftLayer generates Microsoft Excel records of all closed invoices for customer retrieval from the portal and API. You must have a Microsoft Excel reader installed in order to view these invoice files. +func (r Billing_Invoice) GetPreliminaryExcel() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPreliminaryExcel", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF record of a SoftLayer invoice. SoftLayer keeps PDF records of all closed invoices for customer retrieval from the portal and API. You must have a PDF reader installed in order to view these invoice files. +func (r Billing_Invoice) GetPreliminaryPdf() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPreliminaryPdf", nil, &r.Options, &resp) + return +} + +// Retrieve a PDF record of the detailed version of a SoftLayer invoice. SoftLayer keeps PDF records of all closed invoices for customer retrieval from the portal and API. +func (r Billing_Invoice) GetPreliminaryPdfDetailed() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getPreliminaryPdfDetailed", nil, &r.Options, &resp) + return +} + +// Retrieve This is the seller's tax registration. 
+func (r Billing_Invoice) GetSellerRegistration() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getSellerRegistration", nil, &r.Options, &resp) + return +} + +// Retrieve This is the tax information that applies to tax auditing. This is the official tax record for this invoice. +func (r Billing_Invoice) GetTaxInfo() (resp datatypes.Billing_Invoice_Tax_Info, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getTaxInfo", nil, &r.Options, &resp) + return +} + +// Retrieve This is the set of tax information for any tax calculation for this invoice. Note that not all of these are necessarily official, so use the taxInfo key to get the final information. +func (r Billing_Invoice) GetTaxInfoHistory() (resp []datatypes.Billing_Invoice_Tax_Info, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getTaxInfoHistory", nil, &r.Options, &resp) + return +} + +// Retrieve This is a message explaining the tax treatment for this invoice. +func (r Billing_Invoice) GetTaxMessage() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getTaxMessage", nil, &r.Options, &resp) + return +} + +// Retrieve This is the strategy used to calculate tax on this invoice. +func (r Billing_Invoice) GetTaxType() (resp datatypes.Billing_Invoice_Tax_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getTaxType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice) GetXlsFilename() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getXlsFilename", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice) GetZeroFeeItemCounts() (resp []datatypes.Container_Product_Item_Category_ZeroFee_Count, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice", "getZeroFeeItemCounts", nil, &r.Options, &resp) + return +} + +// Each billing invoice item makes up a record within an invoice. This provides you with a detailed record of everything related to an invoice item. When you are billed, our system takes active billing items and creates an invoice. These invoice items are a copy of your active billing items, and make up the contents of your invoice. +type Billing_Invoice_Item struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInvoiceItemService returns an instance of the Billing_Invoice_Item SoftLayer service +func GetBillingInvoiceItemService(sess *session.Session) Billing_Invoice_Item { + return Billing_Invoice_Item{Session: sess} +} + +func (r Billing_Invoice_Item) Id(id int) Billing_Invoice_Item { + r.Options.Id = &id + return r +} + +func (r Billing_Invoice_Item) Mask(mask string) Billing_Invoice_Item { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Invoice_Item) Filter(filter string) Billing_Invoice_Item { + r.Options.Filter = filter + return r +} + +func (r Billing_Invoice_Item) Limit(limit int) Billing_Invoice_Item { + r.Options.Limit = &limit + return r +} + +func (r Billing_Invoice_Item) Offset(offset int) Billing_Invoice_Item { + r.Options.Offset = &offset + return r +} + +// Retrieve An Invoice Item's associated child invoice items. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. 
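+//
+// A sketch of listing a parent item's associated children (placeholder item id):
+//
+//	children, err := services.GetBillingInvoiceItemService(sess).Id(112233).GetAssociatedChildren()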
+func (r Billing_Invoice_Item) GetAssociatedChildren() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve An Invoice Item's associated invoice item. If this is populated, it means this is an orphaned invoice item, but logically belongs to the associated invoice item. +func (r Billing_Invoice_Item) GetAssociatedInvoiceItem() (resp datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getAssociatedInvoiceItem", nil, &r.Options, &resp) + return +} + +// Retrieve An Invoice Item's billing item, from which this item was generated. +func (r Billing_Invoice_Item) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve This invoice item's "item category". +func (r Billing_Invoice_Item) GetCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getCategory", nil, &r.Options, &resp) + return +} + +// Retrieve An Invoice Item's child invoice items. Only parent invoice items have children. For instance, a server invoice item will have children. +func (r Billing_Invoice_Item) GetChildren() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve An Invoice Item's associated child invoice items, excluding some items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. +func (r Billing_Invoice_Item) GetFilteredAssociatedChildren() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getFilteredAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve Indicating whether this invoice item is billed on an hourly basis. +func (r Billing_Invoice_Item) GetHourlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getHourlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The invoice to which this item belongs. +func (r Billing_Invoice_Item) GetInvoice() (resp datatypes.Billing_Invoice, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getInvoice", nil, &r.Options, &resp) + return +} + +// Retrieve An invoice item's location, if one exists.' +func (r Billing_Invoice_Item) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve An Invoice Item's associated child invoice items, excluding ALL items with a $0.00 recurring fee. Only parent invoice items have associated children. For instance, a server invoice item may have associated children. +func (r Billing_Invoice_Item) GetNonZeroAssociatedChildren() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getNonZeroAssociatedChildren", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Invoice_Item object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Invoice_Item service. 
You can only retrieve the items tied to the account that your portal user is assigned to. +func (r Billing_Invoice_Item) GetObject() (resp datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Every item tied to a server should have a parent invoice item which is the server line item. This is how we associate items to a server. +func (r Billing_Invoice_Item) GetParent() (resp datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getParent", nil, &r.Options, &resp) + return +} + +// Retrieve The entry in the product catalog that an invoice item is based upon. +func (r Billing_Invoice_Item) GetProduct() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getProduct", nil, &r.Options, &resp) + return +} + +// Retrieve A string representing the name of the parent level product group of an invoice item. +func (r Billing_Invoice_Item) GetTopLevelProductGroupName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getTopLevelProductGroupName", nil, &r.Options, &resp) + return +} + +// Retrieve An invoice Item's total, including any child invoice items if they exist. +func (r Billing_Invoice_Item) GetTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An invoice Item's total, including any child invoice items if they exist. +func (r Billing_Invoice_Item) GetTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An invoice Item's total, including any child invoice items if they exist. +func (r Billing_Invoice_Item) GetTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve An invoice Item's total, including any child invoice items if they exist.
+func (r Billing_Invoice_Item) GetTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Item", "getTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Billing_Invoice_Next struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInvoiceNextService returns an instance of the Billing_Invoice_Next SoftLayer service +func GetBillingInvoiceNextService(sess *session.Session) Billing_Invoice_Next { + return Billing_Invoice_Next{Session: sess} +} + +func (r Billing_Invoice_Next) Id(id int) Billing_Invoice_Next { + r.Options.Id = &id + return r +} + +func (r Billing_Invoice_Next) Mask(mask string) Billing_Invoice_Next { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Invoice_Next) Filter(filter string) Billing_Invoice_Next { + r.Options.Filter = filter + return r +} + +func (r Billing_Invoice_Next) Limit(limit int) Billing_Invoice_Next { + r.Options.Limit = &limit + return r +} + +func (r Billing_Invoice_Next) Offset(offset int) Billing_Invoice_Next { + r.Options.Offset = &offset + return r +} + +// Return an account's next invoice in Microsoft Excel format. +func (r Billing_Invoice_Next) GetExcel(documentCreateDate *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + documentCreateDate, + } + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Next", "getExcel", params, &r.Options, &resp) + return +} + +// Return an account's next invoice in PDF format. +func (r Billing_Invoice_Next) GetPdf(documentCreateDate *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + documentCreateDate, + } + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Next", "getPdf", params, &r.Options, &resp) + return +} + +// Return the detailed portion of an account's next invoice in PDF format. +func (r Billing_Invoice_Next) GetPdfDetailed(documentCreateDate *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + documentCreateDate, + } + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Next", "getPdfDetailed", params, &r.Options, &resp) + return +} + +// The invoice tax status data type models a single status or state that an invoice can reflect in regard to an integration with a third-party tax calculation service.
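As a hedged illustration of the document-download methods just shown (they return the raw file bytes), the fragment below saves the upcoming invoice as a PDF. It reuses the sess value from the earlier sketch and the io/ioutil and log imports; passing nil for documentCreateDate is an assumption based on the pointer parameter, not documented behavior.

```go
// Fetch the upcoming invoice as a PDF and write the raw bytes to disk.
// A nil documentCreateDate is assumed to mean "use the default".
pdf, err := services.GetBillingInvoiceNextService(sess).GetPdf(nil)
if err != nil {
	log.Fatal(err)
}
if err := ioutil.WriteFile("next-invoice.pdf", pdf, 0600); err != nil {
	log.Fatal(err)
}
```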
+type Billing_Invoice_Tax_Status struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInvoiceTaxStatusService returns an instance of the Billing_Invoice_Tax_Status SoftLayer service +func GetBillingInvoiceTaxStatusService(sess *session.Session) Billing_Invoice_Tax_Status { + return Billing_Invoice_Tax_Status{Session: sess} +} + +func (r Billing_Invoice_Tax_Status) Id(id int) Billing_Invoice_Tax_Status { + r.Options.Id = &id + return r +} + +func (r Billing_Invoice_Tax_Status) Mask(mask string) Billing_Invoice_Tax_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Invoice_Tax_Status) Filter(filter string) Billing_Invoice_Tax_Status { + r.Options.Filter = filter + return r +} + +func (r Billing_Invoice_Tax_Status) Limit(limit int) Billing_Invoice_Tax_Status { + r.Options.Limit = &limit + return r +} + +func (r Billing_Invoice_Tax_Status) Offset(offset int) Billing_Invoice_Tax_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Billing_Invoice_Tax_Status) GetAllObjects() (resp []datatypes.Billing_Invoice_Tax_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Tax_Status", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice_Tax_Status) GetObject() (resp datatypes.Billing_Invoice_Tax_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Tax_Status", "getObject", nil, &r.Options, &resp) + return +} + +// The invoice tax type data type models a single strategy for handling tax calculations. +type Billing_Invoice_Tax_Type struct { + Session *session.Session + Options sl.Options +} + +// GetBillingInvoiceTaxTypeService returns an instance of the Billing_Invoice_Tax_Type SoftLayer service +func GetBillingInvoiceTaxTypeService(sess *session.Session) Billing_Invoice_Tax_Type { + return Billing_Invoice_Tax_Type{Session: sess} +} + +func (r Billing_Invoice_Tax_Type) Id(id int) Billing_Invoice_Tax_Type { + r.Options.Id = &id + return r +} + +func (r Billing_Invoice_Tax_Type) Mask(mask string) Billing_Invoice_Tax_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Invoice_Tax_Type) Filter(filter string) Billing_Invoice_Tax_Type { + r.Options.Filter = filter + return r +} + +func (r Billing_Invoice_Tax_Type) Limit(limit int) Billing_Invoice_Tax_Type { + r.Options.Limit = &limit + return r +} + +func (r Billing_Invoice_Tax_Type) Offset(offset int) Billing_Invoice_Tax_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Billing_Invoice_Tax_Type) GetAllObjects() (resp []datatypes.Billing_Invoice_Tax_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Tax_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Invoice_Tax_Type) GetObject() (resp datatypes.Billing_Invoice_Tax_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Invoice_Tax_Type", "getObject", nil, &r.Options, &resp) + return +} + +// Every individual item that a SoftLayer customer is billed for is recorded in the SoftLayer_Billing_Item data type. 
Billing items range from server chassis to hard drives to control panels, bandwidth quota upgrades and port upgrade charges. SoftLayer [[SoftLayer_Billing_Invoice|invoices]] are generated from the cost of a customer's billing items. Billing items are copied from the product catalog as they're ordered by customers to create a reference between an account and the billable items they own. +// +// Billing items exist in a tree relationship. Items are associated with each other by parent/child relationships. Component items such as CPUs, RAM, and software each have a parent billing item for the server chassis they're associated with. Billing items with a null parent item do not have an associated parent item. +type Billing_Item struct { + Session *session.Session + Options sl.Options +} + +// GetBillingItemService returns an instance of the Billing_Item SoftLayer service +func GetBillingItemService(sess *session.Session) Billing_Item { + return Billing_Item{Session: sess} +} + +func (r Billing_Item) Id(id int) Billing_Item { + r.Options.Id = &id + return r +} + +func (r Billing_Item) Mask(mask string) Billing_Item { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Item) Filter(filter string) Billing_Item { + r.Options.Filter = filter + return r +} + +func (r Billing_Item) Limit(limit int) Billing_Item { + r.Options.Limit = &limit + return r +} + +func (r Billing_Item) Offset(offset int) Billing_Item { + r.Options.Offset = &offset + return r +} + +// Cancel the resource or service for a billing Item. By default the billing item will be cancelled immediately and reclaim of the resource will begin shortly. Setting the "cancelImmediately" property to false will delay the cancellation until the next bill date. +// +// +// * The reason parameter could be from the list below: +// * "No longer needed" +// * "Business closing down" +// * "Server / Upgrade Costs" +// * "Migrating to larger server" +// * "Migrating to smaller server" +// * "Migrating to a different SoftLayer datacenter" +// * "Network performance / latency" +// * "Support response / timing" +// * "Sales process / upgrades" +// * "Moving to competitor" +func (r Billing_Item) CancelItem(cancelImmediately *bool, cancelAssociatedBillingItems *bool, reason *string, customerNote *string) (resp bool, err error) { + params := []interface{}{ + cancelImmediately, + cancelAssociatedBillingItems, + reason, + customerNote, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item", "cancelItem", params, &r.Options, &resp) + return +} + +// Cancel the resource or service (excluding bare metal servers) for a billing Item. The billing item will be cancelled immediately and reclaim of the resource will begin shortly. +func (r Billing_Item) CancelService() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "cancelService", nil, &r.Options, &resp) + return +} + +// Cancel the resource or service for a billing Item +func (r Billing_Item) CancelServiceOnAnniversaryDate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "cancelServiceOnAnniversaryDate", nil, &r.Options, &resp) + return +} + +// Retrieve The account that a billing item belongs to.
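The CancelItem signature above takes pointers for every argument, so a sketch using the pointer helpers from the softlayer-go sl package may help. The billing item ID is a placeholder, the reason string must be one of the documented values listed above, and sess comes from the earlier sketch.

```go
billingItemID := 123456 // placeholder

// Cancel at the next bill date rather than immediately, with one of the
// reason strings documented above.
ok, err := services.GetBillingItemService(sess).
	Id(billingItemID).
	CancelItem(
		sl.Bool(false),                // cancelImmediately: wait for the bill date
		sl.Bool(true),                 // cancel associated billing items too
		sl.String("No longer needed"), // must be a documented reason
		sl.String("resource no longer required"), // free-form customer note
	)
if err != nil {
	log.Fatal(err)
}
fmt.Println("cancellation accepted:", ok)
```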
+func (r Billing_Item) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item) GetActiveAgreement() (resp datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveAgreement", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the billing item is under an active agreement. +func (r Billing_Item) GetActiveAgreementFlag() (resp datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveAgreementFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item. +func (r Billing_Item) GetActiveAssociatedChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item) GetActiveAssociatedGuestDiskBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveAssociatedGuestDiskBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active bundled billing items. +func (r Billing_Item) GetActiveBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A service cancellation request item that corresponds to the billing item. +func (r Billing_Item) GetActiveCancellationItem() (resp datatypes.Billing_Item_Cancellation_Request_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveCancellationItem", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active child billing items. +func (r Billing_Item) GetActiveChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item) GetActiveFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item) GetActiveSparePoolAssociatedGuestDiskBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveSparePoolAssociatedGuestDiskBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's spare pool bundled billing items. +func (r Billing_Item) GetActiveSparePoolBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getActiveSparePoolBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's associated parent. This is to be used for billing items that are "floating", and therefore are not child items of any parent billing item. If it is desired to associate an item to another, populate this with the SoftLayer_Billing_Item ID of that associated parent item. +func (r Billing_Item) GetAssociatedBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAssociatedBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A history of billing items which a billing item has been associated with. 
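Given the parent/child tree described above, a short sketch of walking one level of a server's active children may also be useful. serverBillingItemID is a placeholder, the Id and Description fields are assumed from the SoftLayer_Billing_Item metadata, and sess is from the earlier sketch.

```go
serverBillingItemID := 123456 // placeholder parent (server) billing item

children, err := services.GetBillingItemService(sess).
	Id(serverBillingItemID).
	GetActiveChildren()
if err != nil {
	log.Fatal(err)
}
for _, child := range children {
	// Fields are pointers in the generated datatypes, so guard against nil.
	if child.Id != nil && child.Description != nil {
		fmt.Println(*child.Id, *child.Description)
	}
}
```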
+func (r Billing_Item) GetAssociatedBillingItemHistory() (resp []datatypes.Billing_Item_Association_History, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAssociatedBillingItemHistory", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items. This includes "floating" items that are not necessarily child billing items of this billing item. +func (r Billing_Item) GetAssociatedChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's associated parent billing item. This object will be the same as the parent billing item if parentId is set. +func (r Billing_Item) GetAssociatedParent() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAssociatedParent", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item) GetAvailableMatchingVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getAvailableMatchingVlans", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allocation for a billing item. +func (r Billing_Item) GetBandwidthAllocation() (resp datatypes.Network_Bandwidth_Version1_Allocation, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's recurring child items that have once been billed and are scheduled to be billed in the future. +func (r Billing_Item) GetBillableChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getBillableChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's bundled billing items +func (r Billing_Item) GetBundleItems() (resp []datatypes.Product_Item_Bundles, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getBundleItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's bundled billing items +func (r Billing_Item) GetBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's canceled child billing items. +func (r Billing_Item) GetCanceledChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getCanceledChildren", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item's cancellation reason. +func (r Billing_Item) GetCancellationReason() (resp datatypes.Billing_Item_Cancellation_Reason, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getCancellationReason", nil, &r.Options, &resp) + return +} + +// Retrieve This will return any cancellation requests that are associated with this billing item. +func (r Billing_Item) GetCancellationRequests() (resp []datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getCancellationRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The item category to which the billing item's item belongs.
+func (r Billing_Item) GetCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getCategory", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's child billing items +func (r Billing_Item) GetChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's child billing items with an active agreement. +func (r Billing_Item) GetChildrenWithActiveAgreement() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getChildrenWithActiveAgreement", nil, &r.Options, &resp) + return +} + +// Retrieve For product items which have a downgrade path defined, this will return those product items. +func (r Billing_Item) GetDowngradeItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getDowngradeItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items, excluding some items with a $0.00 recurring fee. +func (r Billing_Item) GetFilteredNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getFilteredNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A flag that will reflect whether this billing item is billed on an hourly basis or not. +func (r Billing_Item) GetHourlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getHourlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Invoice items associated with this billing item +func (r Billing_Item) GetInvoiceItem() (resp datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getInvoiceItem", nil, &r.Options, &resp) + return +} + +// Retrieve All invoice items associated with the billing item +func (r Billing_Item) GetInvoiceItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getInvoiceItems", nil, &r.Options, &resp) + return +} + +// Retrieve The entry in the SoftLayer product catalog that a billing item is based upon. +func (r Billing_Item) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getItem", nil, &r.Options, &resp) + return +} + +// Retrieve The location of the billing item. Some billing items have physical properties such as the server itself. For items such as these, we provide location information. +func (r Billing_Item) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's child billing items and associated items +func (r Billing_Item) GetNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items if they exist. +func (r Billing_Item) GetNextInvoiceTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNextInvoiceTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items if they exist.
+func (r Billing_Item) GetNextInvoiceTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNextInvoiceTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items and associated billing items if they exist. +func (r Billing_Item) GetNextInvoiceTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNextInvoiceTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve This is deprecated and will always be zero. Because tax is calculated in real-time, previewing the next recurring invoice is pre-tax only. +func (r Billing_Item) GetNextInvoiceTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNextInvoiceTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items, excluding ALL items with a $0.00 recurring fee. +func (r Billing_Item) GetNonZeroNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getNonZeroNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Item object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Item service. You can only retrieve billing items tied to the account that your portal user is assigned to. Billing items are an account's record of billable items. There are "parent" billing items and "child" billing items. The server billing item is generally referred to as a parent billing item. The items tied to a server, such as RAM, hard drives, and operating systems, are considered "child" billing items. +func (r Billing_Item) GetObject() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's original order item. Simply a reference to the original order from which this billing item was created. +func (r Billing_Item) GetOrderItem() (resp datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getOrderItem", nil, &r.Options, &resp) + return +} + +// Retrieve The original physical location for this billing item--may differ from current. +func (r Billing_Item) GetOriginalLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getOriginalLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The package under which this billing item was sold. A Package is the general grouping of products as seen on our order forms. +func (r Billing_Item) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getPackage", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's parent item. If a billing item has no parent item then this value is null. +func (r Billing_Item) GetParent() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getParent", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's parent item. If a billing item has no parent item then this value is null.
+func (r Billing_Item) GetParentVirtualGuestBillingItem() (resp datatypes.Billing_Item_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getParentVirtualGuestBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates whether a billing item is scheduled to be canceled or not. +func (r Billing_Item) GetPendingCancellationFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getPendingCancellationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The new order item that will replace this billing item. +func (r Billing_Item) GetPendingOrderItem() (resp datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getPendingOrderItem", nil, &r.Options, &resp) + return +} + +// Retrieve Provisioning transaction for this billing item +func (r Billing_Item) GetProvisionTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getProvisionTransaction", nil, &r.Options, &resp) + return +} + +// This service returns billing items of a specified category code. This service should be used to retrieve billing items that you wish to cancel. Some billing items can be canceled via [[SoftLayer_Security_Certificate_Request|service cancellation]] service. +// +// In order to find billing items for cancellation, use [[SoftLayer_Product_Item_Category::getValidCancelableServiceItemCategories|product categories]] service to retrieve category codes that are eligible for cancellation. +func (r Billing_Item) GetServiceBillingItemsByCategory(categoryCode *string, includeZeroRecurringFee *bool) (resp []datatypes.Billing_Item, err error) { + params := []interface{}{ + categoryCode, + includeZeroRecurringFee, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getServiceBillingItemsByCategory", params, &r.Options, &resp) + return +} + +// Retrieve A friendly description of software component +func (r Billing_Item) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve Billing items whose product item has an upgrade path defined in our system will return the next product item in the upgrade path. +func (r Billing_Item) GetUpgradeItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getUpgradeItem", nil, &r.Options, &resp) + return +} + +// Retrieve Billing items whose product item has an upgrade path defined in our system will return all the product items in the upgrade path. +func (r Billing_Item) GetUpgradeItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "getUpgradeItems", nil, &r.Options, &resp) + return +} + +// Remove the association from a billing item. +func (r Billing_Item) RemoveAssociationId() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "removeAssociationId", nil, &r.Options, &resp) + return +} + +// Set an associated billing item to an orphan billing item. Associations allow you to tie an "orphaned" billing item, any non-server billing item that doesn't have a parent item such as secondary IP subnets or StorageLayer accounts, to a server billing item. You may only set an association for an orphan to a server. 
You cannot associate a server with an orphan if either the server or orphan billing item has a cancellation date set. +func (r Billing_Item) SetAssociationId(associatedId *int) (resp bool, err error) { + params := []interface{}{ + associatedId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item", "setAssociationId", params, &r.Options, &resp) + return +} + +// Void a previously made cancellation for a service +func (r Billing_Item) VoidCancelService() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item", "voidCancelService", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Billing_Item_Cancellation_Reason data type contains cancellation reasons. +type Billing_Item_Cancellation_Reason struct { + Session *session.Session + Options sl.Options +} + +// GetBillingItemCancellationReasonService returns an instance of the Billing_Item_Cancellation_Reason SoftLayer service +func GetBillingItemCancellationReasonService(sess *session.Session) Billing_Item_Cancellation_Reason { + return Billing_Item_Cancellation_Reason{Session: sess} +} + +func (r Billing_Item_Cancellation_Reason) Id(id int) Billing_Item_Cancellation_Reason { + r.Options.Id = &id + return r +} + +func (r Billing_Item_Cancellation_Reason) Mask(mask string) Billing_Item_Cancellation_Reason { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Item_Cancellation_Reason) Filter(filter string) Billing_Item_Cancellation_Reason { + r.Options.Filter = filter + return r +} + +func (r Billing_Item_Cancellation_Reason) Limit(limit int) Billing_Item_Cancellation_Reason { + r.Options.Limit = &limit + return r +} + +func (r Billing_Item_Cancellation_Reason) Offset(offset int) Billing_Item_Cancellation_Reason { + r.Options.Offset = &offset + return r +} + +// getAllCancellationReasons() retrieves a list of all cancellation reasons that a server/service may be assigned to. +func (r Billing_Item_Cancellation_Reason) GetAllCancellationReasons() (resp []datatypes.Billing_Item_Cancellation_Reason, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason", "getAllCancellationReasons", nil, &r.Options, &resp) + return +} + +// Retrieve A billing cancellation reason category. +func (r Billing_Item_Cancellation_Reason) GetBillingCancellationReasonCategory() (resp datatypes.Billing_Item_Cancellation_Reason_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason", "getBillingCancellationReasonCategory", nil, &r.Options, &resp) + return +} + +// Retrieve The corresponding billing items having the specific cancellation reason.
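To make the orphan-association rules above concrete, here is a hedged sketch that ties an orphaned billing item to a server billing item with SetAssociationId. Both IDs are placeholders, sl.Int is the pointer helper from the sl package, and sess is from the earlier sketch.

```go
orphanBillingItemID := 111111 // placeholder orphan (e.g. a secondary subnet)
serverBillingItemID := 222222 // placeholder server billing item

ok, err := services.GetBillingItemService(sess).
	Id(orphanBillingItemID).
	SetAssociationId(sl.Int(serverBillingItemID))
if err != nil {
	log.Fatal(err)
}
fmt.Println("associated:", ok)
```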
+func (r Billing_Item_Cancellation_Reason) GetBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason", "getBillingItems", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Item_Cancellation_Reason) GetObject() (resp datatypes.Billing_Item_Cancellation_Reason, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Cancellation_Reason) GetTranslatedReason() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason", "getTranslatedReason", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Billing_Item_Cancellation_Reason_Category data type contains cancellation reason categories. +type Billing_Item_Cancellation_Reason_Category struct { + Session *session.Session + Options sl.Options +} + +// GetBillingItemCancellationReasonCategoryService returns an instance of the Billing_Item_Cancellation_Reason_Category SoftLayer service +func GetBillingItemCancellationReasonCategoryService(sess *session.Session) Billing_Item_Cancellation_Reason_Category { + return Billing_Item_Cancellation_Reason_Category{Session: sess} +} + +func (r Billing_Item_Cancellation_Reason_Category) Id(id int) Billing_Item_Cancellation_Reason_Category { + r.Options.Id = &id + return r +} + +func (r Billing_Item_Cancellation_Reason_Category) Mask(mask string) Billing_Item_Cancellation_Reason_Category { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Item_Cancellation_Reason_Category) Filter(filter string) Billing_Item_Cancellation_Reason_Category { + r.Options.Filter = filter + return r +} + +func (r Billing_Item_Cancellation_Reason_Category) Limit(limit int) Billing_Item_Cancellation_Reason_Category { + r.Options.Limit = &limit + return r +} + +func (r Billing_Item_Cancellation_Reason_Category) Offset(offset int) Billing_Item_Cancellation_Reason_Category { + r.Options.Offset = &offset + return r +} + +// getAllCancellationReasonCategories() retrieves a list of all cancellation reason categories +func (r Billing_Item_Cancellation_Reason_Category) GetAllCancellationReasonCategories() (resp []datatypes.Billing_Item_Cancellation_Reason_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason_Category", "getAllCancellationReasonCategories", nil, &r.Options, &resp) + return +} + +// Retrieve The corresponding billing cancellation reasons having the specific billing cancellation reason category. +func (r Billing_Item_Cancellation_Reason_Category) GetBillingCancellationReasons() (resp []datatypes.Billing_Item_Cancellation_Reason, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason_Category", "getBillingCancellationReasons", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Billing_Item_Cancellation_Reason_Category) GetObject() (resp datatypes.Billing_Item_Cancellation_Reason_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Reason_Category", "getObject", nil, &r.Options, &resp) + return +} + +// SoftLayer_Billing_Item_Cancellation_Request data type is used to cancel service billing items. 
+type Billing_Item_Cancellation_Request struct { + Session *session.Session + Options sl.Options +} + +// GetBillingItemCancellationRequestService returns an instance of the Billing_Item_Cancellation_Request SoftLayer service +func GetBillingItemCancellationRequestService(sess *session.Session) Billing_Item_Cancellation_Request { + return Billing_Item_Cancellation_Request{Session: sess} +} + +func (r Billing_Item_Cancellation_Request) Id(id int) Billing_Item_Cancellation_Request { + r.Options.Id = &id + return r +} + +func (r Billing_Item_Cancellation_Request) Mask(mask string) Billing_Item_Cancellation_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Item_Cancellation_Request) Filter(filter string) Billing_Item_Cancellation_Request { + r.Options.Filter = filter + return r +} + +func (r Billing_Item_Cancellation_Request) Limit(limit int) Billing_Item_Cancellation_Request { + r.Options.Limit = &limit + return r +} + +func (r Billing_Item_Cancellation_Request) Offset(offset int) Billing_Item_Cancellation_Request { + r.Options.Offset = &offset + return r +} + +// This method creates a service cancellation request. +// +// You need to have the "Cancel Services" privilege to create a cancellation request. You have to provide at least one SoftLayer_Billing_Item_Cancellation_Request_Item in the "items" property. Make sure the billing item's category code belongs to the cancelable product categories. You can retrieve the cancelable product categories via the [[SoftLayer_Product_Item_Category::getValidCancelableServiceItemCategories|product category]] service. +func (r Billing_Item_Cancellation_Request) CreateObject(templateObject *datatypes.Billing_Item_Cancellation_Request) (resp datatypes.Billing_Item_Cancellation_Request, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "createObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer account that a service cancellation request belongs to. +func (r Billing_Item_Cancellation_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getAccount", nil, &r.Options, &resp) + return +} + +// This method returns all service cancellation requests. +// +// Make sure to include the "resultLimit" in the SOAP request header for a quicker response. If no result limit header is passed, the latest 25 results are returned by default. +func (r Billing_Item_Cancellation_Request) GetAllCancellationRequests() (resp []datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getAllCancellationRequests", nil, &r.Options, &resp) + return +} + +// Services can be canceled 2 or 3 days prior to your next bill date. This service returns the time by which a cancellation request submission is permitted in the current billing cycle. If the current time falls past the cutoff date, this will return the next earliest cancellation cutoff date.
+// +// Available category codes are: service, server +func (r Billing_Item_Cancellation_Request) GetCancellationCutoffDate(accountId *int, categoryCode *string) (resp datatypes.Time, err error) { + params := []interface{}{ + accountId, + categoryCode, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getCancellationCutoffDate", params, &r.Options, &resp) + return +} + +// Retrieve A collection of service cancellation items. +func (r Billing_Item_Cancellation_Request) GetItems() (resp []datatypes.Billing_Item_Cancellation_Request_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getItems", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Item_Cancellation_Request object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Item_Cancellation_Request service. You can only retrieve cancellation request records that are assigned to your SoftLayer account. +func (r Billing_Item_Cancellation_Request) GetObject() (resp datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The status of a service cancellation request. +func (r Billing_Item_Cancellation_Request) GetStatus() (resp datatypes.Billing_Item_Cancellation_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The ticket that is associated with the service cancellation request. +func (r Billing_Item_Cancellation_Request) GetTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getTicket", nil, &r.Options, &resp) + return +} + +// Retrieve The user that initiated a service cancellation request. +func (r Billing_Item_Cancellation_Request) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "getUser", nil, &r.Options, &resp) + return +} + +// This method removes a cancellation item from a cancellation request that is in "Pending" or "Approved" status. +func (r Billing_Item_Cancellation_Request) RemoveCancellationItem(itemId *int) (resp bool, err error) { + params := []interface{}{ + itemId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "removeCancellationItem", params, &r.Options, &resp) + return +} + +// This method examines whether a billing item is eligible for cancellation. It also checks whether the billing item you provided is already in an existing cancellation request. +func (r Billing_Item_Cancellation_Request) ValidateBillingItemForCancellation(billingItemId *int) (resp bool, err error) { + params := []interface{}{ + billingItemId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "validateBillingItemForCancellation", params, &r.Options, &resp) + return +} + +// This method voids a service cancellation request in "Pending" or "Approved" status.
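Putting the cancellation-request pieces together, the sketch below finds cancelable billing items in a category and submits a request for the first one. The Items and BillingItemId field names are assumptions based on the SoftLayer metadata (they live in the datatypes package, which is not shown in this diff), the category code is a placeholder (valid codes come from the product category service mentioned above), and sess is from the earlier sketch.

```go
// Find cancelable billing items in a category; "service" is a placeholder
// category code.
items, err := services.GetBillingItemService(sess).
	GetServiceBillingItemsByCategory(sl.String("service"), sl.Bool(false))
if err != nil {
	log.Fatal(err)
}
if len(items) == 0 {
	log.Fatal("no cancelable billing items found")
}

// Submit a cancellation request for the first item. Items/BillingItemId
// are assumed field names from the generated datatypes package.
req := datatypes.Billing_Item_Cancellation_Request{
	Items: []datatypes.Billing_Item_Cancellation_Request_Item{
		{BillingItemId: items[0].Id},
	},
}
created, err := services.GetBillingItemCancellationRequestService(sess).
	CreateObject(&req)
if err != nil {
	log.Fatal(err)
}
fmt.Println("cancellation request id:", *created.Id)
```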
+func (r Billing_Item_Cancellation_Request) Void(closeRelatedTicketFlag *bool) (resp bool, err error) { + params := []interface{}{ + closeRelatedTicketFlag, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Cancellation_Request", "void", params, &r.Options, &resp) + return +} + +// no documentation yet +type Billing_Item_Virtual_DedicatedHost struct { + Session *session.Session + Options sl.Options +} + +// GetBillingItemVirtualDedicatedHostService returns an instance of the Billing_Item_Virtual_DedicatedHost SoftLayer service +func GetBillingItemVirtualDedicatedHostService(sess *session.Session) Billing_Item_Virtual_DedicatedHost { + return Billing_Item_Virtual_DedicatedHost{Session: sess} +} + +func (r Billing_Item_Virtual_DedicatedHost) Id(id int) Billing_Item_Virtual_DedicatedHost { + r.Options.Id = &id + return r +} + +func (r Billing_Item_Virtual_DedicatedHost) Mask(mask string) Billing_Item_Virtual_DedicatedHost { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Item_Virtual_DedicatedHost) Filter(filter string) Billing_Item_Virtual_DedicatedHost { + r.Options.Filter = filter + return r +} + +func (r Billing_Item_Virtual_DedicatedHost) Limit(limit int) Billing_Item_Virtual_DedicatedHost { + r.Options.Limit = &limit + return r +} + +func (r Billing_Item_Virtual_DedicatedHost) Offset(offset int) Billing_Item_Virtual_DedicatedHost { + r.Options.Offset = &offset + return r +} + +// Cancel the resource or service for a billing Item. By default the billing item will be cancelled immediately and reclaim of the resource will begin shortly. Setting the "cancelImmediately" property to false will delay the cancellation until the next bill date. +// +// +// * The reason parameter could be from the list below: +// * "No longer needed" +// * "Business closing down" +// * "Server / Upgrade Costs" +// * "Migrating to larger server" +// * "Migrating to smaller server" +// * "Migrating to a different SoftLayer datacenter" +// * "Network performance / latency" +// * "Support response / timing" +// * "Sales process / upgrades" +// * "Moving to competitor" +func (r Billing_Item_Virtual_DedicatedHost) CancelItem(cancelImmediately *bool, cancelAssociatedBillingItems *bool, reason *string, customerNote *string) (resp bool, err error) { + params := []interface{}{ + cancelImmediately, + cancelAssociatedBillingItems, + reason, + customerNote, + } + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "cancelItem", params, &r.Options, &resp) + return +} + +// Cancel the resource or service (excluding bare metal servers) for a billing Item. The billing item will be cancelled immediately and reclaim of the resource will begin shortly. +func (r Billing_Item_Virtual_DedicatedHost) CancelService() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "cancelService", nil, &r.Options, &resp) + return +} + +// Cancel the resource or service for a billing Item +func (r Billing_Item_Virtual_DedicatedHost) CancelServiceOnAnniversaryDate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "cancelServiceOnAnniversaryDate", nil, &r.Options, &resp) + return +} + +// Retrieve The account that a billing item belongs to. 
+func (r Billing_Item_Virtual_DedicatedHost) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Virtual_DedicatedHost) GetActiveAgreement() (resp datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveAgreement", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the billing item is under an active agreement. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveAgreementFlag() (resp datatypes.Account_Agreement, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveAgreementFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's active associated child billing items. This includes "floating" items that are not necessarily child items of this billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveAssociatedChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Virtual_DedicatedHost) GetActiveAssociatedGuestDiskBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveAssociatedGuestDiskBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active bundled billing items. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A service cancellation request item that corresponds to the billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveCancellationItem() (resp datatypes.Billing_Item_Cancellation_Request_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveCancellationItem", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's active child billing items. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Virtual_DedicatedHost) GetActiveFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Virtual_DedicatedHost) GetActiveSparePoolAssociatedGuestDiskBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveSparePoolAssociatedGuestDiskBillingItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's spare pool bundled billing items. +func (r Billing_Item_Virtual_DedicatedHost) GetActiveSparePoolBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getActiveSparePoolBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's associated parent. 
This is to be used for billing items that are "floating", and therefore are not child items of any parent billing item. If it is desired to associate an item to another, populate this with the SoftLayer_Billing_Item ID of that associated parent item. +func (r Billing_Item_Virtual_DedicatedHost) GetAssociatedBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAssociatedBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A history of billing items which a billing item has been associated with. +func (r Billing_Item_Virtual_DedicatedHost) GetAssociatedBillingItemHistory() (resp []datatypes.Billing_Item_Association_History, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAssociatedBillingItemHistory", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items. This includes "floating" items that are not necessarily child billing items of this billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetAssociatedChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAssociatedChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's associated parent billing item. This object will be the same as the parent billing item if parentId is set. +func (r Billing_Item_Virtual_DedicatedHost) GetAssociatedParent() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAssociatedParent", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Item_Virtual_DedicatedHost) GetAvailableMatchingVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getAvailableMatchingVlans", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allocation for a billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetBandwidthAllocation() (resp datatypes.Network_Bandwidth_Version1_Allocation, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A billing item's recurring child items that have once been billed and are scheduled to be billed in the future. +func (r Billing_Item_Virtual_DedicatedHost) GetBillableChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getBillableChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's bundled billing items +func (r Billing_Item_Virtual_DedicatedHost) GetBundleItems() (resp []datatypes.Product_Item_Bundles, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getBundleItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's bundled billing items +func (r Billing_Item_Virtual_DedicatedHost) GetBundledItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getBundledItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's canceled child billing items.
+func (r Billing_Item_Virtual_DedicatedHost) GetCanceledChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getCanceledChildren", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item's cancellation reason. +func (r Billing_Item_Virtual_DedicatedHost) GetCancellationReason() (resp datatypes.Billing_Item_Cancellation_Reason, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getCancellationReason", nil, &r.Options, &resp) + return +} + +// Retrieve This will return any cancellation requests that are associated with this billing item. +func (r Billing_Item_Virtual_DedicatedHost) GetCancellationRequests() (resp []datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getCancellationRequests", nil, &r.Options, &resp) + return +} + +// Retrieve The item category to which the billing item's item belongs. +func (r Billing_Item_Virtual_DedicatedHost) GetCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getCategory", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's child billing items +func (r Billing_Item_Virtual_DedicatedHost) GetChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's child billing items with an active agreement. +func (r Billing_Item_Virtual_DedicatedHost) GetChildrenWithActiveAgreement() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getChildrenWithActiveAgreement", nil, &r.Options, &resp) + return +} + +// Retrieve For product items which have a downgrade path defined, this will return those product items. +func (r Billing_Item_Virtual_DedicatedHost) GetDowngradeItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getDowngradeItems", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items, excluding some items with a $0.00 recurring fee. +func (r Billing_Item_Virtual_DedicatedHost) GetFilteredNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getFilteredNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A flag that will reflect whether this billing item is billed on an hourly basis or not.
+func (r Billing_Item_Virtual_DedicatedHost) GetHourlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getHourlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Invoice items associated with this billing item +func (r Billing_Item_Virtual_DedicatedHost) GetInvoiceItem() (resp datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getInvoiceItem", nil, &r.Options, &resp) + return +} + +// Retrieve All invoice items associated with the billing item +func (r Billing_Item_Virtual_DedicatedHost) GetInvoiceItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getInvoiceItems", nil, &r.Options, &resp) + return +} + +// Retrieve The entry in the SoftLayer product catalog that a billing item is based upon. +func (r Billing_Item_Virtual_DedicatedHost) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getItem", nil, &r.Options, &resp) + return +} + +// Retrieve The location of the billing item. Some billing items have physical properties such as the server itself. For items such as these, we provide location information. +func (r Billing_Item_Virtual_DedicatedHost) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's child billing items and associated items +func (r Billing_Item_Virtual_DedicatedHost) GetNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNextInvoiceChildren", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items if they exist. +func (r Billing_Item_Virtual_DedicatedHost) GetNextInvoiceTotalOneTimeAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNextInvoiceTotalOneTimeAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items if they exist. +func (r Billing_Item_Virtual_DedicatedHost) GetNextInvoiceTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNextInvoiceTotalOneTimeTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's total, including any child billing items and associated billing items if they exist. +func (r Billing_Item_Virtual_DedicatedHost) GetNextInvoiceTotalRecurringAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNextInvoiceTotalRecurringAmount", nil, &r.Options, &resp) + return +} + +// Retrieve This is deprecated and will always be zero. Because tax is calculated in real-time, previewing the next recurring invoice is pre-tax only. +func (r Billing_Item_Virtual_DedicatedHost) GetNextInvoiceTotalRecurringTaxAmount() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNextInvoiceTotalRecurringTaxAmount", nil, &r.Options, &resp) + return +} + +// Retrieve A Billing Item's associated child billing items, excluding ALL items with a $0.00 recurring fee.
+func (r Billing_Item_Virtual_DedicatedHost) GetNonZeroNextInvoiceChildren() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getNonZeroNextInvoiceChildren", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Billing_Item_Virtual_DedicatedHost) GetObject() (resp datatypes.Billing_Item_Virtual_DedicatedHost, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A billing item's original order item. Simply a reference to the original order from which this billing item was created.
+func (r Billing_Item_Virtual_DedicatedHost) GetOrderItem() (resp datatypes.Billing_Order_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getOrderItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The original physical location for this billing item; it may differ from the current location.
+func (r Billing_Item_Virtual_DedicatedHost) GetOriginalLocation() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getOriginalLocation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The package under which this billing item was sold. A Package is the general grouping of products as seen on our order forms.
+func (r Billing_Item_Virtual_DedicatedHost) GetPackage() (resp datatypes.Product_Package, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getPackage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A billing item's parent item. If a billing item has no parent item then this value is null.
+func (r Billing_Item_Virtual_DedicatedHost) GetParent() (resp datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getParent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A billing item's parent virtual guest billing item. If a billing item has no parent virtual guest billing item then this value is null.
+func (r Billing_Item_Virtual_DedicatedHost) GetParentVirtualGuestBillingItem() (resp datatypes.Billing_Item_Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getParentVirtualGuestBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve This flag indicates whether a billing item is scheduled to be canceled or not.
+func (r Billing_Item_Virtual_DedicatedHost) GetPendingCancellationFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getPendingCancellationFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The new order item that will replace this billing item.
+func (r Billing_Item_Virtual_DedicatedHost) GetPendingOrderItem() (resp datatypes.Billing_Order_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getPendingOrderItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The provisioning transaction for this billing item.
+func (r Billing_Item_Virtual_DedicatedHost) GetProvisionTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getProvisionTransaction", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The resource for a virtual dedicated host billing item.
+func (r Billing_Item_Virtual_DedicatedHost) GetResource() (resp datatypes.Virtual_DedicatedHost, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getResource", nil, &r.Options, &resp)
+	return
+}
+
+// This service returns billing items of a specified category code. This service should be used to retrieve billing items that you wish to cancel. Some billing items can be canceled via the [[SoftLayer_Security_Certificate_Request|service cancellation]] service.
+//
+// In order to find billing items for cancellation, use the [[SoftLayer_Product_Item_Category::getValidCancelableServiceItemCategories|product categories]] service to retrieve category codes that are eligible for cancellation.
+func (r Billing_Item_Virtual_DedicatedHost) GetServiceBillingItemsByCategory(categoryCode *string, includeZeroRecurringFee *bool) (resp []datatypes.Billing_Item, err error) {
+	params := []interface{}{
+		categoryCode,
+		includeZeroRecurringFee,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getServiceBillingItemsByCategory", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A friendly description of the software component.
+func (r Billing_Item_Virtual_DedicatedHost) GetSoftwareDescription() (resp datatypes.Software_Description, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getSoftwareDescription", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve For billing items whose product item has an upgrade path defined in our system, this will return the next product item in the upgrade path.
+func (r Billing_Item_Virtual_DedicatedHost) GetUpgradeItem() (resp datatypes.Product_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getUpgradeItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve For billing items whose product item has an upgrade path defined in our system, this will return all the product items in the upgrade path.
+func (r Billing_Item_Virtual_DedicatedHost) GetUpgradeItems() (resp []datatypes.Product_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "getUpgradeItems", nil, &r.Options, &resp)
+	return
+}
+
+// Remove the association from a billing item.
+func (r Billing_Item_Virtual_DedicatedHost) RemoveAssociationId() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "removeAssociationId", nil, &r.Options, &resp)
+	return
+}
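A short sketch of calling the category lookup above, reusing `svc` from the previous example. The generated methods take pointers for their parameters, so the `sl` package's pointer helpers (`sl.String`, `sl.Bool`) are a convenient way to build the arguments inline; the category code shown is hypothetical:

```go
items, err := svc.GetServiceBillingItemsByCategory(
	sl.String("server"), // hypothetical category code eligible for cancellation
	sl.Bool(false),      // skip $0.00 recurring-fee items
)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("found %d cancelable billing items\n", len(items))
```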
+
+// Set an associated billing item to an orphan billing item. Associations allow you to tie an "orphaned" billing item, meaning any non-server billing item that doesn't have a parent item, such as secondary IP subnets or StorageLayer accounts, to a server billing item. You may only set an association for an orphan to a server. You cannot associate a server to an orphan if either the server or orphan billing item has a cancellation date set.
+func (r Billing_Item_Virtual_DedicatedHost) SetAssociationId(associatedId *int) (resp bool, err error) {
+	params := []interface{}{
+		associatedId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "setAssociationId", params, &r.Options, &resp)
+	return
+}
+
+// Void a previously made cancellation for a service.
+func (r Billing_Item_Virtual_DedicatedHost) VoidCancelService() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Item_Virtual_DedicatedHost", "voidCancelService", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Billing_Order data type contains general information relating to an individual order applied to a SoftLayer customer account or to a new customer. Personal information in this type, such as names, addresses, and phone numbers, is taken from the account's contact information at the time the order is generated for existing SoftLayer customers.
+type Billing_Order struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetBillingOrderService returns an instance of the Billing_Order SoftLayer service
+func GetBillingOrderService(sess *session.Session) Billing_Order {
+	return Billing_Order{Session: sess}
+}
+
+func (r Billing_Order) Id(id int) Billing_Order {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Billing_Order) Mask(mask string) Billing_Order {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Billing_Order) Filter(filter string) Billing_Order {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Billing_Order) Limit(limit int) Billing_Order {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Billing_Order) Offset(offset int) Billing_Order {
+	r.Options.Offset = &offset
+	return r
+}
+
+// When an order has been modified, the customer will need to approve the changes. This method allows the customer to approve those changes.
+func (r Billing_Order) ApproveModifiedOrder() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "approveModifiedOrder", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The [[SoftLayer_Account|account]] to which an order belongs.
+func (r Billing_Order) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// This will get all billing orders for your account.
+func (r Billing_Order) GetAllObjects() (resp []datatypes.Billing_Order, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Billing_Order) GetBrand() (resp datatypes.Brand, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getBrand", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A cart is similar to a quote, except that it can be continually modified by the customer and does not have locked-in prices. Not all orders will have a cart associated with them. See [[SoftLayer_Billing_Order_Cart]] for more information.
+func (r Billing_Order) GetCart() (resp datatypes.Billing_Order_Cart, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getCart", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The [[SoftLayer_Billing_Order_Item (type)|order items]] that are core restricted.
+func (r Billing_Order) GetCoreRestrictedItems() (resp []datatypes.Billing_Order_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getCoreRestrictedItems", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All credit card transactions associated with this order. If this order was not placed with a credit card, this will be empty.
+func (r Billing_Order) GetCreditCardTransactions() (resp []datatypes.Billing_Payment_Card_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getCreditCardTransactions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Billing_Order) GetExchangeRate() (resp datatypes.Billing_Currency_ExchangeRate, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getExchangeRate", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Billing_Order) GetInitialInvoice() (resp datatypes.Billing_Invoice, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getInitialInvoice", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Billing_Order_Item records included in an order.
+func (r Billing_Order) GetItems() (resp []datatypes.Billing_Order_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getItems", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Billing_Order object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Order service. You can only retrieve orders that are assigned to your portal user's account.
+func (r Billing_Order) GetObject() (resp datatypes.Billing_Order, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Billing_Order) GetOrderApprovalDate() (resp datatypes.Time, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderApprovalDate", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An order's non-server items total monthly fee.
+func (r Billing_Order) GetOrderNonServerMonthlyAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderNonServerMonthlyAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An order's server items total monthly fee.
+func (r Billing_Order) GetOrderServerMonthlyAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderServerMonthlyAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Get a list of [[SoftLayer_Container_Billing_Order_Status]] objects.
+func (r Billing_Order) GetOrderStatuses() (resp []datatypes.Container_Billing_Order_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderStatuses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An order's top level items. This normally includes the server line item and any non-server additional services such as NAS or ISCSI.
+func (r Billing_Order) GetOrderTopLevelItems() (resp []datatypes.Billing_Order_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTopLevelItems", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve This amount represents the order's initial charge, including setup fees and taxes.
+func (r Billing_Order) GetOrderTotalAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An order's total one time amount, summing all the setup fees, the labor fees and the one time fees. Taxes will be applied for non-tax-exempt accounts. This amount represents the initial fees that will be charged.
+func (r Billing_Order) GetOrderTotalOneTime() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalOneTime", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An order's total one time amount. This amount represents the initial fees before tax.
+func (r Billing_Order) GetOrderTotalOneTimeAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalOneTimeAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An order's total one time tax amount. This amount represents the tax that will be applied to the total charge, if the SoftLayer_Account tied to a SoftLayer_Billing_Order is a taxable account.
+func (r Billing_Order) GetOrderTotalOneTimeTaxAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalOneTimeTaxAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An order's total recurring amount. Taxes will be applied for non-tax-exempt accounts. This amount represents the fees that will be charged on a recurring (usually monthly) basis.
+func (r Billing_Order) GetOrderTotalRecurring() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalRecurring", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An order's total recurring amount. This amount represents the fees that will be charged on a recurring (usually monthly) basis.
+func (r Billing_Order) GetOrderTotalRecurringAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalRecurringAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total tax amount of the recurring fees, if the SoftLayer_Account tied to a SoftLayer_Billing_Order is a taxable account.
+func (r Billing_Order) GetOrderTotalRecurringTaxAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalRecurringTaxAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An order's total setup fee.
+func (r Billing_Order) GetOrderTotalSetupAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderTotalSetupAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of an order. This lets you know where this order was generated from.
+func (r Billing_Order) GetOrderType() (resp datatypes.Billing_Order_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getOrderType", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All PayPal transactions associated with this order. If this order was not placed with PayPal, this will be empty.
+func (r Billing_Order) GetPaypalTransactions() (resp []datatypes.Billing_Payment_PayPal_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order", "getPaypalTransactions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a PDF record of a SoftLayer quote. If the order is not a quote, an error will be thrown.
+func (r Billing_Order) GetPdf() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getPdf", nil, &r.Options, &resp) + return +} + +// Retrieve the default filename of an order PDF. +func (r Billing_Order) GetPdfFilename() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getPdfFilename", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Billing_Order) GetPresaleEvent() (resp datatypes.Sales_Presale_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getPresaleEvent", nil, &r.Options, &resp) + return +} + +// Retrieve The quote of an order. This quote holds information about its expiration date, creation date, name and status. This information is tied to an order having the status 'QUOTE' +func (r Billing_Order) GetQuote() (resp datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getQuote", nil, &r.Options, &resp) + return +} + +// Generate an [[SoftLayer_Container_Product_Order|order container]] from a billing order. This will take into account promotions, reseller status, estimated taxes and all other standard order verification processes. +func (r Billing_Order) GetRecalculatedOrderContainer(message *string, ignoreDiscountsFlag *bool) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + message, + ignoreDiscountsFlag, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getRecalculatedOrderContainer", params, &r.Options, &resp) + return +} + +// Generate a [[SoftLayer_Container_Product_Order_Receipt]] object with all the order information. +func (r Billing_Order) GetReceipt() (resp datatypes.Container_Product_Order_Receipt, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getReceipt", nil, &r.Options, &resp) + return +} + +// Retrieve The Referral Partner who referred this order. (Only necessary for new customer orders) +func (r Billing_Order) GetReferralPartner() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getReferralPartner", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates an order is an upgrade. +func (r Billing_Order) GetUpgradeRequestFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getUpgradeRequestFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_User_Customer object tied to an order. +func (r Billing_Order) GetUserRecord() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "getUserRecord", nil, &r.Options, &resp) + return +} + +// When an order has been modified, it will contain a status indicating so. This method checks that status and also verifies that the active user's account is the same as the account on the order. 
+func (r Billing_Order) IsPendingEditApproval() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order", "isPendingEditApproval", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Billing_Order_Cart struct { + Session *session.Session + Options sl.Options +} + +// GetBillingOrderCartService returns an instance of the Billing_Order_Cart SoftLayer service +func GetBillingOrderCartService(sess *session.Session) Billing_Order_Cart { + return Billing_Order_Cart{Session: sess} +} + +func (r Billing_Order_Cart) Id(id int) Billing_Order_Cart { + r.Options.Id = &id + return r +} + +func (r Billing_Order_Cart) Mask(mask string) Billing_Order_Cart { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Order_Cart) Filter(filter string) Billing_Order_Cart { + r.Options.Filter = filter + return r +} + +func (r Billing_Order_Cart) Limit(limit int) Billing_Order_Cart { + r.Options.Limit = &limit + return r +} + +func (r Billing_Order_Cart) Offset(offset int) Billing_Order_Cart { + r.Options.Offset = &offset + return r +} + +// This method is used to transfer an anonymous quote to the active user and associated account. An anonymous quote is one that was created by a user without being authenticated. If a quote was created anonymously and then the customer attempts to access that anonymous quote via the API (which requires authentication), the customer will be unable to retrieve the quote due to the security restrictions in place. By providing the ability for a customer to claim a quote, s/he will be able to pull the anonymous quote onto his/her account and successfully view the quote. +// +// To claim a quote, both the quote id and the quote key (the 32-character random string) must be provided. +func (r Billing_Order_Cart) Claim(quoteKey *string, quoteId *int) (resp datatypes.Billing_Order_Quote, err error) { + params := []interface{}{ + quoteKey, + quoteId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "claim", params, &r.Options, &resp) + return +} + +// When creating a new cart, the order data is sent through SoftLayer_Product_Order::verifyOrder to make sure that the cart contains valid data. If an issue is found with the order, an exception will be thrown and you will receive the same response as if SoftLayer_Product_Order::verifyOrder were called directly. Once the order verification is complete, the cart will be created. +// +// The response is the new cart id. +func (r Billing_Order_Cart) CreateCart(orderData *datatypes.Container_Product_Order) (resp int, err error) { + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "createCart", params, &r.Options, &resp) + return +} + +// If a cart is no longer needed, it can be deleted using this service. Once a cart has been deleted, it cannot be retrieved again. +func (r Billing_Order_Cart) DeleteCart() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "deleteCart", nil, &r.Options, &resp) + return +} + +// Account master users and sub-users in the SoftLayer customer portal can delete the quote of an order. +func (r Billing_Order_Cart) DeleteQuote() (resp datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "deleteQuote", nil, &r.Options, &resp) + return +} + +// Retrieve A quote's corresponding account. 
+func (r Billing_Order_Cart) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a valid cart record of a SoftLayer order.
+func (r Billing_Order_Cart) GetCartByCartKey(cartKey *string) (resp datatypes.Billing_Order_Cart, err error) {
+	params := []interface{}{
+		cartKey,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getCartByCartKey", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Billing_Order_Cart) GetObject() (resp datatypes.Billing_Order_Cart, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve This order contains the records for which products were selected for this quote.
+func (r Billing_Order_Cart) GetOrder() (resp datatypes.Billing_Order, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getOrder", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve These are all the orders that were created from this quote.
+func (r Billing_Order_Cart) GetOrdersFromQuote() (resp []datatypes.Billing_Order, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getOrdersFromQuote", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a PDF copy of the cart.
+func (r Billing_Order_Cart) GetPdf() (resp []byte, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getPdf", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return a [[SoftLayer_Billing_Order_Quote]] that is identified by the quote key specified. If you do not have access to the quote or it does not exist, an exception will be thrown indicating so.
+func (r Billing_Order_Cart) GetQuoteByQuoteKey(quoteKey *string) (resp datatypes.Billing_Order_Quote, err error) {
+	params := []interface{}{
+		quoteKey,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getQuoteByQuoteKey", params, &r.Options, &resp)
+	return
+}
+
+// This method allows the customer to retrieve a saved cart and put it in a format that's suitable to be sent to SoftLayer_Billing_Order_Cart::createCart to create a new cart or to SoftLayer_Billing_Order_Cart::updateCart to update an existing cart.
+func (r Billing_Order_Cart) GetRecalculatedOrderContainer(orderData *datatypes.Container_Product_Order, orderBeingPlacedFlag *bool) (resp datatypes.Container_Product_Order, err error) {
+	params := []interface{}{
+		orderData,
+		orderBeingPlacedFlag,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "getRecalculatedOrderContainer", params, &r.Options, &resp)
+	return
+}
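As an illustration of the retrieve-and-update round trip described above, a hedged sketch; the cart key is a placeholder, and it assumes the Billing_Order_Cart datatype promotes an `Id` field from its embedded quote datatype:

```go
cartSvc := services.GetBillingOrderCartService(sess)

// Look up a saved cart by its key, rehydrate it into an order container,
// then write the (possibly modified) container back with updateCart.
cart, err := cartSvc.GetCartByCartKey(sl.String("0123456789abcdef")) // placeholder cart key
if err != nil {
	log.Fatal(err)
}

container, err := cartSvc.Id(*cart.Id).GetRecalculatedOrderContainer(nil, sl.Bool(false))
if err != nil {
	log.Fatal(err)
}

// ... adjust quantities, prices, etc. on container here ...

cartId, err := cartSvc.Id(*cart.Id).UpdateCart(&container)
if err != nil {
	log.Fatal(err)
}
fmt.Println("updated cart", cartId)
```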
+
+// Use this method for placing server orders and additional services orders. The same rules apply as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server orders. In addition to verifying the order, placeOrder() also makes an initial authorization on the SoftLayer_Account tied to this order, if a credit card is on file. If the account tied to this order is a PayPal customer, a URL will also be returned to the customer. After placing the order, you must go to this URL to finish the authorization process; this tells PayPal that you indeed want to place the order. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process. After this, it will go to sales for final approval.
+func (r Billing_Order_Cart) PlaceOrder(orderData interface{}) (resp datatypes.Container_Product_Order_Receipt, err error) {
+	err = datatypes.SetComplexType(orderData)
+	if err != nil {
+		return
+	}
+	params := []interface{}{
+		orderData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "placeOrder", params, &r.Options, &resp)
+	return
+}
+
+// Use this method for placing server quotes and additional services quotes. The same rules apply as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server quotes. In addition to verifying the quote, placeQuote() also makes an initial authorization on the SoftLayer_Account tied to this order, if a credit card is on file. If the account tied to this order is a PayPal customer, a URL will also be returned to the customer. After placing the order, you must go to this URL to finish the authorization process; this tells PayPal that you indeed want to place the order. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process.
+func (r Billing_Order_Cart) PlaceQuote(orderData *datatypes.Container_Product_Order) (resp datatypes.Container_Product_Order, err error) {
+	params := []interface{}{
+		orderData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "placeQuote", params, &r.Options, &resp)
+	return
+}
+
+// Account master users and sub-users in the SoftLayer customer portal can save the quote of an order to avoid its deletion after 5 days or its expiration after 2 days.
+func (r Billing_Order_Cart) SaveQuote() (resp datatypes.Billing_Order_Quote, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "saveQuote", nil, &r.Options, &resp)
+	return
+}
+
+// Like SoftLayer_Billing_Order_Cart::createCart, the order data will be sent through SoftLayer_Product_Order::verifyOrder to make sure that the updated cart information is valid. Once it has been verified, the new order data will be saved.
+//
+// This will return the cart id.
+func (r Billing_Order_Cart) UpdateCart(orderData *datatypes.Container_Product_Order) (resp int, err error) {
+	params := []interface{}{
+		orderData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "updateCart", params, &r.Options, &resp)
+	return
+}
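The verify-then-place pattern implied by the comments above, sketched under the assumption that `orderData` is a previously built pointer to one of the generated order containers (for example `*datatypes.Container_Product_Order_Virtual_Guest`) and that the receipt exposes `OrderId` and `ExternalPaymentCheckoutUrl` fields, as the generated datatypes suggest:

```go
// Dry-run the order first; verification surfaces configuration errors
// without charging the account.
if _, err := cartSvc.VerifyOrder(orderData); err != nil {
	log.Fatalf("order failed verification: %v", err)
}

receipt, err := cartSvc.PlaceOrder(orderData)
if err != nil {
	log.Fatal(err)
}
if receipt.OrderId != nil {
	fmt.Println("order placed:", *receipt.OrderId)
}

// PayPal accounts receive a checkout URL that must be visited to finish
// the initial authorization described in the comment above.
if receipt.ExternalPaymentCheckoutUrl != nil {
	fmt.Println("finish authorization at:", *receipt.ExternalPaymentCheckoutUrl)
}
```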
+
+// Use this method to verify server orders and additional services orders without placing them. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server orders. verifyOrder() checks the order data for errors and returns the verified order container; unlike placeOrder(), it does not make a payment authorization or submit the order.
+func (r Billing_Order_Cart) VerifyOrder(orderData interface{}) (resp datatypes.Container_Product_Order, err error) {
+	err = datatypes.SetComplexType(orderData)
+	if err != nil {
+		return
+	}
+	params := []interface{}{
+		orderData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Cart", "verifyOrder", params, &r.Options, &resp)
+	return
+}
+
+// Every individual item that a SoftLayer customer is billed for is recorded in the SoftLayer_Billing_Item data type. Billing items range from server chassis to hard drives to control panels, bandwidth quota upgrades and port upgrade charges. SoftLayer [[SoftLayer_Billing_Invoice|invoices]] are generated from the cost of a customer's billing items. Billing items are copied from the product catalog as they're ordered by customers to create a reference between an account and the billable items they own.
+//
+// Billing items exist in a tree relationship. Items are associated with each other by parent/child relationships. Component items such as CPUs, RAM, and software each have a parent billing item for the server chassis they're associated with. Billing items with a null parent item are top-level billing items.
+type Billing_Order_Item struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetBillingOrderItemService returns an instance of the Billing_Order_Item SoftLayer service
+func GetBillingOrderItemService(sess *session.Session) Billing_Order_Item {
+	return Billing_Order_Item{Session: sess}
+}
+
+func (r Billing_Order_Item) Id(id int) Billing_Order_Item {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Billing_Order_Item) Mask(mask string) Billing_Order_Item {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Billing_Order_Item) Filter(filter string) Billing_Order_Item {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Billing_Order_Item) Limit(limit int) Billing_Order_Item {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Billing_Order_Item) Offset(offset int) Billing_Order_Item {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve The SoftLayer_Billing_Item tied to the order item.
+func (r Billing_Order_Item) GetBillingItem() (resp datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The other items included with an ordered item.
+func (r Billing_Order_Item) GetBundledItems() (resp []datatypes.Billing_Order_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getBundledItems", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The item category tied to an order item.
+func (r Billing_Order_Item) GetCategory() (resp datatypes.Product_Item_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getCategory", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The child order items for an order item. All server order items should have children. These children are considered a part of the server.
+func (r Billing_Order_Item) GetChildren() (resp []datatypes.Billing_Order_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getChildren", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's universally unique identifier.
+func (r Billing_Order_Item) GetGlobalIdentifier() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getGlobalIdentifier", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The component type tied to an order item. All hardware-specific items should have a generic hardware component.
+func (r Billing_Order_Item) GetHardwareGenericComponent() (resp datatypes.Hardware_Component_Model_Generic, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getHardwareGenericComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Product_Item tied to an order item. The item is the actual definition of the product being sold.
+func (r Billing_Order_Item) GetItem() (resp datatypes.Product_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve This is an item's category answers.
+func (r Billing_Order_Item) GetItemCategoryAnswers() (resp []datatypes.Billing_Order_Item_Category_Answer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getItemCategoryAnswers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Product_Item_Price tied to an order item. The item price object describes the cost of an item.
+func (r Billing_Order_Item) GetItemPrice() (resp datatypes.Product_Item_Price, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getItemPrice", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The location of an ordered item. This is usually the same as the server it is being ordered with. Otherwise it describes the location of the additional service being ordered.
+func (r Billing_Order_Item) GetLocation() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getLocation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Billing_Order_Item) GetNextOrderChildren() (resp []datatypes.Billing_Order_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getNextOrderChildren", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Billing_Order_Item object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Order_Item service. You can only retrieve billing items tied to the account that your portal user is assigned to. Billing items are an account's record of billable items. There are "parent" billing items and "child" billing items. The server billing item is generally referred to as a parent billing item. The items tied to a server, such as RAM, hard drives, and operating systems, are considered "child" billing items.
+func (r Billing_Order_Item) GetObject() (resp datatypes.Billing_Order_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve This is only populated when an upgrade order is placed. The old billing item represents what the billing was before the upgrade happened.
+func (r Billing_Order_Item) GetOldBillingItem() (resp datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getOldBillingItem", nil, &r.Options, &resp)
+	return
+}
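A brief sketch of fetching one order item with a nested object mask, so the related product item and category arrive in a single request (67890 is a placeholder order item ID):

```go
itemSvc := services.GetBillingOrderItemService(sess).
	Id(67890). // placeholder order item ID
	Mask("mask[id,description,item[keyName],category[categoryCode]]")

orderItem, err := itemSvc.GetObject()
if err != nil {
	log.Fatal(err)
}
if orderItem.Description != nil && orderItem.Category != nil {
	fmt.Println(*orderItem.Description, "-", *orderItem.Category.CategoryCode)
}
```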
+
+// Retrieve The order to which this item belongs. The order contains all the information related to the items included in an order.
+func (r Billing_Order_Item) GetOrder() (resp datatypes.Billing_Order, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getOrder", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Billing_Order_Item) GetOrderApprovalDate() (resp datatypes.Time, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getOrderApprovalDate", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Product_Package an order item is a part of.
+func (r Billing_Order_Item) GetPackage() (resp datatypes.Product_Package, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getPackage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The parent order item ID for an item. Items that are associated with a server will have a parent. The parent will be the server item itself.
+func (r Billing_Order_Item) GetParent() (resp datatypes.Billing_Order_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getParent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A count of power supplies contained within this SoftLayer_Billing_Order.
+func (r Billing_Order_Item) GetRedundantPowerSupplyCount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getRedundantPowerSupplyCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve For ordered items that are software items, a full description of that software can be found with this property.
+func (r Billing_Order_Item) GetSoftwareDescription() (resp datatypes.Software_Description, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getSoftwareDescription", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The drive storage groups that are attached to this billing order item.
+func (r Billing_Order_Item) GetStorageGroups() (resp []datatypes.Configuration_Storage_Group_Order, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getStorageGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The recurring fee of an ordered item. This amount represents the fees that will be charged on a recurring (usually monthly) basis.
+func (r Billing_Order_Item) GetTotalRecurringAmount() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getTotalRecurringAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The next SoftLayer_Product_Item in the upgrade path for this order item.
+func (r Billing_Order_Item) GetUpgradeItem() (resp datatypes.Product_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Item", "getUpgradeItem", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Billing_Order_Quote data type contains general information relating to an individual order applied to a SoftLayer customer account or to a new customer. Personal information in this type, such as names, addresses, and phone numbers, is taken from the account's contact information at the time the quote is generated for existing SoftLayer customers.
+type Billing_Order_Quote struct { + Session *session.Session + Options sl.Options +} + +// GetBillingOrderQuoteService returns an instance of the Billing_Order_Quote SoftLayer service +func GetBillingOrderQuoteService(sess *session.Session) Billing_Order_Quote { + return Billing_Order_Quote{Session: sess} +} + +func (r Billing_Order_Quote) Id(id int) Billing_Order_Quote { + r.Options.Id = &id + return r +} + +func (r Billing_Order_Quote) Mask(mask string) Billing_Order_Quote { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Billing_Order_Quote) Filter(filter string) Billing_Order_Quote { + r.Options.Filter = filter + return r +} + +func (r Billing_Order_Quote) Limit(limit int) Billing_Order_Quote { + r.Options.Limit = &limit + return r +} + +func (r Billing_Order_Quote) Offset(offset int) Billing_Order_Quote { + r.Options.Offset = &offset + return r +} + +// This method is used to transfer an anonymous quote to the active user and associated account. An anonymous quote is one that was created by a user without being authenticated. If a quote was created anonymously and then the customer attempts to access that anonymous quote via the API (which requires authentication), the customer will be unable to retrieve the quote due to the security restrictions in place. By providing the ability for a customer to claim a quote, s/he will be able to pull the anonymous quote onto his/her account and successfully view the quote. +// +// To claim a quote, both the quote id and the quote key (the 32-character random string) must be provided. +func (r Billing_Order_Quote) Claim(quoteKey *string, quoteId *int) (resp datatypes.Billing_Order_Quote, err error) { + params := []interface{}{ + quoteKey, + quoteId, + } + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "claim", params, &r.Options, &resp) + return +} + +// Account master users and sub-users in the SoftLayer customer portal can delete the quote of an order. +func (r Billing_Order_Quote) DeleteQuote() (resp datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "deleteQuote", nil, &r.Options, &resp) + return +} + +// Retrieve A quote's corresponding account. +func (r Billing_Order_Quote) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getAccount", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Billing_Order_Quote object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Billing_Order_Quote service. You can only retrieve quotes that are assigned to your portal user's account. +func (r Billing_Order_Quote) GetObject() (resp datatypes.Billing_Order_Quote, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve This order contains the records for which products were selected for this quote. +func (r Billing_Order_Quote) GetOrder() (resp datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getOrder", nil, &r.Options, &resp) + return +} + +// Retrieve These are all the orders that were created from this quote. 
+func (r Billing_Order_Quote) GetOrdersFromQuote() (resp []datatypes.Billing_Order, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getOrdersFromQuote", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a PDF record of a SoftLayer quoted order. SoftLayer keeps PDF records of all quoted orders for customer retrieval from the portal and API. You must have a PDF reader installed in order to view these quoted order files.
+func (r Billing_Order_Quote) GetPdf() (resp []byte, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getPdf", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return a [[SoftLayer_Billing_Order_Quote]] that is identified by the quote key specified. If you do not have access to the quote or it does not exist, an exception will be thrown indicating so.
+func (r Billing_Order_Quote) GetQuoteByQuoteKey(quoteKey *string) (resp datatypes.Billing_Order_Quote, err error) {
+	params := []interface{}{
+		quoteKey,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getQuoteByQuoteKey", params, &r.Options, &resp)
+	return
+}
+
+// Generate an [[SoftLayer_Container_Product_Order|order container]] from the previously-created quote. This will take into account promotions, reseller status, estimated taxes and all other standard order verification processes.
+func (r Billing_Order_Quote) GetRecalculatedOrderContainer(userOrderData *datatypes.Container_Product_Order, orderBeingPlacedFlag *bool) (resp datatypes.Container_Product_Order, err error) {
+	params := []interface{}{
+		userOrderData,
+		orderBeingPlacedFlag,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "getRecalculatedOrderContainer", params, &r.Options, &resp)
+	return
+}
+
+// Use this method for placing server orders and additional services orders. The same rules apply as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server orders. In addition to verifying the order, placeOrder() also makes an initial authorization on the SoftLayer_Account tied to this order, if a credit card is on file. If the account tied to this order is a PayPal customer, a URL will also be returned to the customer. After placing the order, you must go to this URL to finish the authorization process; this tells PayPal that you indeed want to place the order. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process. After this, it will go to sales for final approval.
+func (r Billing_Order_Quote) PlaceOrder(orderData interface{}) (resp datatypes.Container_Product_Order_Receipt, err error) {
+	err = datatypes.SetComplexType(orderData)
+	if err != nil {
+		return
+	}
+	params := []interface{}{
+		orderData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "placeOrder", params, &r.Options, &resp)
+	return
+}
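A sketch of placing an order from a saved quote. PlaceOrder accepts any of the generated order containers; `datatypes.SetComplexType`, called internally above, stamps the matching SoftLayer complexType onto the payload. The quote ID and container are placeholders:

```go
quoteSvc := services.GetBillingOrderQuoteService(sess).Id(55555) // placeholder quote ID

// Assumed to have been populated earlier (package ID, prices, quantity...).
orderData := &datatypes.Container_Product_Order_Virtual_Guest{}

receipt, err := quoteSvc.PlaceOrder(orderData)
if err != nil {
	log.Fatal(err)
}
if receipt.OrderId != nil {
	fmt.Println("order placed from quote:", *receipt.OrderId)
}
```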
+
+// Use this method for placing server quotes and additional services quotes. The same rules apply as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server quotes. In addition to verifying the quote, placeQuote() also makes an initial authorization on the SoftLayer_Account tied to this order, if a credit card is on file. If the account tied to this order is a PayPal customer, a URL will also be returned to the customer. After placing the order, you must go to this URL to finish the authorization process; this tells PayPal that you indeed want to place the order. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process.
+func (r Billing_Order_Quote) PlaceQuote(orderData *datatypes.Container_Product_Order) (resp datatypes.Container_Product_Order, err error) {
+	params := []interface{}{
+		orderData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "placeQuote", params, &r.Options, &resp)
+	return
+}
+
+// Account master users and sub-users in the SoftLayer customer portal can save the quote of an order to avoid its deletion after 5 days or its expiration after 2 days.
+func (r Billing_Order_Quote) SaveQuote() (resp datatypes.Billing_Order_Quote, err error) {
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "saveQuote", nil, &r.Options, &resp)
+	return
+}
+
+// Use this method to verify server orders and additional services orders without placing them. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server orders. verifyOrder() checks the order data for errors and returns the verified order container; unlike placeOrder(), it does not make a payment authorization or submit the order.
+func (r Billing_Order_Quote) VerifyOrder(orderData interface{}) (resp datatypes.Container_Product_Order, err error) {
+	err = datatypes.SetComplexType(orderData)
+	if err != nil {
+		return
+	}
+	params := []interface{}{
+		orderData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Billing_Order_Quote", "verifyOrder", params, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/brand.go b/vendor/github.com/softlayer/softlayer-go/services/brand.go
new file mode 100644
index 0000000000..96272d1529
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/brand.go
@@ -0,0 +1,293 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// The SoftLayer_Brand data type contains brand information relating to the single SoftLayer customer account.
+//
+// SoftLayer customers are unable to change their brand information in the portal or the API.
+type Brand struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetBrandService returns an instance of the Brand SoftLayer service
+func GetBrandService(sess *session.Session) Brand {
+	return Brand{Session: sess}
+}
+
+func (r Brand) Id(id int) Brand {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Brand) Mask(mask string) Brand {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Brand) Filter(filter string) Brand {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Brand) Limit(limit int) Brand {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Brand) Offset(offset int) Brand {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Create a new customer account record.
+func (r Brand) CreateCustomerAccount(account *datatypes.Account, bypassDuplicateAccountCheck *bool) (resp datatypes.Account, err error) {
+	params := []interface{}{
+		account,
+		bypassDuplicateAccountCheck,
+	}
+	err = r.Session.DoRequest("SoftLayer_Brand", "createCustomerAccount", params, &r.Options, &resp)
+	return
+}
+
+// Create a new brand record.
+func (r Brand) CreateObject(templateObject *datatypes.Brand) (resp datatypes.Brand, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Brand", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Brand) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All accounts owned by the brand.
+func (r Brand) GetAllOwnedAccounts() (resp []datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getAllOwnedAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// (DEPRECATED) Use the [[SoftLayer_Ticket_Subject::getAllObjects]] method.
+func (r Brand) GetAllTicketSubjects(account *datatypes.Account) (resp []datatypes.Ticket_Subject, err error) {
+	params := []interface{}{
+		account,
+	}
+	err = r.Session.DoRequest("SoftLayer_Brand", "getAllTicketSubjects", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve This flag indicates if creation of accounts is allowed.
+func (r Brand) GetAllowAccountCreationFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getAllowAccountCreationFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Product Catalog for the Brand.
+func (r Brand) GetCatalog() (resp datatypes.Product_Catalog, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getCatalog", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the contact information for the brand such as the corporate or support contact. This will include the contact name, telephone number, fax number, email address, and mailing address of the contact.
+func (r Brand) GetContactInformation() (resp []datatypes.Brand_Contact, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getContactInformation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The contacts for the brand.
+func (r Brand) GetContacts() (resp []datatypes.Brand_Contact, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getContacts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve This references the relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain.
+func (r Brand) GetCustomerCountryLocationRestrictions() (resp []datatypes.Brand_Restriction_Location_CustomerCountry, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getCustomerCountryLocationRestrictions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Brand) GetDistributor() (resp datatypes.Brand, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getDistributor", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Brand) GetDistributorChildFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getDistributorChildFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Brand) GetDistributorFlag() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getDistributorFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated hardware objects.
+func (r Brand) GetHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Brand) GetHasAgentSupportFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getHasAgentSupportFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Get the payment processor merchant name.
+func (r Brand) GetMerchantName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getMerchantName", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Brand) GetObject() (resp datatypes.Brand, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Brand) GetOpenTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getOpenTickets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Active accounts owned by the brand.
+func (r Brand) GetOwnedAccounts() (resp []datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getOwnedAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Brand) GetTicketGroups() (resp []datatypes.Ticket_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getTicketGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Brand) GetTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getTickets", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Brand) GetToken(userId *int) (resp string, err error) {
+	params := []interface{}{
+		userId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Brand", "getToken", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Brand) GetUsers() (resp []datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getUsers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated virtual guest objects.
+func (r Brand) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Brand", "getVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// The [[SoftLayer_Brand_Restriction_Location_CustomerCountry]] data type defines the relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on the SoftLayer US brand for customers that live in Great Britain.
+type Brand_Restriction_Location_CustomerCountry struct { + Session *session.Session + Options sl.Options +} + +// GetBrandRestrictionLocationCustomerCountryService returns an instance of the Brand_Restriction_Location_CustomerCountry SoftLayer service +func GetBrandRestrictionLocationCustomerCountryService(sess *session.Session) Brand_Restriction_Location_CustomerCountry { + return Brand_Restriction_Location_CustomerCountry{Session: sess} +} + +func (r Brand_Restriction_Location_CustomerCountry) Id(id int) Brand_Restriction_Location_CustomerCountry { + r.Options.Id = &id + return r +} + +func (r Brand_Restriction_Location_CustomerCountry) Mask(mask string) Brand_Restriction_Location_CustomerCountry { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Brand_Restriction_Location_CustomerCountry) Filter(filter string) Brand_Restriction_Location_CustomerCountry { + r.Options.Filter = filter + return r +} + +func (r Brand_Restriction_Location_CustomerCountry) Limit(limit int) Brand_Restriction_Location_CustomerCountry { + r.Options.Limit = &limit + return r +} + +func (r Brand_Restriction_Location_CustomerCountry) Offset(offset int) Brand_Restriction_Location_CustomerCountry { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Brand_Restriction_Location_CustomerCountry) GetAllObjects() (resp []datatypes.Brand_Restriction_Location_CustomerCountry, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Restriction_Location_CustomerCountry", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve This references the brand that has a brand-location-country restriction setup. +func (r Brand_Restriction_Location_CustomerCountry) GetBrand() (resp datatypes.Brand, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Restriction_Location_CustomerCountry", "getBrand", nil, &r.Options, &resp) + return +} + +// Retrieve This references the datacenter that has a brand-location-country restriction setup. For example, if a datacenter is listed with a restriction for Canada, a Canadian customer may not be eligible to order services at that location. +func (r Brand_Restriction_Location_CustomerCountry) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Restriction_Location_CustomerCountry", "getLocation", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Brand_Restriction_Location_CustomerCountry) GetObject() (resp datatypes.Brand_Restriction_Location_CustomerCountry, err error) { + err = r.Session.DoRequest("SoftLayer_Brand_Restriction_Location_CustomerCountry", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/catalyst.go b/vendor/github.com/softlayer/softlayer-go/services/catalyst.go new file mode 100644 index 0000000000..12242da8a3 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/catalyst.go @@ -0,0 +1,207 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// no documentation yet
+type Catalyst_Company_Type struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetCatalystCompanyTypeService returns an instance of the Catalyst_Company_Type SoftLayer service
+func GetCatalystCompanyTypeService(sess *session.Session) Catalyst_Company_Type {
+	return Catalyst_Company_Type{Session: sess}
+}
+
+func (r Catalyst_Company_Type) Id(id int) Catalyst_Company_Type {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Catalyst_Company_Type) Mask(mask string) Catalyst_Company_Type {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Catalyst_Company_Type) Filter(filter string) Catalyst_Company_Type {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Catalyst_Company_Type) Limit(limit int) Catalyst_Company_Type {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Catalyst_Company_Type) Offset(offset int) Catalyst_Company_Type {
+	r.Options.Offset = &offset
+	return r
+}
+
+// createObject() enables the creation of servers on an account. This
+// method is a simplified alternative to interacting with the ordering system directly.
+//
+// In order to create a server, a template object must be sent in with a few required
+// values.
+//
+// When this method returns, an order will have been placed for a server of the specified configuration.
+//
+// To determine when the server is available you can poll the server via
+// [[SoftLayer_Hardware/getObject|getObject]], checking the provisionDate property.
+// When provisionDate is not null, the server will be ready. Be sure to use the
+// globalIdentifier as your initialization parameter.
+//
+// Warning: Servers created via this method will incur charges on your account. For
+// testing input parameters see [[SoftLayer_Hardware/generateOrderTemplate|generateOrderTemplate]].
+//
+// Input - [[SoftLayer_Hardware (type)|SoftLayer_Hardware]]
+//
+// - hostname: Hostname for the server. Required. Type - string.
+//
+// - domain: Domain for the server. Required. Type - string.
+//
+// - processorCoreAmount: The number of logical CPU cores to allocate. Required.
+//   Type - int. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]]
+//   for available options.
+//
+// - memoryCapacity: The amount of memory to allocate in gigabytes. Required.
+//   Type - int. See getCreateObjectOptions for available options.
+//
+// - hourlyBillingFlag: Specifies the billing type for the server. Required.
+//   Type - boolean. When true the server will be billed on hourly usage,
+//   otherwise it will be billed on a monthly basis.
+//
+// - operatingSystemReferenceCode: An identifier for the operating system to
+//   provision the server with. Required. Type - string. See
+//   getCreateObjectOptions for available options.
+//
+// - datacenter.name: Specifies which datacenter the server is to be provisioned
+//   in. Required. Type - string. The datacenter property is a
+//   [[SoftLayer_Location (type)|location]] structure with the name field set.
+//   See getCreateObjectOptions for available options.
+//   { "datacenter": { "name": "dal05" } }
+//
+// - networkComponents.maxSpeed: Specifies the connection speed for the server's
+//   network components. Optional. Type - int. Default - the highest available
+//   zero cost port speed will be used. The networkComponents property is an array
+//   with a single [[SoftLayer_Network_Component (type)|network component]]
+//   structure. The maxSpeed property must be set to specify the network uplink
+//   speed, in megabits per second, of the server. See getCreateObjectOptions for
+//   available options.
+//   { "networkComponents": [ { "maxSpeed": 1000 } ] }
+//
+// - networkComponents.redundancyEnabledFlag: Specifies whether or not the
+//   server's network components should be in redundancy groups. Optional.
+//   Type - bool. Default - false. When the redundancyEnabledFlag property is true
+//   the server's network components will be in redundancy groups.
+//   { "networkComponents": [ { "redundancyEnabledFlag": false } ] }
+//
+// - privateNetworkOnlyFlag: Specifies whether or not the server only has access
+//   to the private network. Optional. Type - boolean. Default - false.
+//
+// - primaryNetworkComponent.networkVlan.id: Specifies the network vlan which is
+//   to be used for the frontend interface of the server. Optional. Type - int.
+//   The primaryNetworkComponent property is a network component structure with
+//   the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]]
+//   structure. The id property must be set to specify the frontend network vlan
+//   of the server.
+//   { "primaryNetworkComponent": { "networkVlan": { "id": 1 } } }
+//
+// - primaryBackendNetworkComponent.networkVlan.id: Specifies the network vlan
+//   which is to be used for the backend interface of the server. Optional.
+//   Type - int. The primaryBackendNetworkComponent property is a network
+//   component structure with the networkVlan property populated with a vlan
+//   structure. The id property must be set to specify the backend network vlan
+//   of the server.
+//   { "primaryBackendNetworkComponent": { "networkVlan": { "id": 2 } } }
+//
+// - fixedConfigurationPreset.keyName: Optional. Type - string. The
+//   fixedConfigurationPreset property is a
+//   [[SoftLayer_Product_Package_Preset (type)|fixed configuration preset]]
+//   structure. The keyName property must be set to specify the preset to use.
+//   If a fixed configuration preset is used, the processorCoreAmount,
+//   memoryCapacity and hardDrives properties must not be set. See
+//   getCreateObjectOptions for available options.
+//   { "fixedConfigurationPreset": { "keyName": "SOME_KEY_NAME" } }
+//
+// - userData.value: Arbitrary data to be made available to the server. Optional.
+//   Type - string. The userData property is an array with a single
+//   [[SoftLayer_Hardware_Attribute (type)|attribute]] structure with the value
+//   property set to an arbitrary value. This value can be retrieved via the
+//   [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method from a
+//   request originating from the server. This is primarily useful for providing
+//   data to software that may be on the server and configured to execute upon
+//   first boot.
+//   { "userData": [ { "value": "someValue" } ] }
+//
+// - hardDrives: Hard drive settings for the server. Optional.
+//   Type - SoftLayer_Hardware_Component. Default - the largest available
+//   capacity for a zero cost primary disk will be used. The hardDrives property
+//   is an array of [[SoftLayer_Hardware_Component (type)|hardware component]]
+//   structures. Each hard drive must specify the capacity property. See
+//   getCreateObjectOptions for available options.
+//   { "hardDrives": [ { "capacity": 500 } ] }
+//
+// - sshKeys: SSH keys to install on the server upon provisioning. Optional.
+//   Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]]
+//   structures with the id property set to the value of an existing SSH key. To
+//   create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]]
+//   on the [[SoftLayer_Security_Ssh_Key]] service. To obtain a list of existing
+//   SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the
+//   [[SoftLayer_Account]] service.
+//   { "sshKeys": [ { "id": 123 } ] }
+//
+// - postInstallScriptUri: Specifies the uri location of the script to be
+//   downloaded and run after installation is complete. Optional. Type - string.
+//
    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "processorCoreAmount": 2, +// "memoryCapacity": 2, +// "hourlyBillingFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Hardware.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Hardware/f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5/getObject +// +// +// { +// "accountId": 232298, +// "bareMetalInstanceFlag": null, +// "domain": "example.com", +// "hardwareStatusId": null, +// "hostname": "host1", +// "id": null, +// "serviceProviderId": null, +// "serviceProviderResourceId": null, +// "globalIdentifier": "f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5", +// "hourlyBillingFlag": true, +// "memoryCapacity": 2, +// "operatingSystemReferenceCode": "UBUNTU_LATEST", +// "processorCoreAmount": 2 +// } +// +func (r Hardware) CreateObject(templateObject *datatypes.Hardware) (resp datatypes.Hardware, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "createObject", params, &r.Options, &resp) + return +} + +// +// This method will cancel a server effective immediately. For servers billed hourly, the charges will stop immediately after the method returns. +func (r Hardware) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete software component passwords. +func (r Hardware) DeleteSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "deleteSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, and notes. +func (r Hardware) EditSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "editSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Download and run remote script from uri on the hardware. +func (r Hardware) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// The '''findByIpAddress''' method finds hardware using its primary public or private IP address. IP addresses that have a secondary subnet tied to the hardware will not return the hardware - alternate means of locating the hardware must be used (see '''Associated Methods'''). If no hardware is found, no errors are generated and no data is returned. +func (r Hardware) FindByIpAddress(ipAddress *string) (resp datatypes.Hardware, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "findByIpAddress", params, &r.Options, &resp) + return +} + +// +// Obtain an [[SoftLayer_Container_Product_Order_Hardware_Server (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]]. +// +// +// This is primarily useful when there is a necessity to confirm the price which will be charged for an order. 
+//
+//
+// See [[SoftLayer_Hardware/createObject|createObject]] for specifics on the requirements of the template object parameter.
+func (r Hardware) GenerateOrderTemplate(templateObject *datatypes.Hardware) (resp datatypes.Container_Product_Order, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "generateOrderTemplate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account associated with a piece of hardware.
+func (r Hardware) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's active physical components.
+func (r Hardware) GetActiveComponents() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getActiveComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's active network monitoring incidents.
+func (r Hardware) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getAlarmHistory''' method retrieves a detailed history for the monitoring alarm. When calling this method, a start and end date for the history to be retrieved must be entered.
+func (r Hardware) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+		alarmId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getAlarmHistory", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware) GetAllPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getAllPowerComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists.
+func (r Hardware) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getAllowedHost", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to.
+func (r Hardware) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getAllowedNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to.
+func (r Hardware) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding an antivirus/spyware software component object.
+func (r Hardware) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Storage volumes that this SoftLayer_Hardware is authorized to access.
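A sketch of that storage lookup, placed before the method itself. It reuses `sess` and the imports from the earlier sketches; the hardware id and the "ISCSI" type string are assumptions for illustration:

```go
// List storage volumes this server is authorized to access, by type.
hw := services.GetHardwareService(sess).Id(123456) // assumed hardware id
volumes, err := hw.GetAttachedNetworkStorages(sl.String("ISCSI"))
if err != nil {
	log.Fatal(err)
}
for _, v := range volumes {
	if v.Username != nil {
		fmt.Println("attached volume:", *v.Username)
	}
}
```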
+func (r Hardware) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's specific attributes. +func (r Hardware) GetAttributes() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getAttributes", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Hardware. +func (r Hardware) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve The average daily public bandwidth usage for the current billing cycle. +func (r Hardware) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// The '''getBackendIncomingBandwidth''' method retrieves the amount of incoming private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. +func (r Hardware) GetBackendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getBackendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's back-end or private network components. +func (r Hardware) GetBackendNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBackendNetworkComponents", nil, &r.Options, &resp) + return +} + +// The '''getBackendOutgoingBandwidth''' method retrieves the amount of outgoing private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. +func (r Hardware) GetBackendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getBackendOutgoingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A hardware's backend or private router. +func (r Hardware) GetBackendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBackendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth (measured in GB). +func (r Hardware) GetBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments. 
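The bandwidth methods above take start and end dates whose time-of-day components are ignored. A small sketch before the allotment accessor that follows, assuming datatypes.Time simply wraps time.Time (as this package's custom JSON marshaling suggests):

```go
// Only year, month and day matter here; the API clamps times to midnight.
start := datatypes.Time{Time: time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC)}
end := datatypes.Time{Time: time.Date(2016, time.February, 1, 0, 0, 0, 0, time.UTC)}
inGB, err := hw.GetBackendIncomingBandwidth(&start, &end)
if err != nil {
	log.Fatal(err)
}
fmt.Println("private network ingress (GB):", inGB)
```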
+func (r Hardware) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getBandwidthAllotmentDetail", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's benchmark certifications.
+func (r Hardware) GetBenchmarkCertifications() (resp []datatypes.Hardware_Benchmark_Certification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getBenchmarkCertifications", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the billing item for a server.
+func (r Hardware) GetBillingItem() (resp datatypes.Billing_Item_Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A flag indicating that a billing item exists.
+func (r Hardware) GetBillingItemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getBillingItemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determines whether the hardware is ineligible for cancellation because it is disconnected.
+func (r Hardware) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Status indicating whether or not a piece of hardware has business continuance insurance.
+func (r Hardware) GetBusinessContinuanceInsuranceFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getBusinessContinuanceInsuranceFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Child hardware.
+func (r Hardware) GetChildrenHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getChildrenHardware", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Hardware) GetComponentDetailsXML() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getComponentDetailsXML", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's components.
+func (r Hardware) GetComponents() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A continuous data protection/server backup software component object.
+func (r Hardware) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// There are many options that may be provided while ordering a server; this method can be used to determine what those options are.
+//
+// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Hardware_Configuration (type)]].
+func (r Hardware) GetCreateObjectOptions() (resp datatypes.Container_Hardware_Configuration, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getCreateObjectOptions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current billable public outbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware) GetCurrentBillableBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getCurrentBillableBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Get the billing detail for this instance for the current billing period. This does not include bandwidth usage.
+func (r Hardware) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getCurrentBillingDetail", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getCurrentBillingTotal''' method retrieves the total bill amount in US Dollars ($) for the current billing period. In addition to the total bill amount, the billing detail also includes all bandwidth used up to the point the method is called on the piece of hardware.
+func (r Hardware) GetCurrentBillingTotal() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getCurrentBillingTotal", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getDailyAverage''' method calculates the average daily network traffic used by the selected server. Using the required parameter ''dateTime'' to enter a start and end date, the user retrieves this average, measured in gigabytes (GB), for the specified date range. When entering parameters, only the month, day and year are required - time entries are omitted as this method defaults the time to midnight in order to account for the entire day.
+func (r Hardware) GetDailyAverage(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getDailyAverage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the datacenter in which a piece of hardware resides.
+func (r Hardware) GetDatacenter() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getDatacenter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The name of the datacenter in which a piece of hardware resides.
+func (r Hardware) GetDatacenterName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getDatacenterName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware that has uplink network connections to a piece of hardware.
+func (r Hardware) GetDownlinkHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getDownlinkHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware that has uplink network connections to a piece of hardware.
+func (r Hardware) GetDownlinkNetworkHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getDownlinkNetworkHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding all servers attached to a piece of network hardware.
+func (r Hardware) GetDownlinkServers() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getDownlinkServers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding all virtual guests attached to a piece of network hardware.
+func (r Hardware) GetDownlinkVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getDownlinkVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware downstream from a network device.
+func (r Hardware) GetDownstreamHardwareBindings() (resp []datatypes.Network_Component_Uplink_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownstreamHardwareBindings", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware downstream from the selected piece of hardware. +func (r Hardware) GetDownstreamNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownstreamNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. +func (r Hardware) GetDownstreamNetworkHardwareWithIncidents() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownstreamNetworkHardwareWithIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached downstream to a piece of network hardware. +func (r Hardware) GetDownstreamServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownstreamServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware) GetDownstreamVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDownstreamVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The drive controllers contained within a piece of hardware. +func (r Hardware) GetDriveControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getDriveControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated EVault network storage service account. +func (r Hardware) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's firewall services. +func (r Hardware) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getFirewallServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Defines the fixed components in a fixed configuration bare metal server. +func (r Hardware) GetFixedConfigurationPreset() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getFixedConfigurationPreset", nil, &r.Options, &resp) + return +} + +// The '''getFrontendIncomingBandwidth''' method retrieves the amount of incoming public network traffic used by a server between the given start and end date parameters. When entering the ''dateTime'' parameter, only the month, day and year of the start and end dates are required - the time (hour, minute and second) are set to midnight by default and cannot be changed. The amount of bandwidth retrieved is measured in gigabytes (GB). +func (r Hardware) GetFrontendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getFrontendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's front-end or public network components. 
+func (r Hardware) GetFrontendNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getFrontendNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getFrontendOutgoingBandwidth''' method retrieves the amount of outgoing public network traffic used by a server between the given start and end date parameters. The ''dateTime'' parameter requires only the day, month and year to be entered - the time (hour, minute and second) is set to midnight by default in order to gather the data for the entire start and end date indicated in the parameter. The amount of bandwidth retrieved is measured in gigabytes (GB).
+func (r Hardware) GetFrontendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getFrontendOutgoingBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's frontend or public router.
+func (r Hardware) GetFrontendRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getFrontendRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's universally unique identifier.
+func (r Hardware) GetGlobalIdentifier() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getGlobalIdentifier", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hard drives contained within a piece of hardware.
+func (r Hardware) GetHardDrives() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHardDrives", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The chassis that a piece of hardware is housed in.
+func (r Hardware) GetHardwareChassis() (resp datatypes.Hardware_Chassis, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHardwareChassis", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware) GetHardwareFunction() (resp datatypes.Hardware_Function, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHardwareFunction", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware) GetHardwareFunctionDescription() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHardwareFunctionDescription", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's status.
+func (r Hardware) GetHardwareStatus() (resp datatypes.Hardware_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHardwareStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determines whether the hardware object has TPM enabled.
+func (r Hardware) GetHasTrustedPlatformModuleBillingItemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHasTrustedPlatformModuleBillingItemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a host IPS software component object.
+func (r Hardware) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getHostIpsSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getHourlyBandwidth''' method retrieves all bandwidth updates hourly for the specified hardware. Because the potential number of data points can become excessive, the method limits the user to obtain data in 24-hour intervals.
The required ''dateTime'' parameter is used as the starting point for the query and will be calculated for the 24-hour period starting with the specified date and time. For example, entering a parameter of +// +// '02/01/2008 0:00' +// +// results in a return of all bandwidth data for the entire day of February 1, 2008, as 0:00 specifies a midnight start date. Please note that the time entered should be completed using a 24-hour clock (military time, astronomical time). +// +// For data spanning more than a single 24-hour period, refer to the getBandwidthData function on the metricTrackingObject for the piece of hardware. +func (r Hardware) GetHourlyBandwidth(mode *string, day *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + mode, + day, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getHourlyBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A server's hourly billing status. +func (r Hardware) GetHourlyBillingFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getHourlyBillingFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the inbound network traffic data for the last 30 days. +func (r Hardware) GetInboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getInboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public inbound bandwidth for this hardware for the current billing cycle. +func (r Hardware) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the last transaction a server performed. +func (r Hardware) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getLastTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's latest network monitoring incident. +func (r Hardware) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve Where a piece of hardware is located within SoftLayer's location hierarchy. +func (r Hardware) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetLocationPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getLocationPathString", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a lockbox account associated with a server. +func (r Hardware) GetLockboxNetworkStorage() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getLockboxNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the hardware is a managed resource. +func (r Hardware) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's memory. 
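A sketch of the 24-hour window described above, before the memory accessors that follow. The "public" mode string is an assumption for illustration; consult the service documentation for accepted mode values:

```go
// Request one 24-hour window of hourly samples, starting at midnight.
day := datatypes.Time{Time: time.Date(2008, time.February, 1, 0, 0, 0, 0, time.UTC)}
samples, err := hw.GetHourlyBandwidth(sl.String("public"), &day)
if err != nil {
	log.Fatal(err)
}
fmt.Println("hourly data points returned:", len(samples))
```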
+func (r Hardware) GetMemory() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMemory", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of memory a piece of hardware has, measured in gigabytes. +func (r Hardware) GetMemoryCapacity() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMemoryCapacity", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's metric tracking object. +func (r Hardware) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_HardwareServer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Returns open monitoring alarms for a given time period +func (r Hardware) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringActiveAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the monitoring agents associated with a piece of hardware. +func (r Hardware) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringAgents", nil, &r.Options, &resp) + return +} + +// Returns closed monitoring alarms for a given time period +func (r Hardware) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringClosedAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's monitoring robot. +func (r Hardware) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringRobot", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitoring services. +func (r Hardware) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring service flag eligibility status for a piece of hardware. +func (r Hardware) GetMonitoringServiceEligibilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The service flag status for a piece of hardware. +func (r Hardware) GetMonitoringServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMonitoringServiceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's motherboard. +func (r Hardware) GetMotherboard() (resp datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getMotherboard", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network cards. +func (r Hardware) GetNetworkCards() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkCards", nil, &r.Options, &resp) + return +} + +// Retrieve Returns a hardware's network components. 
+func (r Hardware) GetNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway member if this device is part of a network gateway. +func (r Hardware) GetNetworkGatewayMember() (resp datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkGatewayMember", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this device is part of a network gateway. +func (r Hardware) GetNetworkGatewayMemberFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkGatewayMemberFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's network management IP address. +func (r Hardware) GetNetworkManagementIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkManagementIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve All servers with failed monitoring that are attached downstream to a piece of hardware. +func (r Hardware) GetNetworkMonitorAttachedDownHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkMonitorAttachedDownHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guests that are attached downstream to a hardware that have failed monitoring +func (r Hardware) GetNetworkMonitorAttachedDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkMonitorAttachedDownVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The status of all of a piece of hardware's network monitoring incidents. +func (r Hardware) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkMonitorIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitors. +func (r Hardware) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkMonitors", nil, &r.Options, &resp) + return +} + +// Retrieve The value of a hardware's network status attribute. +func (r Hardware) GetNetworkStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's related network status attribute. +func (r Hardware) GetNetworkStatusAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkStatusAttribute", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated network storage service account. +func (r Hardware) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The network virtual LANs (VLANs) associated with a piece of hardware's network components. +func (r Hardware) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth for the next billing cycle (measured in GB). 
+func (r Hardware) GetNextBillingCycleBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNextBillingCycleBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetNotesHistory() (resp []datatypes.Hardware_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getNotesHistory", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Hardware object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware service. You can only retrieve the account that your portal user is assigned to. +func (r Hardware) GetObject() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's operating system. +func (r Hardware) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's operating system software description. +func (r Hardware) GetOperatingSystemReferenceCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getOperatingSystemReferenceCode", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the outbound network traffic data for the last 30 days. +func (r Hardware) GetOutboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getOutboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Parent Hardware. +func (r Hardware) GetParentHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getParentHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the Point of Presence (PoP) location in which a piece of hardware resides. +func (r Hardware) GetPointOfPresenceLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPointOfPresenceLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The power components for a hardware object. +func (r Hardware) GetPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's power supply. +func (r Hardware) GetPowerSupply() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPowerSupply", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary private IP address. +func (r Hardware) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary back-end network component. 
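Pulling together the getObject notes above with the option chaining shown earlier, a retrieval sketch before the network component accessors (assumed id; the mask keeps the response small):

```go
// Fetch a single server by id, trimming the response with an object mask.
server, err := services.GetHardwareService(sess).
	Id(123456). // assumed hardware id
	Mask("id,hostname,domain,primaryIpAddress").
	GetObject()
if err != nil {
	log.Fatal(err)
}
if server.Hostname != nil && server.Domain != nil {
	fmt.Printf("found %s.%s\n", *server.Hostname, *server.Domain)
}
```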
+func (r Hardware) GetPrimaryBackendNetworkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hardware's primary public IP address.
+func (r Hardware) GetPrimaryIpAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getPrimaryIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the hardware's primary public network component.
+func (r Hardware) GetPrimaryNetworkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getPrimaryNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server's private network bandwidth usage over the specified timeframe. If no timeframe is specified then the last 24 hours of private bandwidth usage is returned.
+func (r Hardware) GetPrivateBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startTime,
+		endTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getPrivateBandwidthData", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether the hardware only has access to the private network.
+func (r Hardware) GetPrivateNetworkOnlyFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total number of processor cores, summed from all processors that are attached to a piece of hardware
+func (r Hardware) GetProcessorCoreAmount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getProcessorCoreAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total number of physical processor cores, summed from all processors that are attached to a piece of hardware
+func (r Hardware) GetProcessorPhysicalCoreAmount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getProcessorPhysicalCoreAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's processors.
+func (r Hardware) GetProcessors() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getProcessors", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server's public network bandwidth usage over the specified timeframe. If no timeframe is specified then the last 24 hours of public bandwidth usage is returned.
+func (r Hardware) GetPublicBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startTime,
+		endTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getPublicBandwidthData", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware) GetRack() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getRack", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The RAID controllers contained within a piece of hardware.
+func (r Hardware) GetRaidControllers() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getRaidControllers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Recent events that impact this hardware.
+func (r Hardware) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getRecentEvents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve User credentials to issue commands and/or interact with the server's remote management card.
+func (r Hardware) GetRemoteManagementAccounts() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getRemoteManagementAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's associated remote management component. This is normally IPMI.
+func (r Hardware) GetRemoteManagementComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getRemoteManagementComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware) GetResourceConfigurations() (resp []datatypes.Hardware_Resource_Configuration, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getResourceConfigurations", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware) GetResourceGroupMemberReferences() (resp []datatypes.Resource_Group_Member, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getResourceGroupMemberReferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware) GetResourceGroupRoles() (resp []datatypes.Resource_Group_Role, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getResourceGroupRoles", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The resource groups in which this hardware is a member.
+func (r Hardware) GetResourceGroups() (resp []datatypes.Resource_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getResourceGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's routers.
+func (r Hardware) GetRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Collection of scale assets this hardware corresponds to.
+func (r Hardware) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getScaleAssets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's vulnerability scan requests.
+func (r Hardware) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getSecurityScanRequests", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getSensorData''' method retrieves a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures various information, including system temperatures, voltages and other local server settings. Sensor data is cached for 30 seconds; calls made to this method for the same server within 30 seconds of each other will return the same data. To ensure that fresh data is retrieved, make calls more than 30 seconds apart.
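Given the 30-second cache noted above, a polling sketch for the method defined next; the iteration count and sleep duration are arbitrary illustrative choices:

```go
// Readings are cached for 30 seconds, so sleep just past the cache window.
for i := 0; i < 3; i++ {
	readings, err := hw.GetSensorData()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("poll %d: %d sensor readings\n", i, len(readings))
	time.Sleep(31 * time.Second)
}
```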
+func (r Hardware) GetSensorData() (resp []datatypes.Container_RemoteManagement_SensorReading, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getSensorData", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getSensorDataWithGraphs''' method retrieves the raw data returned from the server's remote management card. Along with raw data, graphs for the CPU and system temperatures and fan speeds are also returned. For more details on what information is returned, refer to the ''getSensorData'' method.
+func (r Hardware) GetSensorDataWithGraphs() (resp datatypes.Container_RemoteManagement_SensorReadingsWithGraphs, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getSensorDataWithGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getServerFanSpeedGraphs''' method retrieves the server's fan speeds and displays the speeds using tachometer graphs. Data used to construct these graphs is retrieved from the server's remote management card. Each graph returned will have an associated title.
+func (r Hardware) GetServerFanSpeedGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorSpeed, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getServerFanSpeedGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getServerPowerState''' method retrieves the power state for the selected server. The server's power status is retrieved from its remote management card. This method returns "on" for a server that has been powered on, or "off" for servers that are powered off.
+func (r Hardware) GetServerPowerState() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getServerPowerState", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the server room in which the hardware is located.
+func (r Hardware) GetServerRoom() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getServerRoom", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getServerTemperatureGraphs''' method retrieves the server's temperatures and displays the various temperatures using thermometer graphs. Temperatures retrieved are CPU temperature(s) and system temperatures. Data used to construct the graphs is retrieved from the server's remote management card. All graphs returned will have an associated title.
+func (r Hardware) GetServerTemperatureGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorTemperature, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getServerTemperatureGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the piece of hardware's service provider.
+func (r Hardware) GetServiceProvider() (resp datatypes.Service_Provider, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getServiceProvider", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's installed software.
+func (r Hardware) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getSoftwareComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the billing item for a spare pool server.
+func (r Hardware) GetSparePoolBillingItem() (resp datatypes.Billing_Item_Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getSparePoolBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve SSH keys to be installed on the server during provisioning or an OS reload.
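Before the accessor itself, a short sketch listing those keys; it assumes the Security_Ssh_Key fields Id and Label, which appear elsewhere in this vendored package:

```go
// Enumerate the SSH keys slated for install on provision or OS reload.
keys, err := hw.GetSshKeys()
if err != nil {
	log.Fatal(err)
}
for _, k := range keys {
	if k.Id != nil && k.Label != nil {
		fmt.Printf("ssh key %d: %s\n", *k.Id, *k.Label)
	}
}
```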
+func (r Hardware) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getSshKeys", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetStorageNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getStorageNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware) GetTopLevelLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getTopLevelLocation", nil, &r.Options, &resp) + return +} + +// +// This method will query transaction history for a piece of hardware. +func (r Hardware) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction_History, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getTransactionHistory", nil, &r.Options, &resp) + return +} + +// Retrieve a list of upgradeable items available to this piece of hardware. Currently, getUpgradeItemPrices retrieves upgrades available for a server's memory, hard drives, network port speed, bandwidth allocation and GPUs. +func (r Hardware) GetUpgradeItemPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getUpgradeItemPrices", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated upgrade request object, if any. +func (r Hardware) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getUpgradeRequest", nil, &r.Options, &resp) + return +} + +// Retrieve The network device connected to a piece of hardware. +func (r Hardware) GetUplinkHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getUplinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. +func (r Hardware) GetUplinkNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getUplinkNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A string containing custom user data for a hardware order. +func (r Hardware) GetUserData() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getUserData", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis for a piece of hardware. +func (r Hardware) GetVirtualChassis() (resp datatypes.Hardware_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualChassis", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis siblings for a piece of hardware. +func (r Hardware) GetVirtualChassisSiblings() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualChassisSiblings", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtual host record. 
+func (r Hardware) GetVirtualHost() (resp datatypes.Virtual_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualHost", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's virtual software licenses.
+func (r Hardware) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualLicenses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the bandwidth allotment to which a piece of hardware belongs.
+func (r Hardware) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualRack", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The id of the bandwidth allotment belonging to a piece of hardware.
+func (r Hardware) GetVirtualRackId() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualRackId", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The name of the bandwidth allotment belonging to a piece of hardware.
+func (r Hardware) GetVirtualRackName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualRackName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's virtualization platform software.
+func (r Hardware) GetVirtualizationPlatform() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "getVirtualizationPlatform", nil, &r.Options, &resp)
+	return
+}
+
+// The '''importVirtualHost''' method attempts to import the host record for the virtualization platform running on a server.
+func (r Hardware) ImportVirtualHost() (resp datatypes.Virtual_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "importVirtualHost", nil, &r.Options, &resp)
+	return
+}
+
+// The '''isPingable''' method issues a ping command to the selected server and returns the result of the ping command. This boolean return value is ''true'' upon a successful ping or ''false'' for a failed ping.
+func (r Hardware) IsPingable() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "isPingable", nil, &r.Options, &resp)
+	return
+}
+
+// Issues a ping command to the server and returns the ping response.
+func (r Hardware) Ping() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "ping", nil, &r.Options, &resp)
+	return
+}
+
+// The '''powerCycle''' method completes a power off and power on of the server successively in one command. The power cycle command is equivalent to unplugging the server from the power strip and then plugging the server back in. '''This method should only be used when all other options have been exhausted'''. To avoid server failure, additional remote management commands may not be executed if this command was successfully issued within the last 20 minutes. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware) PowerCycle() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "powerCycle", nil, &r.Options, &resp)
+	return
+}
+
+// This method will power off the server via the server's remote management card.
+func (r Hardware) PowerOff() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "powerOff", nil, &r.Options, &resp)
+	return
+}
+
+// The '''powerOn''' method powers on a server via its remote management card. This boolean return value is ''true'' upon successful execution and ''false'' if unsuccessful.
+// To avoid server failure, other remote management commands may not be issued if this command was successfully completed within the last 20 minutes. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware) PowerOn() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "powerOn", nil, &r.Options, &resp)
+	return
+}
+
+// The '''rebootDefault''' method attempts to reboot the server by issuing a soft reboot, or reset, command to the server's remote management card. If the reset attempt is unsuccessful, a power cycle command will be issued via the power strip. The power cycle command is equivalent to unplugging the server from the power strip and then plugging the server back in. If the reset was successful within the last 20 minutes, another remote management command cannot be completed, to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware) RebootDefault() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "rebootDefault", nil, &r.Options, &resp)
+	return
+}
+
+// The '''rebootHard''' method reboots the server by issuing a cycle command to the server's remote management card. A hard reboot is equivalent to pressing the ''Reset'' button on a server - it is issued immediately and will not allow processes to shut down prior to the reboot. Completing a hard reboot may initiate system disk checks upon server reboot, causing the boot up to take longer than normally expected.
+//
+// To avoid server failure, remote management commands cannot be executed if a reboot has been issued successfully within the last 20 minutes. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware) RebootHard() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "rebootHard", nil, &r.Options, &resp)
+	return
+}
+
+// The '''rebootSoft''' method reboots the server by issuing a reset command to the server's remote management card via soft reboot. When executing a soft reboot, servers allow all processes to shut down completely before rebooting. To avoid server failure, remote management commands cannot be issued within 20 minutes of issuing a successful soft reboot. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware) RebootSoft() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware", "rebootSoft", nil, &r.Options, &resp)
+	return
+}
+
+// This method is used to remove access to a SoftLayer_Network_Storage volume that supports host- or network-level access control.
+func (r Hardware) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware", "removeAccessToNetworkStorage", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to remove access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control.
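+//
+// A minimal usage sketch (illustrative only; volumeId and hardwareId are
+// hypothetical, and the volume objects would normally be retrieved first,
+// e.g. via GetAllowedNetworkStorage):
+//
+//	vols := []datatypes.Network_Storage{{Id: sl.Int(volumeId)}}
+//	ok, err := services.GetHardwareService(sess).Id(hardwareId).RemoveAccessToNetworkStorageList(vols)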
+func (r Hardware) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "removeAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware) SetTags(tags *string) (resp bool, err error) { + params := []interface{}{ + tags, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "setTags", params, &r.Options, &resp) + return +} + +// This method will update the root IPMI password on this SoftLayer_Hardware. +func (r Hardware) UpdateIpmiPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Hardware", "updateIpmiPassword", params, &r.Options, &resp) + return +} + +// The SoftLayer_Hardware_Benchmark_Certification data type contains general information relating to a single SoftLayer hardware benchmark certification document. +type Hardware_Benchmark_Certification struct { + Session *session.Session + Options sl.Options +} + +// GetHardwareBenchmarkCertificationService returns an instance of the Hardware_Benchmark_Certification SoftLayer service +func GetHardwareBenchmarkCertificationService(sess *session.Session) Hardware_Benchmark_Certification { + return Hardware_Benchmark_Certification{Session: sess} +} + +func (r Hardware_Benchmark_Certification) Id(id int) Hardware_Benchmark_Certification { + r.Options.Id = &id + return r +} + +func (r Hardware_Benchmark_Certification) Mask(mask string) Hardware_Benchmark_Certification { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Hardware_Benchmark_Certification) Filter(filter string) Hardware_Benchmark_Certification { + r.Options.Filter = filter + return r +} + +func (r Hardware_Benchmark_Certification) Limit(limit int) Hardware_Benchmark_Certification { + r.Options.Limit = &limit + return r +} + +func (r Hardware_Benchmark_Certification) Offset(offset int) Hardware_Benchmark_Certification { + r.Options.Offset = &offset + return r +} + +// Retrieve Information regarding a benchmark certification result's associated SoftLayer customer account. +func (r Hardware_Benchmark_Certification) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Benchmark_Certification", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the piece of hardware on which a benchmark certification test was performed. +func (r Hardware_Benchmark_Certification) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Benchmark_Certification", "getHardware", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Hardware_Benchmark_Certification object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware_Benchmark_Certification service. +func (r Hardware_Benchmark_Certification) GetObject() (resp datatypes.Hardware_Benchmark_Certification, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Benchmark_Certification", "getObject", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with a benchmark certification result, if such a file exists. 
+// If there is no file for this benchmark certification result, calling this method throws an exception.
+func (r Hardware_Benchmark_Certification) GetResultFile() (resp []byte, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Benchmark_Certification", "getResultFile", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Hardware_Component_Model data type contains general information relating to a single SoftLayer component model. A component model represents a vendor specific representation of a hardware component. Every piece of hardware on a server will have a specific hardware component model.
+type Hardware_Component_Model struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetHardwareComponentModelService returns an instance of the Hardware_Component_Model SoftLayer service
+func GetHardwareComponentModelService(sess *session.Session) Hardware_Component_Model {
+	return Hardware_Component_Model{Session: sess}
+}
+
+func (r Hardware_Component_Model) Id(id int) Hardware_Component_Model {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Hardware_Component_Model) Mask(mask string) Hardware_Component_Model {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Hardware_Component_Model) Filter(filter string) Hardware_Component_Model {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Hardware_Component_Model) Limit(limit int) Hardware_Component_Model {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Hardware_Component_Model) Offset(offset int) Hardware_Component_Model {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve
+func (r Hardware_Component_Model) GetArchitectureType() (resp datatypes.Hardware_Component_Model_Architecture_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getArchitectureType", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Component_Model) GetAttributes() (resp []datatypes.Hardware_Component_Model_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getAttributes", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Component_Model) GetCompatibleArrayTypes() (resp []datatypes.Configuration_Storage_Group_Array_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getCompatibleArrayTypes", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All the component models that are compatible with a hardware component model.
+func (r Hardware_Component_Model) GetCompatibleChildComponentModels() (resp []datatypes.Hardware_Component_Model, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getCompatibleChildComponentModels", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All the component models that a hardware component model is compatible with.
+func (r Hardware_Component_Model) GetCompatibleParentComponentModels() (resp []datatypes.Hardware_Component_Model, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getCompatibleParentComponentModels", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware component model's physical components in inventory.
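+//
+// A minimal usage sketch (illustrative only; componentModelId and the mask are
+// hypothetical), showing the chainable Id/Mask options common to every service
+// in this package:
+//
+//	svc := services.GetHardwareComponentModelService(sess).
+//		Id(componentModelId).
+//		Mask("id,name")
+//	components, err := svc.GetHardwareComponents()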
+func (r Hardware_Component_Model) GetHardwareComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getHardwareComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The non-vendor specific generic component model for a hardware component model. +func (r Hardware_Component_Model) GetHardwareGenericComponentModel() (resp datatypes.Hardware_Component_Model_Generic, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getHardwareGenericComponentModel", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Model) GetInfinibandCompatibleAttribute() (resp datatypes.Hardware_Component_Model_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getInfinibandCompatibleAttribute", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Model) GetIsFlexSkuCompatible() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getIsFlexSkuCompatible", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Component_Model) GetIsInfinibandCompatible() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getIsInfinibandCompatible", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Hardware_Component_Model object. +func (r Hardware_Component_Model) GetObject() (resp datatypes.Hardware_Component_Model, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A motherboard's average reboot time. +func (r Hardware_Component_Model) GetRebootTime() (resp datatypes.Hardware_Component_Motherboard_Reboot_Time, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getRebootTime", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware component model's type. +func (r Hardware_Component_Model) GetType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The types of attributes that are allowed for a given hardware component model. +func (r Hardware_Component_Model) GetValidAttributeTypes() (resp []datatypes.Hardware_Component_Model_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Component_Model", "getValidAttributeTypes", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Hardware_Component_Partition_OperatingSystem data type contains general information relating to a single SoftLayer Operating System Partition Template. 
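+//
+// A minimal usage sketch (illustrative only): look up the partition templates
+// defined for an operating system by its description.
+//
+//	osSvc := services.GetHardwareComponentPartitionOperatingSystemService(sess)
+//	partOs, err := osSvc.GetByDescription(sl.String("linux"))
+//	if err == nil && partOs.Id != nil {
+//		templates, _ := osSvc.Id(*partOs.Id).GetPartitionTemplates()
+//		_ = templates
+//	}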
+type Hardware_Component_Partition_OperatingSystem struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetHardwareComponentPartitionOperatingSystemService returns an instance of the Hardware_Component_Partition_OperatingSystem SoftLayer service
+func GetHardwareComponentPartitionOperatingSystemService(sess *session.Session) Hardware_Component_Partition_OperatingSystem {
+	return Hardware_Component_Partition_OperatingSystem{Session: sess}
+}
+
+func (r Hardware_Component_Partition_OperatingSystem) Id(id int) Hardware_Component_Partition_OperatingSystem {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Hardware_Component_Partition_OperatingSystem) Mask(mask string) Hardware_Component_Partition_OperatingSystem {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Hardware_Component_Partition_OperatingSystem) Filter(filter string) Hardware_Component_Partition_OperatingSystem {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Hardware_Component_Partition_OperatingSystem) Limit(limit int) Hardware_Component_Partition_OperatingSystem {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Hardware_Component_Partition_OperatingSystem) Offset(offset int) Hardware_Component_Partition_OperatingSystem {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Hardware_Component_Partition_OperatingSystem) GetAllObjects() (resp []datatypes.Hardware_Component_Partition_OperatingSystem, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_OperatingSystem", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getByDescription''' method retrieves all possible partition templates based on the description (required parameter) entered when calling the method. The description is typically the operating system's name. Currently recognized values include 'linux', 'windows', 'freebsd', and 'Debian'.
+func (r Hardware_Component_Partition_OperatingSystem) GetByDescription(description *string) (resp datatypes.Hardware_Component_Partition_OperatingSystem, err error) {
+	params := []interface{}{
+		description,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_OperatingSystem", "getByDescription", params, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Hardware_Component_Partition_OperatingSystem object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware_Component_Partition_OperatingSystem service.
+func (r Hardware_Component_Partition_OperatingSystem) GetObject() (resp datatypes.Hardware_Component_Partition_OperatingSystem, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_OperatingSystem", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding an operating system's [[SoftLayer_Hardware_Component_Partition_Template|Partition Templates]].
+func (r Hardware_Component_Partition_OperatingSystem) GetPartitionTemplates() (resp []datatypes.Hardware_Component_Partition_Template, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_OperatingSystem", "getPartitionTemplates", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Hardware_Component_Partition_Template data type contains general information relating to a single SoftLayer partition template.
+// Partition templates group one or more partition configurations that can be used to predefine how a hard drive's partitions will be configured.
+type Hardware_Component_Partition_Template struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetHardwareComponentPartitionTemplateService returns an instance of the Hardware_Component_Partition_Template SoftLayer service
+func GetHardwareComponentPartitionTemplateService(sess *session.Session) Hardware_Component_Partition_Template {
+	return Hardware_Component_Partition_Template{Session: sess}
+}
+
+func (r Hardware_Component_Partition_Template) Id(id int) Hardware_Component_Partition_Template {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Hardware_Component_Partition_Template) Mask(mask string) Hardware_Component_Partition_Template {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Hardware_Component_Partition_Template) Filter(filter string) Hardware_Component_Partition_Template {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Hardware_Component_Partition_Template) Limit(limit int) Hardware_Component_Partition_Template {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Hardware_Component_Partition_Template) Offset(offset int) Hardware_Component_Partition_Template {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve A partition template's associated [[SoftLayer_Account|Account]].
+func (r Hardware_Component_Partition_Template) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An individual partition for a partition template. This is identical to 'partitionTemplatePartition' except this will sort unix partitions.
+func (r Hardware_Component_Partition_Template) GetData() (resp []datatypes.Hardware_Component_Partition_Template_Partition, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getData", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Component_Partition_Template) GetExpireDate() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getExpireDate", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Hardware_Component_Partition_Template object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware_Component_Partition_Template service. You can only retrieve the partition templates that your account created or the templates predefined by SoftLayer.
+func (r Hardware_Component_Partition_Template) GetObject() (resp datatypes.Hardware_Component_Partition_Template, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A partition template's associated [[SoftLayer_Hardware_Component_Partition_OperatingSystem|Operating System]].
+func (r Hardware_Component_Partition_Template) GetPartitionOperatingSystem() (resp datatypes.Hardware_Component_Partition_OperatingSystem, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getPartitionOperatingSystem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An individual partition for a partition template.
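+//
+// A minimal usage sketch (illustrative only; templateId is hypothetical):
+//
+//	tmplSvc := services.GetHardwareComponentPartitionTemplateService(sess).Id(templateId)
+//	partitions, err := tmplSvc.GetPartitionTemplatePartition()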
+func (r Hardware_Component_Partition_Template) GetPartitionTemplatePartition() (resp []datatypes.Hardware_Component_Partition_Template_Partition, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Component_Partition_Template", "getPartitionTemplatePartition", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Hardware_Router data type contains general information relating to a single SoftLayer router.
+type Hardware_Router struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetHardwareRouterService returns an instance of the Hardware_Router SoftLayer service
+func GetHardwareRouterService(sess *session.Session) Hardware_Router {
+	return Hardware_Router{Session: sess}
+}
+
+func (r Hardware_Router) Id(id int) Hardware_Router {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Hardware_Router) Mask(mask string) Hardware_Router {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Hardware_Router) Filter(filter string) Hardware_Router {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Hardware_Router) Limit(limit int) Hardware_Router {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Hardware_Router) Offset(offset int) Hardware_Router {
+	r.Options.Offset = &offset
+	return r
+}
+
+// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control.
+func (r Hardware_Router) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "allowAccessToNetworkStorage", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control.
+func (r Hardware_Router) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "allowAccessToNetworkStorageList", params, &r.Options, &resp)
+	return
+}
+
+// Captures a Flex Image of the hard disk on the physical machine, based on the capture template parameter. Returns the image template group containing the disk image.
+func (r Hardware_Router) CaptureImage(captureTemplate *datatypes.Container_Disk_Image_Capture_Template) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) {
+	params := []interface{}{
+		captureTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "captureImage", params, &r.Options, &resp)
+	return
+}
+
+// Closes a monitoring alarm.
+func (r Hardware_Router) CloseAlarm(alarmId *string) (resp bool, err error) {
+	params := []interface{}{
+		alarmId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "closeAlarm", params, &r.Options, &resp)
+	return
+}
+
+//
+//
+// createObject() enables the creation of servers on an account. This
+// method is a simplified alternative to interacting with the ordering system directly.
+//
+//
+// In order to create a server, a template object must be sent in with a few required
+// values.
+//
+//
+// When this method returns, an order will have been placed for a server of the specified configuration.
+// +// +// To determine when the server is available you can poll the server via [[SoftLayer_Hardware/getObject|getObject]], +// checking the provisionDate property. +// When provisionDate is not null, the server will be ready. Be sure to use the globalIdentifier +// as your initialization parameter. +// +// +// Warning: Servers created via this method will incur charges on your account. For testing input parameters see [[SoftLayer_Hardware/generateOrderTemplate|generateOrderTemplate]]. +// +// +// Input - [[SoftLayer_Hardware (type)|SoftLayer_Hardware]] +//
+//
+// - hostname: Hostname for the server. Required. Type - string.
+//
+// - domain: Domain for the server. Required. Type - string.
+//
+// - processorCoreAmount: The number of logical CPU cores to allocate. Required. Type - int. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// - memoryCapacity: The amount of memory to allocate in gigabytes. Required. Type - int. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// - hourlyBillingFlag: Specifies the billing type for the server. Required. Type - boolean. When true the server will be billed on hourly usage, otherwise it will be billed on a monthly basis.
+//
+// - operatingSystemReferenceCode: An identifier for the operating system to provision the server with. Required. Type - string. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// - datacenter.name: Specifies which datacenter the server is to be provisioned in. Required. Type - string. The datacenter property is a [[SoftLayer_Location (type)|location]] structure with the name field set. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//	{
+//	    "datacenter": {
+//	        "name": "dal05"
+//	    }
+//	}
+//
+// - networkComponents.maxSpeed: Specifies the connection speed for the server's network components. Optional. Type - int. Default - The highest available zero cost port speed will be used. The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]] structure. The maxSpeed property must be set to specify the network uplink speed, in megabits per second, of the server. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//	{
+//	    "networkComponents": [
+//	        {
+//	            "maxSpeed": 1000
+//	        }
+//	    ]
+//	}
+//
+// - networkComponents.redundancyEnabledFlag: Specifies whether or not the server's network components should be in redundancy groups. Optional. Type - bool. Default - false. The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]] structure. When the redundancyEnabledFlag property is true the server's network components will be in redundancy groups.
+//
+//	{
+//	    "networkComponents": [
+//	        {
+//	            "redundancyEnabledFlag": false
+//	        }
+//	    ]
+//	}
+//
+// - privateNetworkOnlyFlag: Specifies whether or not the server only has access to the private network. Optional. Type - boolean. Default - false. When true this flag specifies that a server is to only have access to the private network.
+//
+// - primaryNetworkComponent.networkVlan.id: Specifies the network vlan which is to be used for the frontend interface of the server. Optional. Type - int. The primaryNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the frontend network vlan of the server.
+//
+//	{
+//	    "primaryNetworkComponent": {
+//	        "networkVlan": {
+//	            "id": 1
+//	        }
+//	    }
+//	}
+//
+// - primaryBackendNetworkComponent.networkVlan.id: Specifies the network vlan which is to be used for the backend interface of the server. Optional. Type - int. The primaryBackendNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the backend network vlan of the server.
+//
+//	{
+//	    "primaryBackendNetworkComponent": {
+//	        "networkVlan": {
+//	            "id": 2
+//	        }
+//	    }
+//	}
+//
+// - fixedConfigurationPreset.keyName: Optional. Type - string. The fixedConfigurationPreset property is a [[SoftLayer_Product_Package_Preset (type)|fixed configuration preset]] structure. The keyName property must be set to specify the preset to use. If a fixed configuration preset is used, the processorCoreAmount, memoryCapacity and hardDrives properties must not be set. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//	{
+//	    "fixedConfigurationPreset": {
+//	        "keyName": "SOME_KEY_NAME"
+//	    }
+//	}
+//
+// - userData.value: Arbitrary data to be made available to the server. Optional. Type - string. The userData property is an array with a single [[SoftLayer_Hardware_Attribute (type)|attribute]] structure with the value property set to an arbitrary value. This value can be retrieved via the [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method from a request originating from the server. This is primarily useful for providing data to software that may be on the server and configured to execute upon first boot.
+//
+//	{
+//	    "userData": [
+//	        {
+//	            "value": "someValue"
+//	        }
+//	    ]
+//	}
+//
+// - hardDrives: Hard drive settings for the server. Optional. Type - SoftLayer_Hardware_Component. Default - The largest available capacity for a zero cost primary disk will be used. The hardDrives property is an array of [[SoftLayer_Hardware_Component (type)|hardware component]] structures. Each hard drive must specify the capacity property. See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//	{
+//	    "hardDrives": [
+//	        {
+//	            "capacity": 500
+//	        }
+//	    ]
+//	}
+//
+// - sshKeys: SSH keys to install on the server upon provisioning. Optional. Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]]. The sshKeys property is an array of [[SoftLayer_Security_Ssh_Key (type)|SSH Key]] structures with the id property set to the value of an existing SSH key. To create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]] on the [[SoftLayer_Security_Ssh_Key]] service. To obtain a list of existing SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the [[SoftLayer_Account]] service.
+//
+//	{
+//	    "sshKeys": [
+//	        {
+//	            "id": 123
+//	        }
+//	    ]
+//	}
+//
+// - postInstallScriptUri: Specifies the uri location of the script to be downloaded and run after installation is complete. Optional. Type - string.
+//
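+// A minimal Go sketch of the same request (illustrative only; it assumes the
+// template properties above map to the corresponding pointer fields on
+// datatypes.Hardware, with the unsignedInt properties as *uint):
+//
+//	cores, mem := uint(2), uint(2)
+//	tmpl := datatypes.Hardware{
+//	    Hostname:                     sl.String("host1"),
+//	    Domain:                       sl.String("example.com"),
+//	    ProcessorCoreAmount:          &cores,
+//	    MemoryCapacity:               &mem,
+//	    HourlyBillingFlag:            sl.Bool(true),
+//	    OperatingSystemReferenceCode: sl.String("UBUNTU_LATEST"),
+//	    Datacenter:                   &datatypes.Location{Name: sl.String("dal05")},
+//	}
+//	hw, err := services.GetHardwareService(sess).CreateObject(&tmpl)
+//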
+// REST Example:
+//
    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "processorCoreAmount": 2, +// "memoryCapacity": 2, +// "hourlyBillingFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Hardware.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Hardware/f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5/getObject +// +// +// { +// "accountId": 232298, +// "bareMetalInstanceFlag": null, +// "domain": "example.com", +// "hardwareStatusId": null, +// "hostname": "host1", +// "id": null, +// "serviceProviderId": null, +// "serviceProviderResourceId": null, +// "globalIdentifier": "f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5", +// "hourlyBillingFlag": true, +// "memoryCapacity": 2, +// "operatingSystemReferenceCode": "UBUNTU_LATEST", +// "processorCoreAmount": 2 +// } +// +func (r Hardware_Router) CreateObject(templateObject *datatypes.Hardware) (resp datatypes.Hardware, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "createObject", params, &r.Options, &resp) + return +} + +// +// This method will cancel a server effective immediately. For servers billed hourly, the charges will stop immediately after the method returns. +func (r Hardware_Router) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete software component passwords. +func (r Hardware_Router) DeleteSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "deleteSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, and notes. +func (r Hardware_Router) EditSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "editSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Download and run remote script from uri on the hardware. +func (r Hardware_Router) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// The '''findByIpAddress''' method finds hardware using its primary public or private IP address. IP addresses that have a secondary subnet tied to the hardware will not return the hardware - alternate means of locating the hardware must be used (see '''Associated Methods'''). If no hardware is found, no errors are generated and no data is returned. +func (r Hardware_Router) FindByIpAddress(ipAddress *string) (resp datatypes.Hardware, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "findByIpAddress", params, &r.Options, &resp) + return +} + +// +// Obtain an [[SoftLayer_Container_Product_Order_Hardware_Server (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]]. 
+//
+//
+// This is primarily useful when it is necessary to confirm the price that will be charged for an order.
+//
+//
+// See [[SoftLayer_Hardware/createObject|createObject]] for specifics on the requirements of the template object parameter.
+func (r Hardware_Router) GenerateOrderTemplate(templateObject *datatypes.Hardware) (resp datatypes.Container_Product_Order, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "generateOrderTemplate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account associated with a piece of hardware.
+func (r Hardware_Router) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's active physical components.
+func (r Hardware_Router) GetActiveComponents() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getActiveComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's active network monitoring incidents.
+func (r Hardware_Router) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getAlarmHistory''' method retrieves a detailed history for the monitoring alarm. When calling this method, a start and end date for the history to be retrieved must be entered.
+func (r Hardware_Router) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+		alarmId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAlarmHistory", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Router) GetAllPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAllPowerComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists.
+func (r Hardware_Router) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAllowedHost", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to.
+func (r Hardware_Router) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAllowedNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to.
+func (r Hardware_Router) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding an antivirus/spyware software component object.
+func (r Hardware_Router) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Storage volumes that are authorized for access by this SoftLayer_Hardware.
+func (r Hardware_Router) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) {
+	params := []interface{}{
+		nasType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAttachedNetworkStorages", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's specific attributes.
+func (r Hardware_Router) GetAttributes() (resp []datatypes.Hardware_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAttributes", nil, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Hardware.
+func (r Hardware_Router) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) {
+	params := []interface{}{
+		nasType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAvailableNetworkStorages", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The average daily public bandwidth usage for the current billing cycle.
+func (r Hardware_Router) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getBackendIncomingBandwidth''' method retrieves the amount of incoming private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes.
+func (r Hardware_Router) GetBackendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBackendIncomingBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's back-end or private network components.
+func (r Hardware_Router) GetBackendNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBackendNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getBackendOutgoingBandwidth''' method retrieves the amount of outgoing private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes.
+func (r Hardware_Router) GetBackendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBackendOutgoingBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's backend or private routers.
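+//
+// A minimal usage sketch (illustrative only; routerId is hypothetical):
+//
+//	routers, err := services.GetHardwareRouterService(sess).Id(routerId).GetBackendRouters()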
+func (r Hardware_Router) GetBackendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBackendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth (measured in GB). +func (r Hardware_Router) GetBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments. +func (r Hardware_Router) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBandwidthAllotmentDetail", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's benchmark certifications. +func (r Hardware_Router) GetBenchmarkCertifications() (resp []datatypes.Hardware_Benchmark_Certification, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBenchmarkCertifications", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a server. +func (r Hardware_Router) GetBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a billing item exists. +func (r Hardware_Router) GetBillingItemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBillingItemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the hardware is ineligible for cancellation because it is disconnected. +func (r Hardware_Router) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Associated subnets for a router object. +func (r Hardware_Router) GetBoundSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBoundSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve Status indicating whether or not a piece of hardware has business continuance insurance. +func (r Hardware_Router) GetBusinessContinuanceInsuranceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getBusinessContinuanceInsuranceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Child hardware. +func (r Hardware_Router) GetChildrenHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getChildrenHardware", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Router) GetComponentDetailsXML() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getComponentDetailsXML", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's components. +func (r Hardware_Router) GetComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A continuous data protection/server backup software component object. 
+func (r Hardware_Router) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+//
+// There are many options that may be provided while ordering a server; this method can be used to determine what those options are.
+//
+//
+// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Hardware_Configuration (type)]].
+func (r Hardware_Router) GetCreateObjectOptions() (resp datatypes.Container_Hardware_Configuration, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getCreateObjectOptions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current billable public outbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_Router) GetCurrentBillableBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getCurrentBillableBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Get the billing detail for this instance for the current billing period. This does not include bandwidth usage.
+func (r Hardware_Router) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getCurrentBillingDetail", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getCurrentBillingTotal''' method retrieves the total bill amount in US Dollars ($) for the current billing period. In addition to the total bill amount, the billing detail also includes all bandwidth used up to the point the method is called on the piece of hardware.
+func (r Hardware_Router) GetCurrentBillingTotal() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getCurrentBillingTotal", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getDailyAverage''' method calculates the average daily network traffic used by the selected server. Using the required ''dateTime'' parameters to enter a start and end date, the user retrieves this average, measured in gigabytes (GB), for the specified date range. When entering parameters, only the month, day and year are required - time entries are omitted as this method defaults the time to midnight in order to account for the entire day.
+func (r Hardware_Router) GetDailyAverage(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDailyAverage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the datacenter in which a piece of hardware resides.
+func (r Hardware_Router) GetDatacenter() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDatacenter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The name of the datacenter in which a piece of hardware resides.
+func (r Hardware_Router) GetDatacenterName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDatacenterName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware that has uplink network connections to a piece of hardware.
+func (r Hardware_Router) GetDownlinkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownlinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware. +func (r Hardware_Router) GetDownlinkNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownlinkNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached to a piece of network hardware. +func (r Hardware_Router) GetDownlinkServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownlinkServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_Router) GetDownlinkVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownlinkVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware downstream from a network device. +func (r Hardware_Router) GetDownstreamHardwareBindings() (resp []datatypes.Network_Component_Uplink_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownstreamHardwareBindings", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware downstream from the selected piece of hardware. +func (r Hardware_Router) GetDownstreamNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownstreamNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. +func (r Hardware_Router) GetDownstreamNetworkHardwareWithIncidents() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownstreamNetworkHardwareWithIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached downstream to a piece of network hardware. +func (r Hardware_Router) GetDownstreamServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownstreamServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_Router) GetDownstreamVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDownstreamVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The drive controllers contained within a piece of hardware. +func (r Hardware_Router) GetDriveControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getDriveControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated EVault network storage service account. +func (r Hardware_Router) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's firewall services. 
+func (r Hardware_Router) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFirewallServiceComponent", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Defines the fixed components in a fixed configuration bare metal server.
+func (r Hardware_Router) GetFixedConfigurationPreset() (resp datatypes.Product_Package_Preset, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFixedConfigurationPreset", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getFrontendIncomingBandwidth''' method retrieves the amount of incoming public network traffic used by a server between the given start and end date parameters. When entering the ''dateTime'' parameter, only the month, day and year of the start and end dates are required - the time (hour, minute and second) is set to midnight by default and cannot be changed. The amount of bandwidth retrieved is measured in gigabytes (GB).
+func (r Hardware_Router) GetFrontendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+ params := []interface{}{
+  startDate,
+  endDate,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFrontendIncomingBandwidth", params, &r.Options, &resp)
+ return
+}
+
+// Retrieve A piece of hardware's front-end or public network components.
+func (r Hardware_Router) GetFrontendNetworkComponents() (resp []datatypes.Network_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFrontendNetworkComponents", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getFrontendOutgoingBandwidth''' method retrieves the amount of outgoing public network traffic used by a server between the given start and end date parameters. The ''dateTime'' parameter requires only the day, month and year to be entered - the time (hour, minute and second) is set to midnight by default in order to gather the data for the entire start and end date indicated in the parameter. The amount of bandwidth retrieved is measured in gigabytes (GB).
+func (r Hardware_Router) GetFrontendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+ params := []interface{}{
+  startDate,
+  endDate,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFrontendOutgoingBandwidth", params, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's frontend or public router.
+func (r Hardware_Router) GetFrontendRouters() (resp []datatypes.Hardware, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getFrontendRouters", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's universally unique identifier.
+func (r Hardware_Router) GetGlobalIdentifier() (resp string, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getGlobalIdentifier", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The hard drives contained within a piece of hardware.
+func (r Hardware_Router) GetHardDrives() (resp []datatypes.Hardware_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHardDrives", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The chassis that a piece of hardware is housed in.
+func (r Hardware_Router) GetHardwareChassis() (resp datatypes.Hardware_Chassis, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHardwareChassis", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware_Router) GetHardwareFunction() (resp datatypes.Hardware_Function, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHardwareFunction", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware_Router) GetHardwareFunctionDescription() (resp string, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHardwareFunctionDescription", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's status.
+func (r Hardware_Router) GetHardwareStatus() (resp datatypes.Hardware_Status, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHardwareStatus", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Determine if the hardware object has TPM enabled.
+func (r Hardware_Router) GetHasTrustedPlatformModuleBillingItemFlag() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHasTrustedPlatformModuleBillingItemFlag", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding a host IPS software component object.
+func (r Hardware_Router) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHostIpsSoftwareComponent", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getHourlyBandwidth''' method retrieves all bandwidth updates hourly for the specified hardware. Because the potential number of data points can become excessive, the method limits the user to obtain data in 24-hour intervals. The required ''dateTime'' parameter is used as the starting point for the query and will be calculated for the 24-hour period starting with the specified date and time. For example, entering a parameter of
+//
+// '02/01/2008 0:00'
+//
+// results in a return of all bandwidth data for the entire day of February 1, 2008, as 0:00 specifies a midnight start date. Please note that the time entered should be completed using a 24-hour clock (military time, astronomical time).
+//
+// For data spanning more than a single 24-hour period, refer to the getBandwidthData function on the metricTrackingObject for the piece of hardware.
+func (r Hardware_Router) GetHourlyBandwidth(mode *string, day *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+ params := []interface{}{
+  mode,
+  day,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHourlyBandwidth", params, &r.Options, &resp)
+ return
+}
+
+// Retrieve A server's hourly billing status.
+func (r Hardware_Router) GetHourlyBillingFlag() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getHourlyBillingFlag", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The sum of all the inbound network traffic data for the last 30 days.
+func (r Hardware_Router) GetInboundBandwidthUsage() (resp datatypes.Float64, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getInboundBandwidthUsage", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The total public inbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_Router) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding the last transaction a server performed.
+func (r Hardware_Router) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLastTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's latest network monitoring incident. +func (r Hardware_Router) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a VLAN on the router can be assigned to a host that has local disk functionality. +func (r Hardware_Router) GetLocalDiskStorageCapabilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLocalDiskStorageCapabilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Where a piece of hardware is located within SoftLayer's location hierarchy. +func (r Hardware_Router) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetLocationPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLocationPathString", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a lockbox account associated with a server. +func (r Hardware_Router) GetLockboxNetworkStorage() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getLockboxNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the hardware is a managed resource. +func (r Hardware_Router) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's memory. +func (r Hardware_Router) GetMemory() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMemory", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of memory a piece of hardware has, measured in gigabytes. +func (r Hardware_Router) GetMemoryCapacity() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMemoryCapacity", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's metric tracking object. +func (r Hardware_Router) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_HardwareServer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Returns open monitoring alarms for a given time period +func (r Hardware_Router) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringActiveAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the monitoring agents associated with a piece of hardware. 
+func (r Hardware_Router) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringAgents", nil, &r.Options, &resp) + return +} + +// Returns closed monitoring alarms for a given time period +func (r Hardware_Router) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringClosedAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's monitoring robot. +func (r Hardware_Router) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringRobot", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitoring services. +func (r Hardware_Router) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring service flag eligibility status for a piece of hardware. +func (r Hardware_Router) GetMonitoringServiceEligibilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The service flag status for a piece of hardware. +func (r Hardware_Router) GetMonitoringServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMonitoringServiceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's motherboard. +func (r Hardware_Router) GetMotherboard() (resp datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getMotherboard", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network cards. +func (r Hardware_Router) GetNetworkCards() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkCards", nil, &r.Options, &resp) + return +} + +// Retrieve Returns a hardware's network components. +func (r Hardware_Router) GetNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway member if this device is part of a network gateway. +func (r Hardware_Router) GetNetworkGatewayMember() (resp datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkGatewayMember", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this device is part of a network gateway. +func (r Hardware_Router) GetNetworkGatewayMemberFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkGatewayMemberFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's network management IP address. 
+func (r Hardware_Router) GetNetworkManagementIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkManagementIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve All servers with failed monitoring that are attached downstream to a piece of hardware. +func (r Hardware_Router) GetNetworkMonitorAttachedDownHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkMonitorAttachedDownHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guests that are attached downstream to a hardware that have failed monitoring +func (r Hardware_Router) GetNetworkMonitorAttachedDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkMonitorAttachedDownVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The status of all of a piece of hardware's network monitoring incidents. +func (r Hardware_Router) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkMonitorIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitors. +func (r Hardware_Router) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkMonitors", nil, &r.Options, &resp) + return +} + +// Retrieve The value of a hardware's network status attribute. +func (r Hardware_Router) GetNetworkStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's related network status attribute. +func (r Hardware_Router) GetNetworkStatusAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkStatusAttribute", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated network storage service account. +func (r Hardware_Router) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The network virtual LANs (VLANs) associated with a piece of hardware's network components. +func (r Hardware_Router) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth for the next billing cycle (measured in GB). +func (r Hardware_Router) GetNextBillingCycleBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNextBillingCycleBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetNotesHistory() (resp []datatypes.Hardware_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getNotesHistory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Router) GetObject() (resp datatypes.Hardware_Router, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's operating system. 
+func (r Hardware_Router) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's operating system software description. +func (r Hardware_Router) GetOperatingSystemReferenceCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getOperatingSystemReferenceCode", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the outbound network traffic data for the last 30 days. +func (r Hardware_Router) GetOutboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getOutboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_Router) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Parent Hardware. +func (r Hardware_Router) GetParentHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getParentHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the Point of Presence (PoP) location in which a piece of hardware resides. +func (r Hardware_Router) GetPointOfPresenceLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPointOfPresenceLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The power components for a hardware object. +func (r Hardware_Router) GetPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's power supply. +func (r Hardware_Router) GetPowerSupply() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPowerSupply", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary private IP address. +func (r Hardware_Router) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary back-end network component. +func (r Hardware_Router) GetPrimaryBackendNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary public IP address. +func (r Hardware_Router) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary public network component. +func (r Hardware_Router) GetPrimaryNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrimaryNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's private network bandwidth usage over the specified timeframe. 
If no timeframe is specified, getPrivateBandwidthData retrieves the last 24 hours of private bandwidth usage.
+func (r Hardware_Router) GetPrivateBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+ params := []interface{}{
+  startTime,
+  endTime,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrivateBandwidthData", params, &r.Options, &resp)
+ return
+}
+
+// Retrieve Whether the hardware only has access to the private network.
+func (r Hardware_Router) GetPrivateNetworkOnlyFlag() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The total number of processor cores, summed from all processors that are attached to a piece of hardware.
+func (r Hardware_Router) GetProcessorCoreAmount() (resp uint, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getProcessorCoreAmount", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The total number of physical processor cores, summed from all processors that are attached to a piece of hardware.
+func (r Hardware_Router) GetProcessorPhysicalCoreAmount() (resp uint, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getProcessorPhysicalCoreAmount", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding a piece of hardware's processors.
+func (r Hardware_Router) GetProcessors() (resp []datatypes.Hardware_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getProcessors", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve a server's public network bandwidth usage over the specified timeframe. If no timeframe is specified, getPublicBandwidthData retrieves the last 24 hours of public bandwidth usage.
+func (r Hardware_Router) GetPublicBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+ params := []interface{}{
+  startTime,
+  endTime,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getPublicBandwidthData", params, &r.Options, &resp)
+ return
+}
+
+// Retrieve
+func (r Hardware_Router) GetRack() (resp datatypes.Location, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRack", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The RAID controllers contained within a piece of hardware.
+func (r Hardware_Router) GetRaidControllers() (resp []datatypes.Hardware_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRaidControllers", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Recent events that impact this hardware.
+func (r Hardware_Router) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRecentEvents", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve User credentials to issue commands and/or interact with the server's remote management card.
+func (r Hardware_Router) GetRemoteManagementAccounts() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRemoteManagementAccounts", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's associated remote management component. This is normally IPMI.
+func (r Hardware_Router) GetRemoteManagementComponent() (resp datatypes.Network_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRemoteManagementComponent", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve
+func (r Hardware_Router) GetResourceConfigurations() (resp []datatypes.Hardware_Resource_Configuration, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getResourceConfigurations", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve
+func (r Hardware_Router) GetResourceGroupMemberReferences() (resp []datatypes.Resource_Group_Member, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getResourceGroupMemberReferences", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve
+func (r Hardware_Router) GetResourceGroupRoles() (resp []datatypes.Resource_Group_Role, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getResourceGroupRoles", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The resource groups in which this hardware is a member.
+func (r Hardware_Router) GetResourceGroups() (resp []datatypes.Resource_Group, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getResourceGroups", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A hardware's routers.
+func (r Hardware_Router) GetRouters() (resp []datatypes.Hardware, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getRouters", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A flag indicating that a VLAN on the router can be assigned to a host that has SAN disk functionality.
+func (r Hardware_Router) GetSanStorageCapabilityFlag() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSanStorageCapabilityFlag", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Collection of scale assets this hardware corresponds to.
+func (r Hardware_Router) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getScaleAssets", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding a piece of hardware's vulnerability scan requests.
+func (r Hardware_Router) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSecurityScanRequests", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getSensorData''' method retrieves a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures various information, including system temperatures, voltages and other local server settings. Sensor data is cached for 30 seconds; calls made to this method for the same server within 30 seconds of each other will result in the same data being returned. To ensure that the data retrieved is a fresh snapshot, make calls more than 30 seconds apart.
+func (r Hardware_Router) GetSensorData() (resp []datatypes.Container_RemoteManagement_SensorReading, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSensorData", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getSensorDataWithGraphs''' method retrieves the raw data returned from the server's remote management card. Along with raw data, graphs for the CPU and system temperatures and fan speeds are also returned. For more details on what information is returned, refer to the ''getSensorData'' method.
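+//
+// A hedged usage sketch (the id is a placeholder, with the router service
+// value obtained as in the sketch near the top of this service); per the
+// caching note on '''getSensorData''' above, space repeated calls more than
+// 30 seconds apart:
+//
+//  readings, err := router.Id(123).GetSensorDataWithGraphs()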
+func (r Hardware_Router) GetSensorDataWithGraphs() (resp datatypes.Container_RemoteManagement_SensorReadingsWithGraphs, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSensorDataWithGraphs", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getServerFanSpeedGraphs''' method retrieves the server's fan speeds and displays the speeds using tachometer graphs. Data used to construct these graphs is retrieved from the server's remote management card. Each graph returned will have an associated title.
+func (r Hardware_Router) GetServerFanSpeedGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorSpeed, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getServerFanSpeedGraphs", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getServerPowerState''' method retrieves the power state for the selected server. The server's power status is retrieved from its remote management card. This method returns "on", for a server that has been powered on, or "off" for servers powered off.
+func (r Hardware_Router) GetServerPowerState() (resp string, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getServerPowerState", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding the server room in which the hardware is located.
+func (r Hardware_Router) GetServerRoom() (resp datatypes.Location, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getServerRoom", nil, &r.Options, &resp)
+ return
+}
+
+// The '''getServerTemperatureGraphs''' method retrieves the server's temperatures and displays the various temperatures using thermometer graphs. Temperatures retrieved are CPU temperature(s) and system temperatures. Data used to construct the graphs is retrieved from the server's remote management card. All graphs returned will have an associated title.
+func (r Hardware_Router) GetServerTemperatureGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorTemperature, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getServerTemperatureGraphs", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding the piece of hardware's service provider.
+func (r Hardware_Router) GetServiceProvider() (resp datatypes.Service_Provider, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getServiceProvider", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding a piece of hardware's installed software.
+func (r Hardware_Router) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSoftwareComponents", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding the billing item for a spare pool server.
+func (r Hardware_Router) GetSparePoolBillingItem() (resp datatypes.Billing_Item_Hardware, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSparePoolBillingItem", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve SSH keys to be installed on the server during provisioning or an OS reload.
+func (r Hardware_Router) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getSshKeys", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetStorageNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getStorageNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetTagReferences() (resp []datatypes.Tag_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getTagReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Router) GetTopLevelLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getTopLevelLocation", nil, &r.Options, &resp) + return +} + +// +// This method will query transaction history for a piece of hardware. +func (r Hardware_Router) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction_History, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getTransactionHistory", nil, &r.Options, &resp) + return +} + +// Retrieve a list of upgradeable items available to this piece of hardware. Currently, getUpgradeItemPrices retrieves upgrades available for a server's memory, hard drives, network port speed, bandwidth allocation and GPUs. +func (r Hardware_Router) GetUpgradeItemPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getUpgradeItemPrices", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated upgrade request object, if any. +func (r Hardware_Router) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getUpgradeRequest", nil, &r.Options, &resp) + return +} + +// Retrieve The network device connected to a piece of hardware. +func (r Hardware_Router) GetUplinkHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getUplinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. +func (r Hardware_Router) GetUplinkNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getUplinkNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A string containing custom user data for a hardware order. +func (r Hardware_Router) GetUserData() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getUserData", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis for a piece of hardware. +func (r Hardware_Router) GetVirtualChassis() (resp datatypes.Hardware_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualChassis", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis siblings for a piece of hardware. +func (r Hardware_Router) GetVirtualChassisSiblings() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualChassisSiblings", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtual host record. 
+func (r Hardware_Router) GetVirtualHost() (resp datatypes.Virtual_Host, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualHost", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding a piece of hardware's virtual software licenses.
+func (r Hardware_Router) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualLicenses", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve Information regarding the bandwidth allotment to which a piece of hardware belongs.
+func (r Hardware_Router) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualRack", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The id of the bandwidth allotment belonging to a piece of hardware.
+func (r Hardware_Router) GetVirtualRackId() (resp int, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualRackId", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve The name of the bandwidth allotment belonging to a piece of hardware.
+func (r Hardware_Router) GetVirtualRackName() (resp string, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualRackName", nil, &r.Options, &resp)
+ return
+}
+
+// Retrieve A piece of hardware's virtualization platform software.
+func (r Hardware_Router) GetVirtualizationPlatform() (resp datatypes.Software_Component, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "getVirtualizationPlatform", nil, &r.Options, &resp)
+ return
+}
+
+// The '''importVirtualHost''' method attempts to import the host record for the virtualization platform running on a server.
+func (r Hardware_Router) ImportVirtualHost() (resp datatypes.Virtual_Host, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "importVirtualHost", nil, &r.Options, &resp)
+ return
+}
+
+// The '''isPingable''' method issues a ping command to the selected server and returns the result of the ping command. This boolean return value is ''true'' upon a successful ping or ''false'' for a failed ping.
+func (r Hardware_Router) IsPingable() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "isPingable", nil, &r.Options, &resp)
+ return
+}
+
+// Issues a ping command to the server and returns the ping response.
+func (r Hardware_Router) Ping() (resp string, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "ping", nil, &r.Options, &resp)
+ return
+}
+
+// The '''powerCycle''' method completes a power off and power on of the server successively in one command. The power cycle command is equivalent to unplugging the server from the power strip and then plugging the server back in. '''This method should only be used when all other options have been exhausted'''. Additional remote management commands may not be executed if this command was successfully issued within the last 20 minutes, to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware_Router) PowerCycle() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "powerCycle", nil, &r.Options, &resp)
+ return
+}
+
+// This method will power off the server via the server's remote management card.
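+//
+// A hedged usage sketch (the id is a placeholder); note the 20 minute
+// spacing between remote management commands described in the surrounding
+// power and reboot methods:
+//
+//  ok, err := router.Id(123).PowerOff()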
+func (r Hardware_Router) PowerOff() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "powerOff", nil, &r.Options, &resp)
+ return
+}
+
+// The '''powerOn''' method powers on a server via its remote management card. This boolean return value returns ''true'' upon successful execution and ''false'' if unsuccessful. Other remote management commands may not be issued if this command was successfully completed within the last 20 minutes, to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware_Router) PowerOn() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "powerOn", nil, &r.Options, &resp)
+ return
+}
+
+// The '''rebootDefault''' method attempts to reboot the server by issuing a soft reboot, or reset, command to the server's remote management card. If the reset attempt is unsuccessful, a power cycle command will be issued via the power strip. The power cycle command is equivalent to unplugging the server from the power strip and then plugging the server back in. If the reset was successful within the last 20 minutes, another remote management command cannot be completed, to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware_Router) RebootDefault() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "rebootDefault", nil, &r.Options, &resp)
+ return
+}
+
+// The '''rebootHard''' method reboots the server by issuing a cycle command to the server's remote management card. A hard reboot is equivalent to pressing the ''Reset'' button on a server - it is issued immediately and will not allow processes to shut down prior to the reboot. Completing a hard reboot may initiate system disk checks upon server reboot, causing the boot up to take longer than normally expected.
+//
+// Remote management commands are unable to be executed if a reboot has been issued successfully within the last 20 minutes, to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware_Router) RebootHard() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "rebootHard", nil, &r.Options, &resp)
+ return
+}
+
+// The '''rebootSoft''' method reboots the server by issuing a reset command to the server's remote management card via soft reboot. When executing a soft reboot, servers allow all processes to shut down completely before rebooting. Remote management commands are unable to be issued within 20 minutes of issuing a successful soft reboot, in order to avoid server failure. Remote management commands include:
+//
+// rebootSoft rebootHard powerOn powerOff powerCycle
+//
+//
+func (r Hardware_Router) RebootSoft() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "rebootSoft", nil, &r.Options, &resp)
+ return
+}
+
+// This method is used to remove access to a SoftLayer_Network_Storage volume that supports host- or network-level access control.
+func (r Hardware_Router) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) {
+ params := []interface{}{
+  networkStorageTemplateObject,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "removeAccessToNetworkStorage", params, &r.Options, &resp)
+ return
+}
+
+// This method is used to remove access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control.
+func (r Hardware_Router) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) {
+ params := []interface{}{
+  networkStorageTemplateObjects,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "removeAccessToNetworkStorageList", params, &r.Options, &resp)
+ return
+}
+
+// no documentation yet
+func (r Hardware_Router) SetTags(tags *string) (resp bool, err error) {
+ params := []interface{}{
+  tags,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "setTags", params, &r.Options, &resp)
+ return
+}
+
+// This method will update the root IPMI password on this SoftLayer_Hardware.
+func (r Hardware_Router) UpdateIpmiPassword(password *string) (resp bool, err error) {
+ params := []interface{}{
+  password,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_Router", "updateIpmiPassword", params, &r.Options, &resp)
+ return
+}
+
+// no documentation yet
+type Hardware_SecurityModule struct {
+ Session *session.Session
+ Options sl.Options
+}
+
+// GetHardwareSecurityModuleService returns an instance of the Hardware_SecurityModule SoftLayer service
+func GetHardwareSecurityModuleService(sess *session.Session) Hardware_SecurityModule {
+ return Hardware_SecurityModule{Session: sess}
+}
+
+// Id sets the id of the object against which subsequent requests are made.
+func (r Hardware_SecurityModule) Id(id int) Hardware_SecurityModule {
+ r.Options.Id = &id
+ return r
+}
+
+// Mask sets the object mask that limits which properties the API returns, wrapping it in "mask[...]" when needed.
+func (r Hardware_SecurityModule) Mask(mask string) Hardware_SecurityModule {
+ if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+  mask = fmt.Sprintf("mask[%s]", mask)
+ }
+
+ r.Options.Mask = mask
+ return r
+}
+
+// Filter sets the object filter applied to subsequent requests.
+func (r Hardware_SecurityModule) Filter(filter string) Hardware_SecurityModule {
+ r.Options.Filter = filter
+ return r
+}
+
+// Limit caps the number of results returned by subsequent list requests.
+func (r Hardware_SecurityModule) Limit(limit int) Hardware_SecurityModule {
+ r.Options.Limit = &limit
+ return r
+}
+
+// Offset sets the starting position within the result set for subsequent list requests.
+func (r Hardware_SecurityModule) Offset(offset int) Hardware_SecurityModule {
+ r.Options.Offset = &offset
+ return r
+}
+
+// Activates the private network port
+func (r Hardware_SecurityModule) ActivatePrivatePort() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "activatePrivatePort", nil, &r.Options, &resp)
+ return
+}
+
+// Activates the public network port
+func (r Hardware_SecurityModule) ActivatePublicPort() (resp bool, err error) {
+ err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "activatePublicPort", nil, &r.Options, &resp)
+ return
+}
+
+// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control.
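+//
+// A hedged usage sketch (the ids are placeholders): the template object
+// identifies the volume to authorize, here by id only, which is an
+// assumption for illustration:
+//
+//  hsm := services.GetHardwareSecurityModuleService(sess)
+//  volume := datatypes.Network_Storage{Id: sl.Int(4567)}
+//  granted, err := hsm.Id(123).AllowAccessToNetworkStorage(&volume)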
+func (r Hardware_SecurityModule) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) {
+ params := []interface{}{
+  networkStorageTemplateObject,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "allowAccessToNetworkStorage", params, &r.Options, &resp)
+ return
+}
+
+// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control.
+func (r Hardware_SecurityModule) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) {
+ params := []interface{}{
+  networkStorageTemplateObjects,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "allowAccessToNetworkStorageList", params, &r.Options, &resp)
+ return
+}
+
+// The Rescue Kernel is designed to provide you with the ability to bring a server online in order to troubleshoot system problems that would normally only be resolved by an OS Reload. The correct Rescue Kernel will be selected based upon the currently installed operating system. When the rescue kernel process is initiated, the server will shut down and reboot onto the public network with the same IPs assigned to the server to allow for remote connections. It will bring your server offline for approximately 10 minutes while the rescue is in progress. The root/administrator password will be the same as what is listed in the portal for the server.
+func (r Hardware_SecurityModule) BootToRescueLayer(noOsBootEnvironment *string) (resp bool, err error) {
+ params := []interface{}{
+  noOsBootEnvironment,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "bootToRescueLayer", params, &r.Options, &resp)
+ return
+}
+
+// Captures a Flex Image of the hard disk on the physical machine, based on the capture template parameter. Returns the image template group containing the disk image.
+func (r Hardware_SecurityModule) CaptureImage(captureTemplate *datatypes.Container_Disk_Image_Capture_Template) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) {
+ params := []interface{}{
+  captureTemplate,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "captureImage", params, &r.Options, &resp)
+ return
+}
+
+// Closes the monitoring alarm with the given alarm id.
+func (r Hardware_SecurityModule) CloseAlarm(alarmId *string) (resp bool, err error) {
+ params := []interface{}{
+  alarmId,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "closeAlarm", params, &r.Options, &resp)
+ return
+}
+
+// You can launch firmware updates by selecting from your server list. It will bring your server offline for approximately 20 minutes while the updates are in progress.
+//
+// In the event of a hardware failure during this update, our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will contact you to ensure that the impact on your server is minimal.
+func (r Hardware_SecurityModule) CreateFirmwareUpdateTransaction(ipmi *int, raidController *int, bios *int, harddrive *int) (resp bool, err error) {
+ params := []interface{}{
+  ipmi,
+  raidController,
+  bios,
+  harddrive,
+ }
+ err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "createFirmwareUpdateTransaction", params, &r.Options, &resp)
+ return
+}
+
+//
+//
+// createObject() enables the creation of servers on an account.
+// This method is a simplified alternative to interacting with the ordering system directly.
+//
+//
+// In order to create a server, a template object must be sent in with a few required
+// values.
+//
+//
+// When this method returns, an order will have been placed for a server of the specified configuration.
+//
+//
+// To determine when the server is available, you can poll the server via [[SoftLayer_Hardware/getObject|getObject]],
+// checking the provisionDate property.
+// When provisionDate is not null, the server will be ready. Be sure to use the globalIdentifier
+// as your initialization parameter.
+//
+//
+// Warning: Servers created via this method will incur charges on your account. For testing input parameters see [[SoftLayer_Hardware/generateOrderTemplate|generateOrderTemplate]].
+//
+//
+// Input - [[SoftLayer_Hardware (type)|SoftLayer_Hardware]]
+//
+// * hostname
+//     Hostname for the server.
+//     - Required
+//     - Type - string
+//
+// * domain
+//     Domain for the server.
+//     - Required
+//     - Type - string
+//
+// * processorCoreAmount
+//     The number of logical CPU cores to allocate.
+//     - Required
+//     - Type - int
+//     - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * memoryCapacity
+//     The amount of memory to allocate in gigabytes.
+//     - Required
+//     - Type - int
+//     - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * hourlyBillingFlag
+//     Specifies the billing type for the server.
+//     - Required
+//     - Type - boolean
+//     - When true the server will be billed on hourly usage, otherwise it will be billed on a monthly basis.
+//
+// * operatingSystemReferenceCode
+//     An identifier for the operating system to provision the server with.
+//     - Required
+//     - Type - string
+//     - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * datacenter.name
+//     Specifies which datacenter the server is to be provisioned in.
+//     - Required
+//     - Type - string
+//     - The datacenter property is a [[SoftLayer_Location (type)|location]] structure with the name field set.
+//     - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//     {
+//         "datacenter": {
+//             "name": "dal05"
+//         }
+//     }
+//
+// * networkComponents.maxSpeed
+//     Specifies the connection speed for the server's network components.
+//     - Optional
+//     - Type - int
+//     - Default - The highest available zero cost port speed will be used.
+//     - Description - The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]] structure. The maxSpeed property must be set to specify the network uplink speed, in megabits per second, of the server.
+//     - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//     {
+//         "networkComponents": [
+//             {
+//                 "maxSpeed": 1000
+//             }
+//         ]
+//     }
+//
+// * networkComponents.redundancyEnabledFlag
+//     Specifies whether or not the server's network components should be in redundancy groups.
+//     - Optional
+//     - Type - bool
+//     - Default - false
+//     - Description - The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]] structure. When the redundancyEnabledFlag property is true the server's network components will be in redundancy groups.
+//
+//     {
+//         "networkComponents": [
+//             {
+//                 "redundancyEnabledFlag": false
+//             }
+//         ]
+//     }
+//
+// * privateNetworkOnlyFlag
+//     Specifies whether or not the server only has access to the private network.
+//     - Optional
+//     - Type - boolean
+//     - Default - false
+//     - When true this flag specifies that a server is to only have access to the private network.
+//
+// * primaryNetworkComponent.networkVlan.id
+//     Specifies the network vlan which is to be used for the frontend interface of the server.
+//     - Optional
+//     - Type - int
+//     - Description - The primaryNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the frontend network vlan of the server.
+//
+//     {
+//         "primaryNetworkComponent": {
+//             "networkVlan": {
+//                 "id": 1
+//             }
+//         }
+//     }
+//
+// * primaryBackendNetworkComponent.networkVlan.id
+//     Specifies the network vlan which is to be used for the backend interface of the server.
+//     - Optional
+//     - Type - int
+//     - Description - The primaryBackendNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the backend network vlan of the server.
+//
+//     {
+//         "primaryBackendNetworkComponent": {
+//             "networkVlan": {
+//                 "id": 2
+//             }
+//         }
+//     }
+//
+// * fixedConfigurationPreset.keyName
+//     - Optional
+//     - Type - string
+//     - Description - The fixedConfigurationPreset property is a [[SoftLayer_Product_Package_Preset (type)|fixed configuration preset]] structure. The keyName property must be set to specify the preset to use.
+//     - If a fixed configuration preset is used, the processorCoreAmount, memoryCapacity and hardDrives properties must not be set.
+//     - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//     {
+//         "fixedConfigurationPreset": {
+//             "keyName": "SOME_KEY_NAME"
+//         }
+//     }
+//
+// * userData.value
+//     Arbitrary data to be made available to the server.
+//     - Optional
+//     - Type - string
+//     - Description - The userData property is an array with a single [[SoftLayer_Hardware_Attribute (type)|attribute]] structure with the value property set to an arbitrary value.
+//     - This value can be retrieved via the [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method from a request originating from the server. This is primarily useful for providing data to software that may be on the server and configured to execute upon first boot.
+//
+//     {
+//         "userData": [
+//             {
+//                 "value": "someValue"
+//             }
+//         ]
+//     }
+//
+// * hardDrives
+//     Hard drive settings for the server.
+//     - Optional
+//     - Type - SoftLayer_Hardware_Component
+//     - Default - The largest available capacity for a zero cost primary disk will be used.
+//     - Description - The hardDrives property is an array of [[SoftLayer_Hardware_Component (type)|hardware component]] structures. Each hard drive must specify the capacity property.
+//     - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//     {
+//         "hardDrives": [
+//             {
+//                 "capacity": 500
+//             }
+//         ]
+//     }
+//
+// * sshKeys
+//     SSH keys to install on the server upon provisioning.
+//     - Optional
+//     - Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]]
+//     - Description - The sshKeys property is an array of [[SoftLayer_Security_Ssh_Key (type)|SSH Key]] structures with the id property set to the value of an existing SSH key.
+//     - To create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]] on the [[SoftLayer_Security_Ssh_Key]] service.
+//     - To obtain a list of existing SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the [[SoftLayer_Account]] service.
+//
+//     {
+//         "sshKeys": [
+//             {
+//                 "id": 123
+//             }
+//         ]
+//     }
+//
+// * postInstallScriptUri
+//     Specifies the uri location of the script to be downloaded and run after installation is complete.
+//     - Optional
+//     - Type - string
+//
+//
+// REST Example:
    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "processorCoreAmount": 2, +// "memoryCapacity": 2, +// "hourlyBillingFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Hardware.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Hardware/f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5/getObject +// +// +// { +// "accountId": 232298, +// "bareMetalInstanceFlag": null, +// "domain": "example.com", +// "hardwareStatusId": null, +// "hostname": "host1", +// "id": null, +// "serviceProviderId": null, +// "serviceProviderResourceId": null, +// "globalIdentifier": "f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5", +// "hourlyBillingFlag": true, +// "memoryCapacity": 2, +// "operatingSystemReferenceCode": "UBUNTU_LATEST", +// "processorCoreAmount": 2 +// } +// +func (r Hardware_SecurityModule) CreateObject(templateObject *datatypes.Hardware_SecurityModule) (resp datatypes.Hardware_SecurityModule, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) CreatePostSoftwareInstallTransaction(installCodes []string, returnBoolean *bool) (resp bool, err error) { + params := []interface{}{ + installCodes, + returnBoolean, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "createPostSoftwareInstallTransaction", params, &r.Options, &resp) + return +} + +// +// This method will cancel a server effective immediately. For servers billed hourly, the charges will stop immediately after the method returns. +func (r Hardware_SecurityModule) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete software component passwords. +func (r Hardware_SecurityModule) DeleteSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "deleteSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Edit a server's properties +func (r Hardware_SecurityModule) EditObject(templateObject *datatypes.Hardware_Server) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "editObject", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, and notes. +func (r Hardware_SecurityModule) EditSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "editSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Download and run remote script from uri on the hardware. 
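+//
+// A hedged usage sketch (the id and URL are placeholders; hsm is a
+// Hardware_SecurityModule service value as in the sketch above):
+//
+//  err := hsm.Id(123).ExecuteRemoteScript(sl.String("https://example.com/post-install.sh"))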
+func (r Hardware_SecurityModule) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// The '''findByIpAddress''' method finds hardware using its primary public or private IP address. IP addresses that have a secondary subnet tied to the hardware will not return the hardware - alternate means of locating the hardware must be used (see '''Associated Methods'''). If no hardware is found, no errors are generated and no data is returned. +func (r Hardware_SecurityModule) FindByIpAddress(ipAddress *string) (resp datatypes.Hardware, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "findByIpAddress", params, &r.Options, &resp) + return +} + +// +// Obtain an [[SoftLayer_Container_Product_Order_Hardware_Server (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]]. +// +// +// This is primarily useful when you need to confirm the price that will be charged for an order. +// +// +// See [[SoftLayer_Hardware/createObject|createObject]] for specifics on the requirements of the template object parameter. +func (r Hardware_SecurityModule) GenerateOrderTemplate(templateObject *datatypes.Hardware) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "generateOrderTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The account associated with a piece of hardware. +func (r Hardware_SecurityModule) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active physical components. +func (r Hardware_SecurityModule) GetActiveComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a server's attached network firewall. +func (r Hardware_SecurityModule) GetActiveNetworkFirewallBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveNetworkFirewallBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's active network monitoring incidents. +func (r Hardware_SecurityModule) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetActiveTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveTickets", nil, &r.Options, &resp) + return +}
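A short sketch of the findByIpAddress lookup documented above, reusing the session, service, and imports from the previous sketch; the IP address is a hypothetical placeholder:

```go
// findServer looks a server up by its primary public or private IP.
// Per the notes above, an empty result with a nil error means nothing matched.
func findServer(service services.Hardware_SecurityModule) {
	hw, err := service.FindByIpAddress(sl.String("203.0.113.10")) // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	if hw.Id == nil {
		fmt.Println("no hardware found for that IP address")
		return
	}
	fmt.Printf("found %s.%s (id %d)\n", *hw.Hostname, *hw.Domain, *hw.Id)
}
```

+// Retrieve Transaction currently running for server.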
+func (r Hardware_SecurityModule) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve Any active transaction(s) that are currently running for the server (example: os reload). +func (r Hardware_SecurityModule) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getActiveTransactions", nil, &r.Options, &resp) + return +} + +// The '''getAlarmHistory''' method retrieves a detailed history for the monitoring alarm. When calling this method, a start and end date for the history to be retrieved must be entered. +func (r Hardware_SecurityModule) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + alarmId, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAlarmHistory", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetAllPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAllPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists. +func (r Hardware_SecurityModule) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to. +func (r Hardware_SecurityModule) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects whose replicas this SoftLayer_Hardware has access to. +func (r Hardware_SecurityModule) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding an antivirus/spyware software component object. +func (r Hardware_SecurityModule) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that are authorized access to this SoftLayer_Hardware. +func (r Hardware_SecurityModule) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's specific attributes.
+func (r Hardware_SecurityModule) GetAttributes() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve An object that stores the maximum level for the monitoring query types and response types. +func (r Hardware_SecurityModule) GetAvailableMonitoring() (resp []datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAvailableMonitoring", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Hardware. +func (r Hardware_SecurityModule) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve The average daily total bandwidth usage for the current billing cycle. +func (r Hardware_SecurityModule) GetAverageDailyBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAverageDailyBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily private bandwidth usage for the current billing cycle. +func (r Hardware_SecurityModule) GetAverageDailyPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAverageDailyPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The average daily public bandwidth usage for the current billing cycle. +func (r Hardware_SecurityModule) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Use this method to return an array of private bandwidth utilization records between a given date range. +// +// This method represents the NEW version of getBackendBandwidthUse +func (r Hardware_SecurityModule) GetBackendBandwidthUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendBandwidthUsage", params, &r.Options, &resp) + return +} + +// Use this method to return an array of private bandwidth utilization records between a given date range. +func (r Hardware_SecurityModule) GetBackendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendBandwidthUse", params, &r.Options, &resp) + return +}
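The date-range methods above all take pointers to datatypes.Time, which (an assumption about the upstream library) simply wraps the standard time.Time. A sketch, reusing the imports from the first example plus the standard time package; the hardware id and the Id() option setter are assumptions:

```go
// backendUsage pulls the last 30 days of private (backend) bandwidth
// records for one server. The target id is carried in r.Options via the
// generated Id() setter (assumed upstream behavior).
func backendUsage(service services.Hardware_SecurityModule) {
	end := datatypes.Time{Time: time.Now()}
	start := datatypes.Time{Time: end.AddDate(0, 0, -30)}

	records, err := service.Id(123456).GetBackendBandwidthUse(&start, &end) // 123456 is a hypothetical id
	if err != nil {
		log.Fatal(err)
	}
	for _, rec := range records {
		fmt.Printf("%+v\n", rec)
	}
}
```

+// The '''getBackendIncomingBandwidth''' method retrieves the amount of incoming private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes.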
+func (r Hardware_SecurityModule) GetBackendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's back-end or private network components. +func (r Hardware_SecurityModule) GetBackendNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendNetworkComponents", nil, &r.Options, &resp) + return +} + +// The '''getBackendOutgoingBandwidth''' method retrieves the amount of outgoing private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes. +func (r Hardware_SecurityModule) GetBackendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendOutgoingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A hardware's backend or private router. +func (r Hardware_SecurityModule) GetBackendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBackendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth (measured in GB). +func (r Hardware_SecurityModule) GetBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments. +func (r Hardware_SecurityModule) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBandwidthAllotmentDetail", nil, &r.Options, &resp) + return +} + +// Retrieve a collection of bandwidth data from an individual public or private network tracking object. Data is ideal if you wish to employ your own traffic storage and graphing systems. +func (r Hardware_SecurityModule) GetBandwidthForDateRange(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBandwidthForDateRange", params, &r.Options, &resp) + return +} + +// Use this method when you need a bandwidth image for a single server. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified. Use the $draw flag to suppress the generation of the actual binary PNG image.
+func (r Hardware_SecurityModule) GetBandwidthImage(networkType *string, snapshotRange *string, draw *bool, dateSpecified *datatypes.Time, dateSpecifiedEnd *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + networkType, + snapshotRange, + draw, + dateSpecified, + dateSpecifiedEnd, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBandwidthImage", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's benchmark certifications. +func (r Hardware_SecurityModule) GetBenchmarkCertifications() (resp []datatypes.Hardware_Benchmark_Certification, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBenchmarkCertifications", nil, &r.Options, &resp) + return +} + +// Retrieve The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to. +func (r Hardware_SecurityModule) GetBillingCycleBandwidthUsage() (resp []datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBillingCycleBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw private bandwidth usage data for the current billing cycle. +func (r Hardware_SecurityModule) GetBillingCyclePrivateBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBillingCyclePrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw public bandwidth usage data for the current billing cycle. +func (r Hardware_SecurityModule) GetBillingCyclePublicBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBillingCyclePublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for a server. +func (r Hardware_SecurityModule) GetBillingItem() (resp datatypes.Billing_Item_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a billing item exists. +func (r Hardware_SecurityModule) GetBillingItemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBillingItemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the hardware is ineligible for cancellation because it is disconnected. +func (r Hardware_SecurityModule) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Status indicating whether or not a piece of hardware has business continuance insurance. +func (r Hardware_SecurityModule) GetBusinessContinuanceInsuranceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getBusinessContinuanceInsuranceFlag", nil, &r.Options, &resp) + return +}
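A sketch of the getBandwidthImage call described above. The networkType and snapshotRange strings shown are hypothetical placeholders used only to illustrate the call shape, not values confirmed by this diff; consult the SoftLayer documentation for the accepted values:

```go
// bandwidthImage asks the API to render a bandwidth graph for one server.
// "public" and "LAST_24_HOURS" are placeholder parameter values; draw=true
// requests the actual binary PNG rather than suppressing it.
func bandwidthImage(service services.Hardware_SecurityModule) {
	out, err := service.Id(123456).GetBandwidthImage( // hypothetical id
		sl.String("public"),        // networkType (placeholder)
		sl.String("LAST_24_HOURS"), // snapshotRange (placeholder)
		sl.Bool(true),              // draw the binary PNG
		nil, nil,                   // explicit dates only needed for custom ranges
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("graph container: %+v\n", out)
}
```

+// Retrieve Child hardware.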
+func (r Hardware_SecurityModule) GetChildrenHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getChildrenHardware", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) GetComponentDetailsXML() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getComponentDetailsXML", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's components. +func (r Hardware_SecurityModule) GetComponents() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getComponents", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetContainsSolidStateDrivesFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getContainsSolidStateDrivesFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A continuous data protection/server backup software component object. +func (r Hardware_SecurityModule) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp) + return +} + +// Retrieve A server's control panel. +func (r Hardware_SecurityModule) GetControlPanel() (resp datatypes.Software_Component_ControlPanel, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getControlPanel", nil, &r.Options, &resp) + return +} + +// Retrieve The total cost of a server, measured in US Dollars ($USD). +func (r Hardware_SecurityModule) GetCost() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCost", nil, &r.Options, &resp) + return +} + +// +// There are many options that may be provided while ordering a server; this method can be used to determine what these options are. +// +// +// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Hardware_Configuration (type)]]. +func (r Hardware_SecurityModule) GetCreateObjectOptions() (resp datatypes.Container_Hardware_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCreateObjectOptions", nil, &r.Options, &resp) + return +} + +// Retrieve An object that provides commonly used bandwidth summary components for the current billing cycle. +func (r Hardware_SecurityModule) GetCurrentBandwidthSummary() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCurrentBandwidthSummary", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with the current benchmark certification result, if such a file exists. If there is no file for this benchmark certification result, calling this method throws an exception. +func (r Hardware_SecurityModule) GetCurrentBenchmarkCertificationResultFile() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCurrentBenchmarkCertificationResultFile", nil, &r.Options, &resp) + return +}
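Pairing naturally with the createObject sketch earlier, getCreateObjectOptions can be called first to discover the valid template values. A minimal sketch under the same assumed imports:

```go
// listCreateOptions fetches the valid createObject option values
// (datacenters, processors, operating systems, and so on) and dumps the
// returned Container_Hardware_Configuration for inspection.
func listCreateOptions(service services.Hardware_SecurityModule) {
	opts, err := service.GetCreateObjectOptions()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", opts)
}
```

+// Retrieve The current billable public outbound bandwidth for this hardware for the current billing cycle.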
+func (r Hardware_SecurityModule) GetCurrentBillableBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCurrentBillableBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Get the billing detail for this instance for the current billing period. This does not include bandwidth usage. +func (r Hardware_SecurityModule) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCurrentBillingDetail", nil, &r.Options, &resp) + return +} + +// The '''getCurrentBillingTotal''' method retrieves the total bill amount in US Dollars ($) for the current billing period. In addition to the total bill amount, the billing detail also includes all bandwidth used up to the point the method is called on the piece of hardware. +func (r Hardware_SecurityModule) GetCurrentBillingTotal() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCurrentBillingTotal", nil, &r.Options, &resp) + return +} + +// Retrieve bandwidth graph by date. +func (r Hardware_SecurityModule) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) { + params := []interface{}{ + graphData, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCustomBandwidthDataByDate", params, &r.Options, &resp) + return +} + +// Retrieve Indicates if a server has a Customer Installed OS. +func (r Hardware_SecurityModule) GetCustomerInstalledOperatingSystemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCustomerInstalledOperatingSystemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Indicates if a server is a customer owned device. +func (r Hardware_SecurityModule) GetCustomerOwnedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getCustomerOwnedFlag", nil, &r.Options, &resp) + return +} + +// The '''getDailyAverage''' method calculates the average daily network traffic used by the selected server. Using the required parameter ''dateTime'' to enter a start and end date, the user retrieves this average, measured in gigabytes (GB), for the specified date range. When entering parameters, only the month, day and year are required - time entries are omitted as this method defaults the time to midnight in order to account for the entire day. +func (r Hardware_SecurityModule) GetDailyAverage(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDailyAverage", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the datacenter in which a piece of hardware resides. +func (r Hardware_SecurityModule) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the datacenter in which a piece of hardware resides. +func (r Hardware_SecurityModule) GetDatacenterName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDatacenterName", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware.
+func (r Hardware_SecurityModule) GetDownlinkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownlinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware that has uplink network connections to a piece of hardware. +func (r Hardware_SecurityModule) GetDownlinkNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownlinkNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached to a piece of network hardware. +func (r Hardware_SecurityModule) GetDownlinkServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownlinkServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_SecurityModule) GetDownlinkVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownlinkVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve All hardware downstream from a network device. +func (r Hardware_SecurityModule) GetDownstreamHardwareBindings() (resp []datatypes.Network_Component_Uplink_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownstreamHardwareBindings", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware downstream from the selected piece of hardware. +func (r Hardware_SecurityModule) GetDownstreamNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownstreamNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware. +func (r Hardware_SecurityModule) GetDownstreamNetworkHardwareWithIncidents() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownstreamNetworkHardwareWithIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all servers attached downstream to a piece of network hardware. +func (r Hardware_SecurityModule) GetDownstreamServers() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownstreamServers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding all virtual guests attached to a piece of network hardware. +func (r Hardware_SecurityModule) GetDownstreamVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDownstreamVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The drive controllers contained within a piece of hardware. +func (r Hardware_SecurityModule) GetDriveControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getDriveControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated EVault network storage service account. 
+func (r Hardware_SecurityModule) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getEvaultNetworkStorage", nil, &r.Options, &resp) + return +} + +// Get the subnets associated with this server that are protectable by a network component firewall. +func (r Hardware_SecurityModule) GetFirewallProtectableSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFirewallProtectableSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's firewall services. +func (r Hardware_SecurityModule) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFirewallServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Defines the fixed components in a fixed configuration bare metal server. +func (r Hardware_SecurityModule) GetFixedConfigurationPreset() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFixedConfigurationPreset", nil, &r.Options, &resp) + return +} + +// Use this method to return an array of public bandwidth utilization records between a given date range. +// +// This method represents the NEW version of getFrontendBandwidthUse +func (r Hardware_SecurityModule) GetFrontendBandwidthUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendBandwidthUsage", params, &r.Options, &resp) + return +} + +// Use this method to return an array of public bandwidth utilization records between a given date range. +func (r Hardware_SecurityModule) GetFrontendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendBandwidthUse", params, &r.Options, &resp) + return +} + +// The '''getFrontendIncomingBandwidth''' method retrieves the amount of incoming public network traffic used by a server between the given start and end date parameters. When entering the ''dateTime'' parameter, only the month, day and year of the start and end dates are required - the time (hour, minute and second) is set to midnight by default and cannot be changed. The amount of bandwidth retrieved is measured in gigabytes (GB). +func (r Hardware_SecurityModule) GetFrontendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendIncomingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's front-end or public network components.
+func (r Hardware_SecurityModule) GetFrontendNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendNetworkComponents", nil, &r.Options, &resp) + return +} + +// The '''getFrontendOutgoingBandwidth''' method retrieves the amount of outgoing public network traffic used by a server between the given start and end date parameters. The ''dateTime'' parameter requires only the day, month and year to be entered - the time (hour, minute and second) is set to midnight by default in order to gather the data for the entire start and end date indicated in the parameter. The amount of bandwidth retrieved is measured in gigabytes (GB). +func (r Hardware_SecurityModule) GetFrontendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendOutgoingBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A hardware's frontend or public router. +func (r Hardware_SecurityModule) GetFrontendRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getFrontendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's universally unique identifier. +func (r Hardware_SecurityModule) GetGlobalIdentifier() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getGlobalIdentifier", nil, &r.Options, &resp) + return +} + +// Retrieve The hard drives contained within a piece of hardware. +func (r Hardware_SecurityModule) GetHardDrives() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardDrives", nil, &r.Options, &resp) + return +} + +// Retrieve a server by searching for the primary IP address. +func (r Hardware_SecurityModule) GetHardwareByIpAddress(ipAddress *string) (resp datatypes.Hardware_Server, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardwareByIpAddress", params, &r.Options, &resp) + return +} + +// Retrieve The chassis that a piece of hardware is housed in. +func (r Hardware_SecurityModule) GetHardwareChassis() (resp datatypes.Hardware_Chassis, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardwareChassis", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's function. +func (r Hardware_SecurityModule) GetHardwareFunction() (resp datatypes.Hardware_Function, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardwareFunction", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's function. +func (r Hardware_SecurityModule) GetHardwareFunctionDescription() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardwareFunctionDescription", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's status. +func (r Hardware_SecurityModule) GetHardwareStatus() (resp datatypes.Hardware_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHardwareStatus", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the hardware object has TPM enabled.
+func (r Hardware_SecurityModule) GetHasTrustedPlatformModuleBillingItemFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHasTrustedPlatformModuleBillingItemFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a host IPS software component object. +func (r Hardware_SecurityModule) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHostIpsSoftwareComponent", nil, &r.Options, &resp) + return +} + +// The '''getHourlyBandwidth''' method retrieves all bandwidth updates hourly for the specified hardware. Because the potential number of data points can become excessive, the method limits the user to obtain data in 24-hour intervals. The required ''dateTime'' parameter is used as the starting point for the query and will be calculated for the 24-hour period starting with the specified date and time. For example, entering a parameter of +// +// '02/01/2008 0:00' +// +// results in a return of all bandwidth data for the entire day of February 1, 2008, as 0:00 specifies a midnight start date. Please note that the time entered should be completed using a 24-hour clock (military time, astronomical time). +// +// For data spanning more than a single 24-hour period, refer to the getBandwidthData function on the metricTrackingObject for the piece of hardware. +func (r Hardware_SecurityModule) GetHourlyBandwidth(mode *string, day *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + mode, + day, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHourlyBandwidth", params, &r.Options, &resp) + return +} + +// Retrieve A server's hourly billing status. +func (r Hardware_SecurityModule) GetHourlyBillingFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getHourlyBillingFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the inbound network traffic data for the last 30 days. +func (r Hardware_SecurityModule) GetInboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getInboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total private inbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_SecurityModule) GetInboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getInboundPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public inbound bandwidth for this hardware for the current billing cycle. 
+func (r Hardware_SecurityModule) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Return a collection of SoftLayer_Item_Price objects from a collection of SoftLayer_Software_Description. +func (r Hardware_SecurityModule) GetItemPricesFromSoftwareDescriptions(softwareDescriptions []datatypes.Software_Description, includeTranslationsFlag *bool, returnAllPricesFlag *bool) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + softwareDescriptions, + includeTranslationsFlag, + returnAllPricesFlag, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getItemPricesFromSoftwareDescriptions", params, &r.Options, &resp) + return +} + +// Retrieve The last transaction in which a server's operating system was loaded. +func (r Hardware_SecurityModule) GetLastOperatingSystemReload() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLastOperatingSystemReload", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the last transaction a server performed. +func (r Hardware_SecurityModule) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLastTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's latest network monitoring incident. +func (r Hardware_SecurityModule) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp) + return +} + +// Retrieve Where a piece of hardware is located within SoftLayer's location hierarchy. +func (r Hardware_SecurityModule) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetLocationPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLocationPathString", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a lockbox account associated with a server. +func (r Hardware_SecurityModule) GetLockboxNetworkStorage() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getLockboxNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the hardware is a managed resource. +func (r Hardware_SecurityModule) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve the remote management network component attached with this server. +func (r Hardware_SecurityModule) GetManagementNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getManagementNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's memory.
+func (r Hardware_SecurityModule) GetMemory() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMemory", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of memory a piece of hardware has, measured in gigabytes. +func (r Hardware_SecurityModule) GetMemoryCapacity() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMemoryCapacity", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's metric tracking object. +func (r Hardware_SecurityModule) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_HardwareServer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve The metric tracking object id for this server. +func (r Hardware_SecurityModule) GetMetricTrackingObjectId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMetricTrackingObjectId", nil, &r.Options, &resp) + return +} + +// Returns open monitoring alarms for a given time period +func (r Hardware_SecurityModule) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringActiveAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the monitoring agents associated with a piece of hardware. +func (r Hardware_SecurityModule) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringAgents", nil, &r.Options, &resp) + return +} + +// Returns closed monitoring alarms for a given time period +func (r Hardware_SecurityModule) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) { + params := []interface{}{ + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringClosedAlarms", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's monitoring robot. +func (r Hardware_SecurityModule) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringRobot", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitoring services. +func (r Hardware_SecurityModule) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringServiceComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring service flag eligibility status for a piece of hardware. +func (r Hardware_SecurityModule) GetMonitoringServiceEligibilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The service flag status for a piece of hardware. 
+func (r Hardware_SecurityModule) GetMonitoringServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringServiceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The monitoring notification objects for this hardware. Each object links this hardware instance to a user account that will be notified if monitoring on this hardware object fails +func (r Hardware_SecurityModule) GetMonitoringUserNotification() (resp []datatypes.User_Customer_Notification_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMonitoringUserNotification", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's motherboard. +func (r Hardware_SecurityModule) GetMotherboard() (resp datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getMotherboard", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network cards. +func (r Hardware_SecurityModule) GetNetworkCards() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkCards", nil, &r.Options, &resp) + return +} + +// Get the IP addresses associated with this server that are protectable by a network component firewall. Note, this may not return all values for IPv6 subnets for this server. Please use getFirewallProtectableSubnets to get all protectable subnets. +func (r Hardware_SecurityModule) GetNetworkComponentFirewallProtectableIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkComponentFirewallProtectableIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve Returns a hardware's network components. +func (r Hardware_SecurityModule) GetNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway member if this device is part of a network gateway. +func (r Hardware_SecurityModule) GetNetworkGatewayMember() (resp datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkGatewayMember", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this device is part of a network gateway. +func (r Hardware_SecurityModule) GetNetworkGatewayMemberFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkGatewayMemberFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's network management IP address. +func (r Hardware_SecurityModule) GetNetworkManagementIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkManagementIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve All servers with failed monitoring that are attached downstream to a piece of hardware. 
+func (r Hardware_SecurityModule) GetNetworkMonitorAttachedDownHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkMonitorAttachedDownHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual guests with failed monitoring that are attached downstream to a piece of hardware. +func (r Hardware_SecurityModule) GetNetworkMonitorAttachedDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkMonitorAttachedDownVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The status of all of a piece of hardware's network monitoring incidents. +func (r Hardware_SecurityModule) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkMonitorIncidents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's network monitors. +func (r Hardware_SecurityModule) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkMonitors", nil, &r.Options, &resp) + return +} + +// Retrieve The value of a hardware's network status attribute. +func (r Hardware_SecurityModule) GetNetworkStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's related network status attribute. +func (r Hardware_SecurityModule) GetNetworkStatusAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkStatusAttribute", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's associated network storage service account. +func (r Hardware_SecurityModule) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The network virtual LANs (VLANs) associated with a piece of hardware's network components. +func (r Hardware_SecurityModule) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's allotted bandwidth for the next billing cycle (measured in GB). +func (r Hardware_SecurityModule) GetNextBillingCycleBandwidthAllocation() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNextBillingCycleBandwidthAllocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetNotesHistory() (resp []datatypes.Hardware_Note, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getNotesHistory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) GetObject() (resp datatypes.Hardware_SecurityModule, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getObject", nil, &r.Options, &resp) + return +}
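Every method on this service threads r.Options into DoRequest; that field is what the generated Id, Mask, Filter, Limit, and Offset setters populate in the upstream library (an assumption, as the setters are not part of this hunk). A sketch of fetching one object with an object mask:

```go
// getMasked fetches a single hardware record, limiting the response to a
// few properties via an object mask. The id and mask string are illustrative.
func getMasked(service services.Hardware_SecurityModule) {
	hw, err := service.
		Id(123456). // hypothetical hardware id
		Mask("id;hostname;domain").
		GetObject()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s.%s\n", *hw.Hostname, *hw.Domain)
}
```

+// Retrieve An open ticket requesting cancellation of this server, if one exists.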
+func (r Hardware_SecurityModule) GetOpenCancellationTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOpenCancellationTicket", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's operating system. +func (r Hardware_SecurityModule) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's operating system software description. +func (r Hardware_SecurityModule) GetOperatingSystemReferenceCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOperatingSystemReferenceCode", nil, &r.Options, &resp) + return +} + +// Retrieve The sum of all the outbound network traffic data for the last 30 days. +func (r Hardware_SecurityModule) GetOutboundBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOutboundBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total private outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_SecurityModule) GetOutboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOutboundPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for this hardware for the current billing cycle. +func (r Hardware_SecurityModule) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this hardware for the current billing cycle exceeds the allocation. +func (r Hardware_SecurityModule) GetOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures system temperatures, voltages, and other local server settings. Sensor data is cached for 30 seconds. Calls made to getSensorData for the same server within 30 seconds of each other will return the same data. Subsequent calls will return new data once the cache expires. +func (r Hardware_SecurityModule) GetPMInfo() (resp []datatypes.Container_RemoteManagement_PmInfo, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPMInfo", nil, &r.Options, &resp) + return +} + +// Retrieve Parent Hardware. +func (r Hardware_SecurityModule) GetParentHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getParentHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the Point of Presence (PoP) location in which a piece of hardware resides. +func (r Hardware_SecurityModule) GetPointOfPresenceLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPointOfPresenceLocation", nil, &r.Options, &resp) + return +} + +// Retrieve The power components for a hardware object. 
+func (r Hardware_SecurityModule) GetPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPowerComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's power supply. +func (r Hardware_SecurityModule) GetPowerSupply() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPowerSupply", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary private IP address. +func (r Hardware_SecurityModule) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary back-end network component. +func (r Hardware_SecurityModule) GetPrimaryBackendNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) GetPrimaryDriveSize() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrimaryDriveSize", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware's primary public IP address. +func (r Hardware_SecurityModule) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the hardware's primary public network component. +func (r Hardware_SecurityModule) GetPrimaryNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrimaryNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve a server's private network bandwidth usage data over the specified timeframe. If no timeframe is specified then getPrivateBandwidthData retrieves the last 24 hours of private bandwidth usage. For a rendered PNG of the same data, see getPrivateBandwidthGraphImage. +func (r Hardware_SecurityModule) GetPrivateBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve a brief summary of a server's private network bandwidth usage. getPrivateBandwidthDataSummary retrieves a server's bandwidth allocation for its billing period, its estimated usage during its billing period, and an estimation of how much bandwidth it will use during its billing period based on its current usage. A server's projected bandwidth usage increases in accuracy as it progresses through its billing period. +func (r Hardware_SecurityModule) GetPrivateBandwidthDataSummary() (resp datatypes.Container_Network_Bandwidth_Data_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateBandwidthDataSummary", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's private network bandwidth usage over the specified time frame. If no time frame is specified then getPrivateBandwidthGraphImage retrieves the last 24 hours of private bandwidth usage. getPrivateBandwidthGraphImage returns a PNG image. +func (r Hardware_SecurityModule) GetPrivateBandwidthGraphImage(startTime *string, endTime *string) (resp []byte, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateBandwidthGraphImage", params, &r.Options, &resp) + return +} + +// Retrieve A server's primary private IP address. +func (r Hardware_SecurityModule) GetPrivateIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve the private network component attached with this server. +func (r Hardware_SecurityModule) GetPrivateNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the hardware only has access to the private network. +func (r Hardware_SecurityModule) GetPrivateNetworkOnlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve the backend VLAN for the primary IP address of the server +func (r Hardware_SecurityModule) GetPrivateVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateVlan", nil, &r.Options, &resp) + return +} + +// Retrieve a backend network VLAN by searching for an IP address +func (r Hardware_SecurityModule) GetPrivateVlanByIpAddress(ipAddress *string) (resp datatypes.Network_Vlan, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPrivateVlanByIpAddress", params, &r.Options, &resp) + return +} + +// Retrieve The total number of processor cores, summed from all processors that are attached to a piece of hardware +func (r Hardware_SecurityModule) GetProcessorCoreAmount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProcessorCoreAmount", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of physical processor cores, summed from all processors that are attached to a piece of hardware +func (r Hardware_SecurityModule) GetProcessorPhysicalCoreAmount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProcessorPhysicalCoreAmount", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's processors. +func (r Hardware_SecurityModule) GetProcessors() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProcessors", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this hardware for the current billing cycle is projected to exceed the allocation. +func (r Hardware_SecurityModule) GetProjectedOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProjectedOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +}
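Since the graph-image methods return raw PNG bytes, the result can be written straight to disk. A sketch using the public variant documented just below, which takes datatypes.Time bounds; io/ioutil and time come from the standard library, and the rest matches the first example's assumed imports:

```go
// saveBandwidthGraph writes the last 24 hours of public bandwidth usage
// (a PNG, per the getPublicBandwidthGraphImage notes below) to a file.
func saveBandwidthGraph(service services.Hardware_SecurityModule) {
	end := datatypes.Time{Time: time.Now()}
	start := datatypes.Time{Time: end.AddDate(0, 0, -1)}

	png, err := service.Id(123456).GetPublicBandwidthGraphImage(&start, &end) // hypothetical id
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("bandwidth.png", png, 0644); err != nil {
		log.Fatal(err)
	}
}
```

+// Retrieve The projected public outbound bandwidth for this hardware for the current billing cycle.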
+func (r Hardware_SecurityModule) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Hardware_SecurityModule) GetProvisionDate() (resp datatypes.Time, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getProvisionDate", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server's public network bandwidth usage data over the specified timeframe. If no timeframe is specified then getPublicBandwidthData retrieves the last 24 hours of public bandwidth usage.
+func (r Hardware_SecurityModule) GetPublicBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startTime,
+		endTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicBandwidthData", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a brief summary of a server's public network bandwidth usage. getPublicBandwidthDataSummary retrieves a server's bandwidth allocation for its billing period, its estimated usage during its billing period, and an estimation of how much bandwidth it will use during its billing period based on its current usage. A server's projected bandwidth usage increases in accuracy as it progresses through its billing period.
+func (r Hardware_SecurityModule) GetPublicBandwidthDataSummary() (resp datatypes.Container_Network_Bandwidth_Data_Summary, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicBandwidthDataSummary", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a graph of a server's public network bandwidth usage over the specified time frame. If no time frame is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. THIS METHOD GENERATES GRAPHS BASED ON THE NEW DATA WAREHOUSE REPOSITORY.
+func (r Hardware_SecurityModule) GetPublicBandwidthGraphImage(startTime *datatypes.Time, endTime *datatypes.Time) (resp []byte, err error) {
+	params := []interface{}{
+		startTime,
+		endTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicBandwidthGraphImage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve the total number of bytes used by a server over a specified time period via the data warehouse tracking objects for this hardware.
+func (r Hardware_SecurityModule) GetPublicBandwidthTotal(startTime *int, endTime *int) (resp uint, err error) {
+	params := []interface{}{
+		startTime,
+		endTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicBandwidthTotal", params, &r.Options, &resp)
+	return
+}
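+
+// Example (illustrative, not part of the generated API): a minimal sketch of
+// pulling the default last-24-hours window of public bandwidth data via the
+// getPublicBandwidthData binding above. It assumes an authenticated
+// *session.Session named sess and the service constructor defined earlier in
+// this file; the hardware id 123456 is a placeholder.
+//
+//	svc := GetHardwareSecurityModuleService(sess).Id(123456)
+//	data, err := svc.GetPublicBandwidthData(nil, nil) // nil timestamps use the default timeframe
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, point := range data {
+//		if point.Counter != nil {
+//			fmt.Println(*point.Counter)
+//		}
+//	}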
+
+// Retrieve a SoftLayer server's public network component. Some servers are only connected to the private network and may not have a public network component. In that case getPublicNetworkComponent returns a null object.
+func (r Hardware_SecurityModule) GetPublicNetworkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the frontend VLAN for the primary IP address of the server.
+func (r Hardware_SecurityModule) GetPublicVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicVlan", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a frontend network VLAN by searching for the hostname of a server.
+func (r Hardware_SecurityModule) GetPublicVlanByHostname(hostname *string) (resp datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		hostname,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getPublicVlanByHostname", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_SecurityModule) GetRack() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRack", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The RAID controllers contained within a piece of hardware.
+func (r Hardware_SecurityModule) GetRaidControllers() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRaidControllers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Recent events that impact this hardware.
+func (r Hardware_SecurityModule) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRecentEvents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The last five commands issued to the server's remote management card.
+func (r Hardware_SecurityModule) GetRecentRemoteManagementCommands() (resp []datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRecentRemoteManagementCommands", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_SecurityModule) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRegionalInternetRegistry", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A server's remote management card.
+func (r Hardware_SecurityModule) GetRemoteManagement() (resp datatypes.Hardware_Component_RemoteManagement, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRemoteManagement", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve User credentials to issue commands and/or interact with the server's remote management card.
+func (r Hardware_SecurityModule) GetRemoteManagementAccounts() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRemoteManagementAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's associated remote management component. This is normally IPMI.
+func (r Hardware_SecurityModule) GetRemoteManagementComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRemoteManagementComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve User(s) who have access to issue commands and/or interact with the server's remote management card.
+func (r Hardware_SecurityModule) GetRemoteManagementUsers() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRemoteManagementUsers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetResourceConfigurations() (resp []datatypes.Hardware_Resource_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getResourceConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetResourceGroupMemberReferences() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getResourceGroupMemberReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_SecurityModule) GetResourceGroupRoles() (resp []datatypes.Resource_Group_Role, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getResourceGroupRoles", nil, &r.Options, &resp) + return +} + +// Retrieve The resource groups in which this hardware is a member. +func (r Hardware_SecurityModule) GetResourceGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getResourceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve the reverse domain records associated with this server. +func (r Hardware_SecurityModule) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getReverseDomainRecords", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's routers. +func (r Hardware_SecurityModule) GetRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getRouters", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of scale assets this hardware corresponds to. +func (r Hardware_SecurityModule) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getScaleAssets", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding a piece of hardware's vulnerability scan requests. +func (r Hardware_SecurityModule) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSecurityScanRequests", nil, &r.Options, &resp) + return +} + +// Retrieve a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures system temperatures, voltages, and other local server settings. Sensor data is cached for 30 seconds. Calls made to getSensorData for the same server within 30 seconds of each other will return the same data. Subsequent calls will return new data once the cache expires. +func (r Hardware_SecurityModule) GetSensorData() (resp []datatypes.Container_RemoteManagement_SensorReading, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSensorData", nil, &r.Options, &resp) + return +} + +// Retrieves the raw data returned from the server's remote management card. For more details of what is returned please refer to the getSensorData method. Along with the raw data, graphs for the cpu and system temperatures and fan speeds are also returned. 
+func (r Hardware_SecurityModule) GetSensorDataWithGraphs() (resp datatypes.Container_RemoteManagement_SensorReadingsWithGraphs, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSensorDataWithGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server's hardware components, software, and network components. getServerDetails is an aggregation function that combines the results of [[SoftLayer_Hardware_Server::getComponents]], [[SoftLayer_Hardware_Server::getSoftware]], and [[SoftLayer_Hardware_Server::getNetworkComponents]] in a single container.
+func (r Hardware_SecurityModule) GetServerDetails() (resp datatypes.Container_Hardware_Server_Details, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServerDetails", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the server's fan speeds and display them using tachometer graphs. Data used to construct the graphs is retrieved from the server's remote management card. Each graph returned will have a title associated with it.
+func (r Hardware_SecurityModule) GetServerFanSpeedGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorSpeed, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServerFanSpeedGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the power state for the server. The server's power status is retrieved from its remote management card. This will return 'on' or 'off'.
+func (r Hardware_SecurityModule) GetServerPowerState() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServerPowerState", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the server room in which the hardware is located.
+func (r Hardware_SecurityModule) GetServerRoom() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServerRoom", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the server's temperatures and display them using thermometer graphs. Temperatures retrieved are the CPU(s) and system temperatures. Data used to construct the graphs is retrieved from the server's remote management card. Each graph returned will have a title associated with it.
+func (r Hardware_SecurityModule) GetServerTemperatureGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorTemperature, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServerTemperatureGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the piece of hardware's service provider.
+func (r Hardware_SecurityModule) GetServiceProvider() (resp datatypes.Service_Provider, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getServiceProvider", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's installed software.
+func (r Hardware_SecurityModule) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSoftwareComponents", nil, &r.Options, &resp)
+	return
+}
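+
+// Example (illustrative, not part of the generated API): a minimal sketch of
+// checking a server's power state through the getServerPowerState binding
+// above. It assumes an authenticated *session.Session named sess; the
+// hardware id 123456 is a placeholder.
+//
+//	svc := GetHardwareSecurityModuleService(sess).Id(123456)
+//	state, err := svc.GetServerPowerState()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("power state:", state) // prints "on" or "off"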
+
+// Retrieve Information regarding the billing item for a spare pool server.
+func (r Hardware_SecurityModule) GetSparePoolBillingItem() (resp datatypes.Billing_Item_Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSparePoolBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve SSH keys to be installed on the server during provisioning or an OS reload.
+func (r Hardware_SecurityModule) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getSshKeys", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A server's remote management card used for statistics.
+func (r Hardware_SecurityModule) GetStatisticsRemoteManagement() (resp datatypes.Hardware_Component_RemoteManagement, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getStatisticsRemoteManagement", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_SecurityModule) GetStorageNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getStorageNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_SecurityModule) GetTagReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getTagReferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_SecurityModule) GetTopLevelLocation() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getTopLevelLocation", nil, &r.Options, &resp)
+	return
+}
+
+// This method will query transaction history for a piece of hardware.
+func (r Hardware_SecurityModule) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction_History, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getTransactionHistory", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a list of upgradeable items available to this piece of hardware. Currently, getUpgradeItemPrices retrieves upgrades available for a server's memory, hard drives, network port speed, bandwidth allocation and GPUs.
+func (r Hardware_SecurityModule) GetUpgradeItemPrices() (resp []datatypes.Product_Item_Price, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUpgradeItemPrices", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An account's associated upgrade request object, if any.
+func (r Hardware_SecurityModule) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUpgradeRequest", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network device connected to a piece of hardware.
+func (r Hardware_SecurityModule) GetUplinkHardware() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUplinkHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure.
+func (r Hardware_SecurityModule) GetUplinkNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUplinkNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A string containing custom user data for a hardware order.
+func (r Hardware_SecurityModule) GetUserData() (resp []datatypes.Hardware_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUserData", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A list of users that have access to this computing instance.
+func (r Hardware_SecurityModule) GetUsers() (resp []datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getUsers", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return the list of block device template groups that are valid to the host. For instance, it will only retrieve FLEX images.
+func (r Hardware_SecurityModule) GetValidBlockDeviceTemplateGroups(visibility *string) (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) {
+	params := []interface{}{
+		visibility,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getValidBlockDeviceTemplateGroups", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the virtual chassis for a piece of hardware.
+func (r Hardware_SecurityModule) GetVirtualChassis() (resp datatypes.Hardware_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualChassis", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the virtual chassis siblings for a piece of hardware.
+func (r Hardware_SecurityModule) GetVirtualChassisSiblings() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualChassisSiblings", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware server's virtual servers.
+func (r Hardware_SecurityModule) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's virtual host record.
+func (r Hardware_SecurityModule) GetVirtualHost() (resp datatypes.Virtual_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualHost", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's virtual software licenses.
+func (r Hardware_SecurityModule) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualLicenses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the bandwidth allotment to which a piece of hardware belongs.
+func (r Hardware_SecurityModule) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualRack", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The id of the bandwidth allotment belonging to a piece of hardware.
+func (r Hardware_SecurityModule) GetVirtualRackId() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualRackId", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The name of the bandwidth allotment belonging to a piece of hardware.
+func (r Hardware_SecurityModule) GetVirtualRackName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualRackName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's virtualization platform software.
+func (r Hardware_SecurityModule) GetVirtualizationPlatform() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getVirtualizationPlatform", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a list of Windows updates available for a server from the local SoftLayer Windows Server Update Services (WSUS) server. Windows servers provisioned by SoftLayer are configured to use the local WSUS server via the private network by default.
+func (r Hardware_SecurityModule) GetWindowsUpdateAvailableUpdates() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getWindowsUpdateAvailableUpdates", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a list of Windows updates installed on a server as reported by the local SoftLayer Windows Server Update Services (WSUS) server. Windows servers provisioned by SoftLayer are configured to use the local WSUS server via the private network by default.
+func (r Hardware_SecurityModule) GetWindowsUpdateInstalledUpdates() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getWindowsUpdateInstalledUpdates", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns an update status record for this server. That record will specify if the server is missing updates, or has updates that must be reinstalled or require a reboot to go into effect.
+func (r Hardware_SecurityModule) GetWindowsUpdateStatus() (resp datatypes.Container_Utility_Microsoft_Windows_UpdateServices_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "getWindowsUpdateStatus", nil, &r.Options, &resp)
+	return
+}
+
+// The '''importVirtualHost''' method attempts to import the host record for the virtualization platform running on a server.
+func (r Hardware_SecurityModule) ImportVirtualHost() (resp datatypes.Virtual_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "importVirtualHost", nil, &r.Options, &resp)
+	return
+}
+
+// Idera Bare Metal Server Restore is a backup agent designed specifically for performing full system restores from backups made with Idera Server Backup.
+func (r Hardware_SecurityModule) InitiateIderaBareMetalRestore() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "initiateIderaBareMetalRestore", nil, &r.Options, &resp)
+	return
+}
+
+// R1Soft Bare Metal Server Restore is an R1Soft disk agent designed specifically for performing full system restores from backups made with R1Soft CDP Server.
+func (r Hardware_SecurityModule) InitiateR1SoftBareMetalRestore() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "initiateR1SoftBareMetalRestore", nil, &r.Options, &resp)
+	return
+}
+
+// Issues a ping command and returns the success (true) or failure (false) of the ping command.
+func (r Hardware_SecurityModule) IsBackendPingable() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "isBackendPingable", nil, &r.Options, &resp)
+	return
+}
+
+// Issues a ping command and returns the success (true) or failure (false) of the ping command.
+func (r Hardware_SecurityModule) IsPingable() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "isPingable", nil, &r.Options, &resp)
+	return
+}
+
+// Determine if the server runs any version of the Microsoft Windows operating system. Returns ''true'' if it does and ''false'' otherwise.
+func (r Hardware_SecurityModule) IsWindowsServer() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "isWindowsServer", nil, &r.Options, &resp)
+	return
+}
+
+// You can launch firmware updates by selecting from your server list. It will bring your server offline for approximately 20 minutes while the updates are in progress.
+//
+// In the event of a hardware failure during this update our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal.
+func (r Hardware_SecurityModule) MassFirmwareUpdate(hardwareIds []int, ipmi *bool, raidController *bool, bios *bool, harddrive *bool) (resp []datatypes.Container_Hardware_Server_Request, err error) {
+	params := []interface{}{
+		hardwareIds,
+		ipmi,
+		raidController,
+		bios,
+		harddrive,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "massFirmwareUpdate", params, &r.Options, &resp)
+	return
+}
+
+// Reloads the current or a customer specified operating system configuration.
+//
+// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server.
+//
+// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record.
+//
+// The reload will take AT MINIMUM 66 minutes.
+func (r Hardware_SecurityModule) MassReloadOperatingSystem(hardwareIds []string, token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) {
+	params := []interface{}{
+		hardwareIds,
+		token,
+		config,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "massReloadOperatingSystem", params, &r.Options, &resp)
+	return
+}
+
+// The ability to place multiple bare metal servers in a state where they are powered down and ports closed yet still allocated to the customer as a part of the Spare Pool program.
+func (r Hardware_SecurityModule) MassSparePool(hardwareIds []string, action *string, newOrder *bool) (resp []datatypes.Container_Hardware_Server_Request, err error) {
+	params := []interface{}{
+		hardwareIds,
+		action,
+		newOrder,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "massSparePool", params, &r.Options, &resp)
+	return
+}
+
+// Issues a ping command to the server and returns the ping response.
+func (r Hardware_SecurityModule) Ping() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "ping", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_SecurityModule) PopulateServer(hardwareId *int, serialString *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + hardwareId, + serialString, + } + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "populateServer", params, &r.Options, &resp) + return +} + +// Power off then power on the server via powerstrip. The power cycle command is equivalent to unplugging the server from the powerstrip and then plugging the server back into the powerstrip. This should only be used as a last resort. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule) PowerCycle() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "powerCycle", nil, &r.Options, &resp) + return +} + +// This method will power off the server via the server's remote management card. +func (r Hardware_SecurityModule) PowerOff() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "powerOff", nil, &r.Options, &resp) + return +} + +// Power on server via its remote management card. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule) PowerOn() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "powerOn", nil, &r.Options, &resp) + return +} + +// Attempts to reboot the server by issuing a reset (soft reboot) command to the server's remote management card. If the reset (soft reboot) attempt is unsuccessful, a power cycle command will be issued via the powerstrip. The power cycle command is equivalent to unplugging the server from the powerstrip and then plugging the server back into the powerstrip. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_SecurityModule) RebootDefault() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "rebootDefault", nil, &r.Options, &resp) + return +} + +// Reboot the server by issuing a cycle command to the server's remote management card. This is equivalent to pressing the 'Reset' button on the server. This command is issued immediately and will not wait for processes to shutdown. After this command is issued, the server may take a few moments to boot up as server may run system disks checks. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. 
+func (r Hardware_SecurityModule) RebootHard() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "rebootHard", nil, &r.Options, &resp)
+	return
+}
+
+// Reboot the server by issuing a reset command to the server's remote management card. This is a graceful reboot. The server will allow all processes to shut down gracefully before rebooting. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failure.
+func (r Hardware_SecurityModule) RebootSoft() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "rebootSoft", nil, &r.Options, &resp)
+	return
+}
+
+// Reloads the current operating system configuration.
+//
+// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server.
+//
+// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record.
+//
+// The reload will take AT MINIMUM 66 minutes.
+func (r Hardware_SecurityModule) ReloadCurrentOperatingSystemConfiguration(token *string) (resp string, err error) {
+	params := []interface{}{
+		token,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "reloadCurrentOperatingSystemConfiguration", params, &r.Options, &resp)
+	return
+}
+
+// Reloads the current or a customer specified operating system configuration.
+//
+// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server.
+//
+// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record.
+//
+// The reload will take AT MINIMUM 66 minutes.
+func (r Hardware_SecurityModule) ReloadOperatingSystem(token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) {
+	params := []interface{}{
+		token,
+		config,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "reloadOperatingSystem", params, &r.Options, &resp)
+	return
+}
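+
+// Example (illustrative, not part of the generated API): a minimal sketch of
+// the two-step confirmation protocol described above. The first call without
+// a token returns a confirmation token; passing the token (or the literal
+// 'FORCE') back performs the reload. The session, hardware id and variable
+// names are placeholders.
+//
+//	svc := GetHardwareSecurityModuleService(sess).Id(123456)
+//	token, err := svc.ReloadOperatingSystem(nil, nil) // request a confirmation token
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// Confirm within 10 minutes by echoing the token back.
+//	if _, err := svc.ReloadOperatingSystem(&token, nil); err != nil {
+//		log.Fatal(err)
+//	}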
+
+// This method is used to remove access to a SoftLayer_Network_Storage volume that supports host- or network-level access control.
+func (r Hardware_SecurityModule) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "removeAccessToNetworkStorage", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to remove access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control.
+func (r Hardware_SecurityModule) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "removeAccessToNetworkStorageList", params, &r.Options, &resp)
+	return
+}
+
+// You can launch a new Passmark hardware test by selecting from your server list. It will bring your server offline for approximately 20 minutes while the testing is in progress, and will publish a certificate with the results to your hardware details page.
+//
+// While the hard drives are tested for the initial deployment, the Passmark Certificate utility will not test the hard drives on your live server. This is to ensure that no data is overwritten. If you would like to test the server's hard drives, you can have the full Passmark suite installed to your server free of charge through a new Support ticket.
+//
+// While the test itself does not overwrite any data on the server, it is recommended that you make full off-server backups of all data prior to launching the test. The Passmark hardware test is designed to force any latent hardware issues to the surface, so hardware failure is possible.
+//
+// In the event of a hardware failure during this test our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal.
+func (r Hardware_SecurityModule) RunPassmarkCertificationBenchmark() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "runPassmarkCertificationBenchmark", nil, &r.Options, &resp)
+	return
+}
+
+// Changes the password that we have stored in our database for a server's operating system.
+func (r Hardware_SecurityModule) SetOperatingSystemPassword(newPassword *string) (resp bool, err error) {
+	params := []interface{}{
+		newPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "setOperatingSystemPassword", params, &r.Options, &resp)
+	return
+}
+
+// Sets the private network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, 1000, and 10000. The new speed must be equal to or less than the max speed of the interface.
+//
+// It will take less than a minute to update the switch port speed. The server uplink will not be operational again until the server interface speed is updated.
+func (r Hardware_SecurityModule) SetPrivateNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) {
+	params := []interface{}{
+		newSpeed,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "setPrivateNetworkInterfaceSpeed", params, &r.Options, &resp)
+	return
+}
+
+// Sets the public network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, 1000, and 10000. The new speed must be equal to or less than the max speed of the interface.
+//
+// It will take less than a minute to update the switch port speed. The server uplink will not be operational again until the server interface speed is updated.
+func (r Hardware_SecurityModule) SetPublicNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) {
+	params := []interface{}{
+		newSpeed,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "setPublicNetworkInterfaceSpeed", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Hardware_SecurityModule) SetTags(tags *string) (resp bool, err error) {
+	params := []interface{}{
+		tags,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "setTags", params, &r.Options, &resp)
+	return
+}
+
+// Sets the data that will be written to the configuration drive.
+func (r Hardware_SecurityModule) SetUserMetadata(metadata []string) (resp []datatypes.Hardware_Attribute, err error) {
+	params := []interface{}{
+		metadata,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "setUserMetadata", params, &r.Options, &resp)
+	return
+}
+
+// Shuts down the private network port.
+func (r Hardware_SecurityModule) ShutdownPrivatePort() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "shutdownPrivatePort", nil, &r.Options, &resp)
+	return
+}
+
+// Shuts down the public network port.
+func (r Hardware_SecurityModule) ShutdownPublicPort() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "shutdownPublicPort", nil, &r.Options, &resp)
+	return
+}
+
+// The ability to place bare metal servers in a state where they are powered down, and ports closed yet still allocated to the customer as a part of the Spare Pool program.
+func (r Hardware_SecurityModule) SparePool(action *string, newOrder *bool) (resp bool, err error) {
+	params := []interface{}{
+		action,
+		newOrder,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "sparePool", params, &r.Options, &resp)
+	return
+}
+
+// This method will update the root IPMI password on this SoftLayer_Hardware.
+func (r Hardware_SecurityModule) UpdateIpmiPassword(password *string) (resp bool, err error) {
+	params := []interface{}{
+		password,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "updateIpmiPassword", params, &r.Options, &resp)
+	return
+}
+
+// Validates a collection of partitions for an operating system.
+func (r Hardware_SecurityModule) ValidatePartitionsForOperatingSystem(operatingSystem *datatypes.Software_Description, partitions []datatypes.Hardware_Component_Partition) (resp bool, err error) {
+	params := []interface{}{
+		operatingSystem,
+		partitions,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_SecurityModule", "validatePartitionsForOperatingSystem", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Hardware_Server data type contains general information relating to a single SoftLayer server.
+type Hardware_Server struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetHardwareServerService returns an instance of the Hardware_Server SoftLayer service
+func GetHardwareServerService(sess *session.Session) Hardware_Server {
+	return Hardware_Server{Session: sess}
+}
+
+func (r Hardware_Server) Id(id int) Hardware_Server {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Hardware_Server) Mask(mask string) Hardware_Server {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Hardware_Server) Filter(filter string) Hardware_Server {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Hardware_Server) Limit(limit int) Hardware_Server {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Hardware_Server) Offset(offset int) Hardware_Server {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Activates the private network port.
+func (r Hardware_Server) ActivatePrivatePort() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "activatePrivatePort", nil, &r.Options, &resp)
+	return
+}
+
+// Activates the public network port.
+func (r Hardware_Server) ActivatePublicPort() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "activatePublicPort", nil, &r.Options, &resp)
+	return
+}
+
+// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control.
+func (r Hardware_Server) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "allowAccessToNetworkStorage", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control.
+func (r Hardware_Server) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "allowAccessToNetworkStorageList", params, &r.Options, &resp)
+	return
+}
+
+// The Rescue Kernel is designed to provide you with the ability to bring a server online in order to troubleshoot system problems that would normally only be resolved by an OS Reload. The correct Rescue Kernel will be selected based upon the currently installed operating system. When the rescue kernel process is initiated, the server will shut down and reboot onto the public network with the same IPs assigned to the server to allow for remote connections. It will bring your server offline for approximately 10 minutes while the rescue is in progress. The root/administrator password will be the same as what is listed in the portal for the server.
+func (r Hardware_Server) BootToRescueLayer(noOsBootEnvironment *string) (resp bool, err error) {
+	params := []interface{}{
+		noOsBootEnvironment,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "bootToRescueLayer", params, &r.Options, &resp)
+	return
+}
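+
+// Example (illustrative, not part of the generated API): a minimal sketch of
+// how the chainable option setters above (Id, Mask, Filter, Limit, Offset)
+// are typically combined before a call, assuming an authenticated
+// *session.Session named sess and the service's getObject binding; the
+// identifier and mask properties are placeholders.
+//
+//	svc := GetHardwareServerService(sess).
+//		Id(123456).
+//		Mask("hostname;domain;primaryIpAddress")
+//	server, err := svc.GetObject()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(*server.Hostname)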
+
+// Captures a Flex Image of the hard disk on the physical machine, based on the capture template parameter. Returns the image template group containing the disk image.
+func (r Hardware_Server) CaptureImage(captureTemplate *datatypes.Container_Disk_Image_Capture_Template) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) {
+	params := []interface{}{
+		captureTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "captureImage", params, &r.Options, &resp)
+	return
+}
+
+// Closes the specified monitoring alarm.
+func (r Hardware_Server) CloseAlarm(alarmId *string) (resp bool, err error) {
+	params := []interface{}{
+		alarmId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "closeAlarm", params, &r.Options, &resp)
+	return
+}
+
+// You can launch firmware updates by selecting from your server list. It will bring your server offline for approximately 20 minutes while the updates are in progress.
+//
+// In the event of a hardware failure during this update our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal.
+func (r Hardware_Server) CreateFirmwareUpdateTransaction(ipmi *int, raidController *int, bios *int, harddrive *int) (resp bool, err error) {
+	params := []interface{}{
+		ipmi,
+		raidController,
+		bios,
+		harddrive,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "createFirmwareUpdateTransaction", params, &r.Options, &resp)
+	return
+}
+
+// createObject() enables the creation of servers on an account. This method is a simplified alternative to interacting with the ordering system directly.
+//
+// In order to create a server, a template object must be sent in with a few required values.
+//
+// When this method returns, an order will have been placed for a server of the specified configuration.
+//
+// To determine when the server is available you can poll the server via [[SoftLayer_Hardware/getObject|getObject]], checking the provisionDate property. When provisionDate is not null, the server will be ready. Be sure to use the globalIdentifier as your initialization parameter.
+//
+// Warning: Servers created via this method will incur charges on your account. For testing input parameters see [[SoftLayer_Hardware/generateOrderTemplate|generateOrderTemplate]].
+//
+// Input - [[SoftLayer_Hardware (type)|SoftLayer_Hardware]]
+//
+// * hostname
+//   Hostname for the server.
+//   - Required
+//   - Type - string
+//
+// * domain
+//   Domain for the server.
+//   - Required
+//   - Type - string
+//
+// * processorCoreAmount
+//   The number of logical CPU cores to allocate.
+//   - Required
+//   - Type - int
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * memoryCapacity
+//   The amount of memory to allocate in gigabytes.
+//   - Required
+//   - Type - int
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * hourlyBillingFlag
+//   Specifies the billing type for the server.
+//   - Required
+//   - Type - boolean
+//   - When true the server will be billed on hourly usage, otherwise it will be billed on a monthly basis.
+//
+// * operatingSystemReferenceCode
+//   An identifier for the operating system to provision the server with.
+//   - Required
+//   - Type - string
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+// * datacenter.name
+//   Specifies which datacenter the server is to be provisioned in.
+//   - Required
+//   - Type - string
+//   - The datacenter property is a [[SoftLayer_Location (type)|location]] structure with the name field set.
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {
+//       "datacenter": {
+//           "name": "dal05"
+//       }
+//   }
+//
+// * networkComponents.maxSpeed
+//   Specifies the connection speed for the server's network components.
+//   - Optional
+//   - Type - int
+//   - Default - The highest available zero cost port speed will be used.
+//   - Description - The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]] structure. The maxSpeed property must be set to specify the network uplink speed, in megabits per second, of the server.
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {
+//       "networkComponents": [
+//           {
+//               "maxSpeed": 1000
+//           }
+//       ]
+//   }
+//
+// * networkComponents.redundancyEnabledFlag
+//   Specifies whether or not the server's network components should be in redundancy groups.
+//   - Optional
+//   - Type - bool
+//   - Default - false
+//   - Description - The networkComponents property is an array with a single [[SoftLayer_Network_Component (type)|network component]] structure. When the redundancyEnabledFlag property is true the server's network components will be in redundancy groups.
+//   {
+//       "networkComponents": [
+//           {
+//               "redundancyEnabledFlag": false
+//           }
+//       ]
+//   }
+//
+// * privateNetworkOnlyFlag
+//   Specifies whether or not the server only has access to the private network.
+//   - Optional
+//   - Type - boolean
+//   - Default - false
+//   - When true this flag specifies that a server is to only have access to the private network.
+//
+// * primaryNetworkComponent.networkVlan.id
+//   Specifies the network vlan which is to be used for the frontend interface of the server.
+//   - Optional
+//   - Type - int
+//   - Description - The primaryNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the frontend network vlan of the server.
+//   {
+//       "primaryNetworkComponent": {
+//           "networkVlan": {
+//               "id": 1
+//           }
+//       }
+//   }
+//
+// * primaryBackendNetworkComponent.networkVlan.id
+//   Specifies the network vlan which is to be used for the backend interface of the server.
+//   - Optional
+//   - Type - int
+//   - Description - The primaryBackendNetworkComponent property is a [[SoftLayer_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the backend network vlan of the server.
+//   {
+//       "primaryBackendNetworkComponent": {
+//           "networkVlan": {
+//               "id": 2
+//           }
+//       }
+//   }
+//
+// * fixedConfigurationPreset.keyName
+//   - Optional
+//   - Type - string
+//   - Description - The fixedConfigurationPreset property is a [[SoftLayer_Product_Package_Preset (type)|fixed configuration preset]] structure. The keyName property must be set to specify the preset to use.
+//   - If a fixed configuration preset is used, the processorCoreAmount, memoryCapacity and hardDrives properties must not be set.
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {
+//       "fixedConfigurationPreset": {
+//           "keyName": "SOME_KEY_NAME"
+//       }
+//   }
+//
+// * userData.value
+//   Arbitrary data to be made available to the server.
+//   - Optional
+//   - Type - string
+//   - Description - The userData property is an array with a single [[SoftLayer_Hardware_Attribute (type)|attribute]] structure with the value property set to an arbitrary value.
+//   - This value can be retrieved via the [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method from a request originating from the server. This is primarily useful for providing data to software that may be on the server and configured to execute upon first boot.
+//   {
+//       "userData": [
+//           {
+//               "value": "someValue"
+//           }
+//       ]
+//   }
+//
+// * hardDrives
+//   Hard drive settings for the server.
+//   - Optional
+//   - Type - SoftLayer_Hardware_Component
+//   - Default - The largest available capacity for a zero cost primary disk will be used.
+//   - Description - The hardDrives property is an array of [[SoftLayer_Hardware_Component (type)|hardware component]] structures. Each hard drive must specify the capacity property.
+//   - See [[SoftLayer_Hardware/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//   {
+//       "hardDrives": [
+//           {
+//               "capacity": 500
+//           }
+//       ]
+//   }
+//
+// * sshKeys
+//   SSH keys to install on the server upon provisioning.
+//   - Optional
+//   - Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]]
+//   - Description - The sshKeys property is an array of [[SoftLayer_Security_Ssh_Key (type)|SSH Key]] structures with the id property set to the value of an existing SSH key.
+//   - To create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]] on the [[SoftLayer_Security_Ssh_Key]] service.
+//   - To obtain a list of existing SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the [[SoftLayer_Account]] service.
+//   {
+//       "sshKeys": [
+//           {
+//               "id": 123
+//           }
+//       ]
+//   }
+//
+// * postInstallScriptUri
+//   Specifies the URI location of the script to be downloaded and run after installation is complete.
+//   - Optional
+//   - Type - string
+//
+// REST Example
+//
    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "processorCoreAmount": 2, +// "memoryCapacity": 2, +// "hourlyBillingFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Hardware.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Hardware/f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5/getObject +// +// +// { +// "accountId": 232298, +// "bareMetalInstanceFlag": null, +// "domain": "example.com", +// "hardwareStatusId": null, +// "hostname": "host1", +// "id": null, +// "serviceProviderId": null, +// "serviceProviderResourceId": null, +// "globalIdentifier": "f5a3fcff-db1d-4b7c-9fa0-0349e41c29c5", +// "hourlyBillingFlag": true, +// "memoryCapacity": 2, +// "operatingSystemReferenceCode": "UBUNTU_LATEST", +// "processorCoreAmount": 2 +// } +// +func (r Hardware_Server) CreateObject(templateObject *datatypes.Hardware_Server) (resp datatypes.Hardware_Server, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Server) CreatePostSoftwareInstallTransaction(installCodes []string, returnBoolean *bool) (resp bool, err error) { + params := []interface{}{ + installCodes, + returnBoolean, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "createPostSoftwareInstallTransaction", params, &r.Options, &resp) + return +} + +// +// This method will cancel a server effective immediately. For servers billed hourly, the charges will stop immediately after the method returns. +func (r Hardware_Server) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete software component passwords. +func (r Hardware_Server) DeleteSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "deleteSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Edit a server's properties +func (r Hardware_Server) EditObject(templateObject *datatypes.Hardware_Server) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "editObject", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, and notes. +func (r Hardware_Server) EditSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + softwareComponentPasswords, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "editSoftwareComponentPasswords", params, &r.Options, &resp) + return +} + +// Download and run remote script from uri on the hardware. +func (r Hardware_Server) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// The '''findByIpAddress''' method finds hardware using its primary public or private IP address. 
+
+// no documentation yet
+func (r Hardware_Server) CreatePostSoftwareInstallTransaction(installCodes []string, returnBoolean *bool) (resp bool, err error) {
+	params := []interface{}{
+		installCodes,
+		returnBoolean,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "createPostSoftwareInstallTransaction", params, &r.Options, &resp)
+	return
+}
+
+//
+// This method will cancel a server effective immediately. For servers billed hourly, the charges will stop immediately after the method returns.
+func (r Hardware_Server) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// Delete software component passwords.
+func (r Hardware_Server) DeleteSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) {
+	params := []interface{}{
+		softwareComponentPasswords,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "deleteSoftwareComponentPasswords", params, &r.Options, &resp)
+	return
+}
+
+// Edit a server's properties
+func (r Hardware_Server) EditObject(templateObject *datatypes.Hardware_Server) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Edit the properties of a software component password such as the username, password, and notes.
+func (r Hardware_Server) EditSoftwareComponentPasswords(softwareComponentPasswords []datatypes.Software_Component_Password) (resp bool, err error) {
+	params := []interface{}{
+		softwareComponentPasswords,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "editSoftwareComponentPasswords", params, &r.Options, &resp)
+	return
+}
+
+// Download and run remote script from uri on the hardware.
+func (r Hardware_Server) ExecuteRemoteScript(uri *string) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		uri,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "executeRemoteScript", params, &r.Options, &resp)
+	return
+}
+
+// The '''findByIpAddress''' method finds hardware using its primary public or private IP address. IP addresses that have a secondary subnet tied to the hardware will not return the hardware - alternate means of locating the hardware must be used (see '''Associated Methods'''). If no hardware is found, no errors are generated and no data is returned.
+func (r Hardware_Server) FindByIpAddress(ipAddress *string) (resp datatypes.Hardware, err error) {
+	params := []interface{}{
+		ipAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "findByIpAddress", params, &r.Options, &resp)
+	return
+}
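+
+// Editorial usage sketch for the lookup above (assumes a *session.Session
+// named sess and the "sl" helper package; the IP is a placeholder):
+//
+//	service := services.GetHardwareServerService(sess)
+//	hw, err := service.FindByIpAddress(sl.String("203.0.113.10"))
+//	if err == nil && hw.Id != nil {
+//		// hw is a datatypes.Hardware; an empty result simply leaves fields nil.
+//	}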
+
+//
+// Obtain an [[SoftLayer_Container_Product_Order_Hardware_Server (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]].
+//
+//
+// This is primarily useful when you need to confirm the price that will be charged for an order.
+//
+//
+// See [[SoftLayer_Hardware/createObject|createObject]] for specifics on the requirements of the template object parameter.
+func (r Hardware_Server) GenerateOrderTemplate(templateObject *datatypes.Hardware) (resp datatypes.Container_Product_Order, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "generateOrderTemplate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account associated with a piece of hardware.
+func (r Hardware_Server) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's active physical components.
+func (r Hardware_Server) GetActiveComponents() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The billing item for a server's attached network firewall.
+func (r Hardware_Server) GetActiveNetworkFirewallBillingItem() (resp datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveNetworkFirewallBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's active network monitoring incidents.
+func (r Hardware_Server) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Server) GetActiveTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveTickets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Transaction currently running for server.
+func (r Hardware_Server) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveTransaction", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Any active transaction(s) that are currently running for the server (example: os reload).
+func (r Hardware_Server) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getActiveTransactions", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getAlarmHistory''' method retrieves a detailed history for the monitoring alarm. When calling this method, a start and end date for the history to be retrieved must be entered.
+func (r Hardware_Server) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+		alarmId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAlarmHistory", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Server) GetAllPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAllPowerComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this server to Network Storage volumes that require access control lists.
+func (r Hardware_Server) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAllowedHost", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Hardware has access to.
+func (r Hardware_Server) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAllowedNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage replicas that this SoftLayer_Hardware has access to.
+func (r Hardware_Server) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding an antivirus/spyware software component object.
+func (r Hardware_Server) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Storage volumes that are authorized for access by this SoftLayer_Hardware.
+func (r Hardware_Server) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) {
+	params := []interface{}{
+		nasType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAttachedNetworkStorages", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's specific attributes.
+func (r Hardware_Server) GetAttributes() (resp []datatypes.Hardware_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAttributes", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An object that stores the maximum level for the monitoring query types and response types.
+func (r Hardware_Server) GetAvailableMonitoring() (resp []datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAvailableMonitoring", nil, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Hardware.
+func (r Hardware_Server) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) {
+	params := []interface{}{
+		nasType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAvailableNetworkStorages", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The average daily total bandwidth usage for the current billing cycle.
+func (r Hardware_Server) GetAverageDailyBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAverageDailyBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The average daily private bandwidth usage for the current billing cycle.
+func (r Hardware_Server) GetAverageDailyPrivateBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAverageDailyPrivateBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The average daily public bandwidth usage for the current billing cycle.
+func (r Hardware_Server) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Use this method to return an array of private bandwidth utilization records within a given date range.
+//
+// This method represents the NEW version of getBackendBandwidthUse
+func (r Hardware_Server) GetBackendBandwidthUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendBandwidthUsage", params, &r.Options, &resp)
+	return
+}
+
+// Use this method to return an array of private bandwidth utilization records within a given date range.
+func (r Hardware_Server) GetBackendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendBandwidthUse", params, &r.Options, &resp)
+	return
+}
+
+// The '''getBackendIncomingBandwidth''' method retrieves the amount of incoming private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes.
+func (r Hardware_Server) GetBackendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendIncomingBandwidth", params, &r.Options, &resp)
+	return
+}
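+
+// Editorial usage sketch: querying one month of incoming private bandwidth.
+// It assumes datatypes.Time wraps time.Time (as in softlayer-go), the "time"
+// package, and a placeholder server id of 123456:
+//
+//	service := services.GetHardwareServerService(sess)
+//	start := datatypes.Time{Time: time.Date(2017, time.April, 1, 0, 0, 0, 0, time.UTC)}
+//	end := datatypes.Time{Time: time.Date(2017, time.April, 30, 0, 0, 0, 0, time.UTC)}
+//	gb, err := service.Id(123456).GetBackendIncomingBandwidth(&start, &end)
+//	// Only the date portion is used; gb is measured in gigabytes.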
+
+// Retrieve A piece of hardware's back-end or private network components.
+func (r Hardware_Server) GetBackendNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getBackendOutgoingBandwidth''' method retrieves the amount of outgoing private network traffic used between the given start date and end date parameters. When entering start and end dates, only the month, day and year are used to calculate bandwidth totals - the time (HH:MM:SS) is ignored and defaults to midnight. The amount of bandwidth retrieved is measured in gigabytes.
+func (r Hardware_Server) GetBackendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendOutgoingBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's backend or private routers.
+func (r Hardware_Server) GetBackendRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBackendRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's allotted bandwidth (measured in GB).
+func (r Hardware_Server) GetBandwidthAllocation() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBandwidthAllocation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's allotted detail record. Allotment details link bandwidth allocation with allotments.
+func (r Hardware_Server) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBandwidthAllotmentDetail", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a collection of bandwidth data from an individual public or private network tracking object. This data is ideal if you wish to employ your own traffic storage and graphing systems.
+func (r Hardware_Server) GetBandwidthForDateRange(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBandwidthForDateRange", params, &r.Options, &resp)
+	return
+}
+
+// Use this method when you need a bandwidth image for a single server. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified. Use the $draw flag to suppress the generation of the actual binary PNG image.
+func (r Hardware_Server) GetBandwidthImage(networkType *string, snapshotRange *string, draw *bool, dateSpecified *datatypes.Time, dateSpecifiedEnd *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		networkType,
+		snapshotRange,
+		draw,
+		dateSpecified,
+		dateSpecifiedEnd,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBandwidthImage", params, &r.Options, &resp)
+	return
+}
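+
+// Editorial usage sketch for the graphing helper above. The networkType and
+// snapshotRange values ("public", "day") and the server id are illustrative
+// placeholders, not values confirmed by this file:
+//
+//	graph, err := service.Id(123456).GetBandwidthImage(
+//		sl.String("public"), sl.String("day"), sl.Bool(true), nil, nil)
+//	// graph is a Container_Bandwidth_GraphOutputs holding the PNG data.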
+
+// Retrieve Information regarding a piece of hardware's benchmark certifications.
+func (r Hardware_Server) GetBenchmarkCertifications() (resp []datatypes.Hardware_Benchmark_Certification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBenchmarkCertifications", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+func (r Hardware_Server) GetBillingCycleBandwidthUsage() (resp []datatypes.Network_Bandwidth_Usage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBillingCycleBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The raw private bandwidth usage data for the current billing cycle.
+func (r Hardware_Server) GetBillingCyclePrivateBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBillingCyclePrivateBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The raw public bandwidth usage data for the current billing cycle.
+func (r Hardware_Server) GetBillingCyclePublicBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBillingCyclePublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the billing item for a server.
+func (r Hardware_Server) GetBillingItem() (resp datatypes.Billing_Item_Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A flag indicating that a billing item exists.
+func (r Hardware_Server) GetBillingItemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBillingItemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determines whether the hardware is ineligible for cancellation because it is disconnected.
+func (r Hardware_Server) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Status indicating whether or not a piece of hardware has business continuance insurance.
+func (r Hardware_Server) GetBusinessContinuanceInsuranceFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getBusinessContinuanceInsuranceFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Child hardware.
+func (r Hardware_Server) GetChildrenHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getChildrenHardware", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Hardware_Server) GetComponentDetailsXML() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getComponentDetailsXML", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's components.
+func (r Hardware_Server) GetComponents() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Server) GetContainsSolidStateDrivesFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getContainsSolidStateDrivesFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A continuous data protection/server backup software component object.
+func (r Hardware_Server) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A server's control panel.
+func (r Hardware_Server) GetControlPanel() (resp datatypes.Software_Component_ControlPanel, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getControlPanel", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total cost of a server, measured in US Dollars ($USD).
+func (r Hardware_Server) GetCost() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCost", nil, &r.Options, &resp)
+	return
+}
+
+//
+// There are many options that may be provided while ordering a server; this method can be used to determine what those options are.
+//
+//
+// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Hardware_Configuration (type)]].
+func (r Hardware_Server) GetCreateObjectOptions() (resp datatypes.Container_Hardware_Configuration, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCreateObjectOptions", nil, &r.Options, &resp)
+	return
+}
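+
+// Editorial usage sketch: fetching the valid configuration options before
+// building a createObject template (no object id is needed for this call):
+//
+//	options, err := services.GetHardwareServerService(sess).GetCreateObjectOptions()
+//	// options is a Container_Hardware_Configuration describing the orderable
+//	// values (datacenters, processors, operating systems, and so on).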
+
+// Retrieve An object that provides commonly used bandwidth summary components for the current billing cycle.
+func (r Hardware_Server) GetCurrentBandwidthSummary() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCurrentBandwidthSummary", nil, &r.Options, &resp)
+	return
+}
+
+// Attempt to retrieve the file associated with the current benchmark certification result, if such a file exists. If there is no file for this benchmark certification result, calling this method throws an exception.
+func (r Hardware_Server) GetCurrentBenchmarkCertificationResultFile() (resp []byte, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCurrentBenchmarkCertificationResultFile", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current billable public outbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_Server) GetCurrentBillableBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCurrentBillableBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Get the billing detail for this instance for the current billing period. This does not include bandwidth usage.
+func (r Hardware_Server) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCurrentBillingDetail", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getCurrentBillingTotal''' method retrieves the total bill amount in US Dollars ($) for the current billing period. In addition to the total bill amount, the billing detail also includes all bandwidth used up to the point the method is called on the piece of hardware.
+func (r Hardware_Server) GetCurrentBillingTotal() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCurrentBillingTotal", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve bandwidth graph by date.
+func (r Hardware_Server) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) {
+	params := []interface{}{
+		graphData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCustomBandwidthDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates if a server has a Customer Installed OS
+func (r Hardware_Server) GetCustomerInstalledOperatingSystemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCustomerInstalledOperatingSystemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates if a server is a customer owned device.
+func (r Hardware_Server) GetCustomerOwnedFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getCustomerOwnedFlag", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getDailyAverage''' method calculates the average daily network traffic used by the selected server. Using the required parameter ''dateTime'' to enter a start and end date, the user retrieves this average, measured in gigabytes (GB), for the specified date range. When entering parameters, only the month, day and year are required - time entries are omitted as this method defaults the time to midnight in order to account for the entire day.
+func (r Hardware_Server) GetDailyAverage(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDailyAverage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the datacenter in which a piece of hardware resides.
+func (r Hardware_Server) GetDatacenter() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDatacenter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The name of the datacenter in which a piece of hardware resides.
+func (r Hardware_Server) GetDatacenterName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDatacenterName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware that has uplink network connections to a piece of hardware.
+func (r Hardware_Server) GetDownlinkHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownlinkHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware that has uplink network connections to a piece of hardware.
+func (r Hardware_Server) GetDownlinkNetworkHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownlinkNetworkHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding all servers attached to a piece of network hardware.
+func (r Hardware_Server) GetDownlinkServers() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownlinkServers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding all virtual guests attached to a piece of network hardware.
+func (r Hardware_Server) GetDownlinkVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownlinkVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All hardware downstream from a network device.
+func (r Hardware_Server) GetDownstreamHardwareBindings() (resp []datatypes.Network_Component_Uplink_Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownstreamHardwareBindings", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All network hardware downstream from the selected piece of hardware.
+func (r Hardware_Server) GetDownstreamNetworkHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownstreamNetworkHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All network hardware with monitoring warnings or errors that are downstream from the selected piece of hardware.
+func (r Hardware_Server) GetDownstreamNetworkHardwareWithIncidents() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownstreamNetworkHardwareWithIncidents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding all servers attached downstream to a piece of network hardware.
+func (r Hardware_Server) GetDownstreamServers() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownstreamServers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding all virtual guests attached to a piece of network hardware.
+func (r Hardware_Server) GetDownstreamVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDownstreamVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The drive controllers contained within a piece of hardware.
+func (r Hardware_Server) GetDriveControllers() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getDriveControllers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's associated EVault network storage service account.
+func (r Hardware_Server) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getEvaultNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Get the subnets associated with this server that are protectable by a network component firewall.
+func (r Hardware_Server) GetFirewallProtectableSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFirewallProtectableSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's firewall services.
+func (r Hardware_Server) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFirewallServiceComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Defines the fixed components in a fixed configuration bare metal server.
+func (r Hardware_Server) GetFixedConfigurationPreset() (resp datatypes.Product_Package_Preset, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFixedConfigurationPreset", nil, &r.Options, &resp)
+	return
+}
+
+// Use this method to return an array of public bandwidth utilization records within a given date range.
+//
+// This method represents the NEW version of getFrontendBandwidthUse
+func (r Hardware_Server) GetFrontendBandwidthUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendBandwidthUsage", params, &r.Options, &resp)
+	return
+}
+
+// Use this method to return an array of public bandwidth utilization records within a given date range.
+func (r Hardware_Server) GetFrontendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendBandwidthUse", params, &r.Options, &resp)
+	return
+}
+
+// The '''getFrontendIncomingBandwidth''' method retrieves the amount of incoming public network traffic used by a server between the given start and end date parameters. When entering the ''dateTime'' parameter, only the month, day and year of the start and end dates are required - the time (hour, minute and second) are set to midnight by default and cannot be changed. The amount of bandwidth retrieved is measured in gigabytes (GB).
+func (r Hardware_Server) GetFrontendIncomingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendIncomingBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's front-end or public network components.
+func (r Hardware_Server) GetFrontendNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getFrontendOutgoingBandwidth''' method retrieves the amount of outgoing public network traffic used by a server between the given start and end date parameters. The ''dateTime'' parameter requires only the day, month and year to be entered - the time (hour, minute and second) are set to midnight by default in order to gather the data for the entire start and end date indicated in the parameter. The amount of bandwidth retrieved is measured in gigabytes (GB).
+func (r Hardware_Server) GetFrontendOutgoingBandwidth(startDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Float64, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendOutgoingBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's frontend or public router.
+func (r Hardware_Server) GetFrontendRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getFrontendRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's universally unique identifier.
+func (r Hardware_Server) GetGlobalIdentifier() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getGlobalIdentifier", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hard drives contained within a piece of hardware.
+func (r Hardware_Server) GetHardDrives() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardDrives", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server by searching for the primary IP address.
+func (r Hardware_Server) GetHardwareByIpAddress(ipAddress *string) (resp datatypes.Hardware_Server, err error) {
+	params := []interface{}{
+		ipAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardwareByIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The chassis that a piece of hardware is housed in.
+func (r Hardware_Server) GetHardwareChassis() (resp datatypes.Hardware_Chassis, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardwareChassis", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware_Server) GetHardwareFunction() (resp datatypes.Hardware_Function, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardwareFunction", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's function.
+func (r Hardware_Server) GetHardwareFunctionDescription() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardwareFunctionDescription", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's status.
+func (r Hardware_Server) GetHardwareStatus() (resp datatypes.Hardware_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHardwareStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determines whether the hardware object has TPM enabled.
+func (r Hardware_Server) GetHasTrustedPlatformModuleBillingItemFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHasTrustedPlatformModuleBillingItemFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a host IPS software component object.
+func (r Hardware_Server) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHostIpsSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// The '''getHourlyBandwidth''' method retrieves all bandwidth updates hourly for the specified hardware. Because the potential number of data points can become excessive, the method limits the user to obtain data in 24-hour intervals. The required ''dateTime'' parameter is used as the starting point for the query and will be calculated for the 24-hour period starting with the specified date and time. For example, entering a parameter of
+//
+// '02/01/2008 0:00'
+//
+// results in a return of all bandwidth data for the entire day of February 1, 2008, as 0:00 specifies a midnight start date. Please note that the time entered should be completed using a 24-hour clock (military time, astronomical time).
+//
+// For data spanning more than a single 24-hour period, refer to the getBandwidthData function on the metricTrackingObject for the piece of hardware.
+func (r Hardware_Server) GetHourlyBandwidth(mode *string, day *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		mode,
+		day,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHourlyBandwidth", params, &r.Options, &resp)
+	return
+}
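+
+// Editorial usage sketch for the 24-hour window described above. The mode
+// value "public" and the server id are illustrative placeholders; day anchors
+// the window at midnight of the chosen date:
+//
+//	day := datatypes.Time{Time: time.Date(2008, time.February, 1, 0, 0, 0, 0, time.UTC)}
+//	points, err := service.Id(123456).GetHourlyBandwidth(sl.String("public"), &day)
+//	// points covers February 1, 2008 00:00 through the following midnight.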
+
+// Retrieve A server's hourly billing status.
+func (r Hardware_Server) GetHourlyBillingFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getHourlyBillingFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The sum of all the inbound network traffic data for the last 30 days.
+func (r Hardware_Server) GetInboundBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getInboundBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total private inbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_Server) GetInboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getInboundPrivateBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total public inbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_Server) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Return a collection of SoftLayer_Item_Price objects from a collection of SoftLayer_Software_Description.
+func (r Hardware_Server) GetItemPricesFromSoftwareDescriptions(softwareDescriptions []datatypes.Software_Description, includeTranslationsFlag *bool, returnAllPricesFlag *bool) (resp []datatypes.Product_Item, err error) {
+	params := []interface{}{
+		softwareDescriptions,
+		includeTranslationsFlag,
+		returnAllPricesFlag,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getItemPricesFromSoftwareDescriptions", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The last transaction in which a server's operating system was loaded.
+func (r Hardware_Server) GetLastOperatingSystemReload() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLastOperatingSystemReload", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the last transaction a server performed.
+func (r Hardware_Server) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLastTransaction", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's latest network monitoring incident.
+func (r Hardware_Server) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Where a piece of hardware is located within SoftLayer's location hierarchy.
+func (r Hardware_Server) GetLocation() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLocation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Server) GetLocationPathString() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLocationPathString", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a lockbox account associated with a server.
+func (r Hardware_Server) GetLockboxNetworkStorage() (resp datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getLockboxNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A flag indicating that the hardware is a managed resource.
+func (r Hardware_Server) GetManagedResourceFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getManagedResourceFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the remote management network component attached with this server.
+func (r Hardware_Server) GetManagementNetworkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getManagementNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's memory.
+func (r Hardware_Server) GetMemory() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMemory", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The amount of memory a piece of hardware has, measured in gigabytes.
+func (r Hardware_Server) GetMemoryCapacity() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMemoryCapacity", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's metric tracking object.
+func (r Hardware_Server) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_HardwareServer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMetricTrackingObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The metric tracking object id for this server.
+func (r Hardware_Server) GetMetricTrackingObjectId() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMetricTrackingObjectId", nil, &r.Options, &resp)
+	return
+}
+
+// Returns open monitoring alarms for a given time period
+func (r Hardware_Server) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringActiveAlarms", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the monitoring agents associated with a piece of hardware.
+func (r Hardware_Server) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringAgents", nil, &r.Options, &resp)
+	return
+}
+
+// Returns closed monitoring alarms for a given time period
+func (r Hardware_Server) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringClosedAlarms", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the hardware's monitoring robot.
+func (r Hardware_Server) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringRobot", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's network monitoring services.
+func (r Hardware_Server) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringServiceComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The monitoring service flag eligibility status for a piece of hardware.
+func (r Hardware_Server) GetMonitoringServiceEligibilityFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The service flag status for a piece of hardware.
+func (r Hardware_Server) GetMonitoringServiceFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringServiceFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The monitoring notification objects for this hardware. Each object links this hardware instance to a user account that will be notified if monitoring on this hardware object fails.
+func (r Hardware_Server) GetMonitoringUserNotification() (resp []datatypes.User_Customer_Notification_Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMonitoringUserNotification", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's motherboard.
+func (r Hardware_Server) GetMotherboard() (resp datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getMotherboard", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's network cards.
+func (r Hardware_Server) GetNetworkCards() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkCards", nil, &r.Options, &resp)
+	return
+}
+
+// Get the IP addresses associated with this server that are protectable by a network component firewall. Note, this may not return all values for IPv6 subnets for this server. Please use getFirewallProtectableSubnets to get all protectable subnets.
+func (r Hardware_Server) GetNetworkComponentFirewallProtectableIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkComponentFirewallProtectableIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's network components.
+func (r Hardware_Server) GetNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The gateway member if this device is part of a network gateway.
+func (r Hardware_Server) GetNetworkGatewayMember() (resp datatypes.Network_Gateway_Member, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkGatewayMember", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether or not this device is part of a network gateway.
+func (r Hardware_Server) GetNetworkGatewayMemberFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkGatewayMemberFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's network management IP address.
+func (r Hardware_Server) GetNetworkManagementIpAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkManagementIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All servers with failed monitoring that are attached downstream to a piece of hardware.
+func (r Hardware_Server) GetNetworkMonitorAttachedDownHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkMonitorAttachedDownHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Virtual guests with failed monitoring that are attached downstream to a piece of hardware.
+func (r Hardware_Server) GetNetworkMonitorAttachedDownVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkMonitorAttachedDownVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The status of all of a piece of hardware's network monitoring incidents.
+func (r Hardware_Server) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkMonitorIncidents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's network monitors.
+func (r Hardware_Server) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkMonitors", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The value of a hardware's network status attribute.
+func (r Hardware_Server) GetNetworkStatus() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hardware's related network status attribute.
+func (r Hardware_Server) GetNetworkStatusAttribute() (resp datatypes.Hardware_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkStatusAttribute", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's associated network storage service account.
+func (r Hardware_Server) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network virtual LANs (VLANs) associated with a piece of hardware's network components.
+func (r Hardware_Server) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNetworkVlans", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's allotted bandwidth for the next billing cycle (measured in GB).
+func (r Hardware_Server) GetNextBillingCycleBandwidthAllocation() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNextBillingCycleBandwidthAllocation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Server) GetNotesHistory() (resp []datatypes.Hardware_Note, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getNotesHistory", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Hardware_Server object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Hardware service. You can only retrieve servers from the account that your portal user is assigned to.
+func (r Hardware_Server) GetObject() (resp datatypes.Hardware_Server, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getObject", nil, &r.Options, &resp)
+	return
+}
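+
+// Editorial usage sketch: the generated services carry the init parameter id
+// and an object mask in r.Options, so a typical lookup scopes the service
+// first (the id and mask properties here are illustrative):
+//
+//	server, err := services.GetHardwareServerService(sess).
+//		Id(123456).
+//		Mask("id;hostname;domain;primaryIpAddress").
+//		GetObject()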
+
+// Retrieve An open ticket requesting cancellation of this server, if one exists.
+func (r Hardware_Server) GetOpenCancellationTicket() (resp datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOpenCancellationTicket", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's operating system.
+func (r Hardware_Server) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOperatingSystem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's operating system software description.
+func (r Hardware_Server) GetOperatingSystemReferenceCode() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOperatingSystemReferenceCode", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The sum of all the outbound network traffic data for the last 30 days.
+func (r Hardware_Server) GetOutboundBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOutboundBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total private outbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_Server) GetOutboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOutboundPrivateBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total public outbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_Server) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether the bandwidth usage for this hardware for the current billing cycle exceeds the allocation.
+func (r Hardware_Server) GetOverBandwidthAllocationFlag() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getOverBandwidthAllocationFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures system temperatures, voltages, and other local server settings. Sensor data is cached for 30 seconds. Calls made to getSensorData for the same server within 30 seconds of each other will return the same data. Subsequent calls will return new data once the cache expires.
+func (r Hardware_Server) GetPMInfo() (resp []datatypes.Container_RemoteManagement_PmInfo, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPMInfo", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Parent Hardware.
+func (r Hardware_Server) GetParentHardware() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getParentHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the Point of Presence (PoP) location in which a piece of hardware resides.
+func (r Hardware_Server) GetPointOfPresenceLocation() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPointOfPresenceLocation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The power components for a hardware object.
+func (r Hardware_Server) GetPowerComponents() (resp []datatypes.Hardware_Power_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPowerComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's power supply.
+func (r Hardware_Server) GetPowerSupply() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPowerSupply", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hardware's primary private IP address.
+func (r Hardware_Server) GetPrimaryBackendIpAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrimaryBackendIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the hardware's primary back-end network component.
+func (r Hardware_Server) GetPrimaryBackendNetworkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Hardware_Server) GetPrimaryDriveSize() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrimaryDriveSize", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hardware's primary public IP address.
+func (r Hardware_Server) GetPrimaryIpAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrimaryIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the hardware's primary public network component.
+func (r Hardware_Server) GetPrimaryNetworkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrimaryNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server's private network bandwidth usage data over the specified timeframe. If no timeframe is specified then getPrivateBandwidthData retrieves the last 24 hours of private bandwidth usage.
+func (r Hardware_Server) GetPrivateBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startTime,
+		endTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateBandwidthData", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a brief summary of a server's private network bandwidth usage. getPrivateBandwidthDataSummary retrieves a server's bandwidth allocation for its billing period, its estimated usage during its billing period, and an estimation of how much bandwidth it will use during its billing period based on its current usage. A server's projected bandwidth usage increases in accuracy as it progresses through its billing period.
+func (r Hardware_Server) GetPrivateBandwidthDataSummary() (resp datatypes.Container_Network_Bandwidth_Data_Summary, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateBandwidthDataSummary", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a graph of a server's private network bandwidth usage over the specified time frame. If no time frame is specified then getPrivateBandwidthGraphImage retrieves the last 24 hours of private bandwidth usage. getPrivateBandwidthGraphImage returns a PNG image.
+func (r Hardware_Server) GetPrivateBandwidthGraphImage(startTime *string, endTime *string) (resp []byte, err error) {
+	params := []interface{}{
+		startTime,
+		endTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateBandwidthGraphImage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A server's primary private IP address.
+func (r Hardware_Server) GetPrivateIpAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the private network component attached with this server.
+func (r Hardware_Server) GetPrivateNetworkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether the hardware only has access to the private network.
+func (r Hardware_Server) GetPrivateNetworkOnlyFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the backend VLAN for the primary IP address of the server.
+func (r Hardware_Server) GetPrivateVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateVlan", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a backend network VLAN by searching for an IP address.
+func (r Hardware_Server) GetPrivateVlanByIpAddress(ipAddress *string) (resp datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		ipAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPrivateVlanByIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total number of processor cores, summed from all processors that are attached to a piece of hardware.
+func (r Hardware_Server) GetProcessorCoreAmount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProcessorCoreAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total number of physical processor cores, summed from all processors that are attached to a piece of hardware.
+func (r Hardware_Server) GetProcessorPhysicalCoreAmount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProcessorPhysicalCoreAmount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's processors.
+func (r Hardware_Server) GetProcessors() (resp []datatypes.Hardware_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProcessors", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether the bandwidth usage for this hardware for the current billing cycle is projected to exceed the allocation.
+func (r Hardware_Server) GetProjectedOverBandwidthAllocationFlag() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProjectedOverBandwidthAllocationFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The projected public outbound bandwidth for this hardware for the current billing cycle.
+func (r Hardware_Server) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Hardware_Server) GetProvisionDate() (resp datatypes.Time, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getProvisionDate", nil, &r.Options, &resp)
+	return
+}
+func (r Hardware_Server) GetPublicBandwidthData(startTime *int, endTime *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicBandwidthData", params, &r.Options, &resp) + return +} + +// Retrieve a brief summary of a server's public network bandwidth usage. getPublicBandwidthDataSummary retrieves a server's bandwidth allocation for its billing period, its estimated usage during its billing period, and an estimation of how much bandwidth it will use during its billing period based on its current usage. A server's projected bandwidth usage increases in accuracy as it progresses through its billing period. +func (r Hardware_Server) GetPublicBandwidthDataSummary() (resp datatypes.Container_Network_Bandwidth_Data_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicBandwidthDataSummary", nil, &r.Options, &resp) + return +} + +// Retrieve a graph of a server's public network bandwidth usage over the specified time frame. If no time frame is specified then getPublicBandwidthGraphImage retrieves the last 24 hours of public bandwidth usage. getPublicBandwidthGraphImage returns a PNG image measuring 827 pixels by 293 pixels. THIS METHOD GENERATES GRAPHS BASED ON THE NEW DATA WAREHOUSE REPOSITORY. +func (r Hardware_Server) GetPublicBandwidthGraphImage(startTime *datatypes.Time, endTime *datatypes.Time) (resp []byte, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicBandwidthGraphImage", params, &r.Options, &resp) + return +} + +// Retrieve the total number of bytes used by a server over a specified time period via the data warehouse tracking objects for this hardware. +func (r Hardware_Server) GetPublicBandwidthTotal(startTime *int, endTime *int) (resp uint, err error) { + params := []interface{}{ + startTime, + endTime, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicBandwidthTotal", params, &r.Options, &resp) + return +} + +// Retrieve a SoftLayer server's public network component. Some servers are only connected to the private network and may not have a public network component. In that case getPublicNetworkComponent returns a null object. +func (r Hardware_Server) GetPublicNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve the frontend VLAN for the primary IP address of the server +func (r Hardware_Server) GetPublicVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicVlan", nil, &r.Options, &resp) + return +} + +// Retrieve the frontend network Vlan by searching the hostname of a server +func (r Hardware_Server) GetPublicVlanByHostname(hostname *string) (resp datatypes.Network_Vlan, err error) { + params := []interface{}{ + hostname, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getPublicVlanByHostname", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetRack() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRack", nil, &r.Options, &resp) + return +} + +// Retrieve The RAID controllers contained within a piece of hardware. 
+func (r Hardware_Server) GetRaidControllers() (resp []datatypes.Hardware_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRaidControllers", nil, &r.Options, &resp) + return +} + +// Retrieve Recent events that impact this hardware. +func (r Hardware_Server) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRecentEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The last five commands issued to the server's remote management card. +func (r Hardware_Server) GetRecentRemoteManagementCommands() (resp []datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRecentRemoteManagementCommands", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRegionalInternetRegistry", nil, &r.Options, &resp) + return +} + +// Retrieve A server's remote management card. +func (r Hardware_Server) GetRemoteManagement() (resp datatypes.Hardware_Component_RemoteManagement, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRemoteManagement", nil, &r.Options, &resp) + return +} + +// Retrieve User credentials to issue commands and/or interact with the server's remote management card. +func (r Hardware_Server) GetRemoteManagementAccounts() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRemoteManagementAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware's associated remote management component. This is normally IPMI. +func (r Hardware_Server) GetRemoteManagementComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRemoteManagementComponent", nil, &r.Options, &resp) + return +} + +// Retrieve User(s) who have access to issue commands and/or interact with the server's remote management card. +func (r Hardware_Server) GetRemoteManagementUsers() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRemoteManagementUsers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetResourceConfigurations() (resp []datatypes.Hardware_Resource_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getResourceConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetResourceGroupMemberReferences() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getResourceGroupMemberReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Hardware_Server) GetResourceGroupRoles() (resp []datatypes.Resource_Group_Role, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getResourceGroupRoles", nil, &r.Options, &resp) + return +} + +// Retrieve The resource groups in which this hardware is a member. +func (r Hardware_Server) GetResourceGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getResourceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve the reverse domain records associated with this server. 
+func (r Hardware_Server) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getReverseDomainRecords", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A hardware's routers.
+func (r Hardware_Server) GetRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Collection of scale assets this hardware corresponds to.
+func (r Hardware_Server) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getScaleAssets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's vulnerability scan requests.
+func (r Hardware_Server) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSecurityScanRequests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server's hardware state via its internal sensors. Remote sensor data is transmitted to the SoftLayer API by way of the server's remote management card. Sensor data measures system temperatures, voltages, and other local server settings. Sensor data is cached for 30 seconds. Calls made to getSensorData for the same server within 30 seconds of each other will return the same data. Subsequent calls will return new data once the cache expires.
+func (r Hardware_Server) GetSensorData() (resp []datatypes.Container_RemoteManagement_SensorReading, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSensorData", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the raw data returned from the server's remote management card. For more details of what is returned, please refer to the getSensorData method. Along with the raw data, graphs for the CPU and system temperatures and fan speeds are also returned.
+func (r Hardware_Server) GetSensorDataWithGraphs() (resp datatypes.Container_RemoteManagement_SensorReadingsWithGraphs, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSensorDataWithGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a server's hardware components, software, and network components. getServerDetails is an aggregation function that combines the results of [[SoftLayer_Hardware_Server::getComponents]], [[SoftLayer_Hardware_Server::getSoftware]], and [[SoftLayer_Hardware_Server::getNetworkComponents]] in a single container.
+func (r Hardware_Server) GetServerDetails() (resp datatypes.Container_Hardware_Server_Details, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServerDetails", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the server's fan speeds and displays them using tachometer graphs. Data used to construct graphs is retrieved from the server's remote management card. Each graph returned will have a title associated with it.
+func (r Hardware_Server) GetServerFanSpeedGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorSpeed, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServerFanSpeedGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the power state for the server. The server's power status is retrieved from its remote management card. This will return 'on' or 'off'.
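+//
+// A hedged sketch of checking the state before issuing a power command (assumes a
+// configured session and the usual generated constructor; names are illustrative):
+//
+//	server := services.GetHardwareServerService(sess).Id(hardwareId)
+//	state, err := server.GetServerPowerState()
+//	if err == nil && state == "off" {
+//		_, err = server.PowerOn()
+//	}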
+func (r Hardware_Server) GetServerPowerState() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServerPowerState", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the server room in which the hardware is located.
+func (r Hardware_Server) GetServerRoom() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServerRoom", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the server's temperatures and displays them using thermometer graphs. Temperatures retrieved are CPU(s) and system temperatures. Data used to construct graphs is retrieved from the server's remote management card. Each graph returned will have a title associated with it.
+func (r Hardware_Server) GetServerTemperatureGraphs() (resp []datatypes.Container_RemoteManagement_Graphs_SensorTemperature, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServerTemperatureGraphs", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the piece of hardware's service provider.
+func (r Hardware_Server) GetServiceProvider() (resp datatypes.Service_Provider, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getServiceProvider", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's installed software.
+func (r Hardware_Server) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSoftwareComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the billing item for a spare pool server.
+func (r Hardware_Server) GetSparePoolBillingItem() (resp datatypes.Billing_Item_Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSparePoolBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve SSH keys to be installed on the server during provisioning or an OS reload.
+func (r Hardware_Server) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getSshKeys", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A server's remote management card used for statistics.
+func (r Hardware_Server) GetStatisticsRemoteManagement() (resp datatypes.Hardware_Component_RemoteManagement, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getStatisticsRemoteManagement", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Server) GetStorageNetworkComponents() (resp []datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getStorageNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Server) GetTagReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getTagReferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Hardware_Server) GetTopLevelLocation() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getTopLevelLocation", nil, &r.Options, &resp)
+	return
+}
+
+//
+// This method will query transaction history for a piece of hardware.
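+//
+// Like the other generated list calls, results can be paged through the option
+// helpers, assuming this service exposes the standard Id/Limit/Offset helpers
+// (a sketch; sess and hardwareId are illustrative):
+//
+//	server := services.GetHardwareServerService(sess).Id(hardwareId)
+//	history, err := server.Limit(25).Offset(0).GetTransactionHistory()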
+func (r Hardware_Server) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction_History, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getTransactionHistory", nil, &r.Options, &resp) + return +} + +// Retrieve a list of upgradeable items available to this piece of hardware. Currently, getUpgradeItemPrices retrieves upgrades available for a server's memory, hard drives, network port speed, bandwidth allocation and GPUs. +func (r Hardware_Server) GetUpgradeItemPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUpgradeItemPrices", nil, &r.Options, &resp) + return +} + +// Retrieve An account's associated upgrade request object, if any. +func (r Hardware_Server) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUpgradeRequest", nil, &r.Options, &resp) + return +} + +// Retrieve The network device connected to a piece of hardware. +func (r Hardware_Server) GetUplinkHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUplinkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the network component that is one level higher than a piece of hardware on the network infrastructure. +func (r Hardware_Server) GetUplinkNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUplinkNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A string containing custom user data for a hardware order. +func (r Hardware_Server) GetUserData() (resp []datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUserData", nil, &r.Options, &resp) + return +} + +// Retrieve A list of users that have access to this computing instance. +func (r Hardware_Server) GetUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getUsers", nil, &r.Options, &resp) + return +} + +// This method will return the list of block device template groups that are valid to the host. For instance, it will only retrieve FLEX images. +func (r Hardware_Server) GetValidBlockDeviceTemplateGroups(visibility *string) (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + params := []interface{}{ + visibility, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getValidBlockDeviceTemplateGroups", params, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis for a piece of hardware. +func (r Hardware_Server) GetVirtualChassis() (resp datatypes.Hardware_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualChassis", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the virtual chassis siblings for a piece of hardware. +func (r Hardware_Server) GetVirtualChassisSiblings() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualChassisSiblings", nil, &r.Options, &resp) + return +} + +// Retrieve A hardware server's virtual servers. +func (r Hardware_Server) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve A piece of hardware's virtual host record. 
+func (r Hardware_Server) GetVirtualHost() (resp datatypes.Virtual_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualHost", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding a piece of hardware's virtual software licenses.
+func (r Hardware_Server) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualLicenses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Information regarding the bandwidth allotment to which a piece of hardware belongs.
+func (r Hardware_Server) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualRack", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The id of the bandwidth allotment belonging to a piece of hardware.
+func (r Hardware_Server) GetVirtualRackId() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualRackId", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The name of the bandwidth allotment belonging to a piece of hardware.
+func (r Hardware_Server) GetVirtualRackName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualRackName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A piece of hardware's virtualization platform software.
+func (r Hardware_Server) GetVirtualizationPlatform() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getVirtualizationPlatform", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a list of Windows updates available for a server from the local SoftLayer Windows Server Update Services (WSUS) server. Windows servers provisioned by SoftLayer are configured to use the local WSUS server via the private network by default.
+func (r Hardware_Server) GetWindowsUpdateAvailableUpdates() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getWindowsUpdateAvailableUpdates", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a list of Windows updates installed on a server as reported by the local SoftLayer Windows Server Update Services (WSUS) server. Windows servers provisioned by SoftLayer are configured to use the local WSUS server via the private network by default.
+func (r Hardware_Server) GetWindowsUpdateInstalledUpdates() (resp []datatypes.Container_Utility_Microsoft_Windows_UpdateServices_UpdateItem, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getWindowsUpdateInstalledUpdates", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns an update status record for this server. That record will specify if the server is missing updates, or has updates that must be reinstalled or require a reboot to go into effect.
+func (r Hardware_Server) GetWindowsUpdateStatus() (resp datatypes.Container_Utility_Microsoft_Windows_UpdateServices_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "getWindowsUpdateStatus", nil, &r.Options, &resp)
+	return
+}
+
+// The '''importVirtualHost''' method attempts to import the host record for the virtualization platform running on a server.
+func (r Hardware_Server) ImportVirtualHost() (resp datatypes.Virtual_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "importVirtualHost", nil, &r.Options, &resp)
+	return
+}
+
+// Idera Bare Metal Server Restore is a backup agent designed specifically for performing full system restores from backups made with Idera Server Backup.
+func (r Hardware_Server) InitiateIderaBareMetalRestore() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "initiateIderaBareMetalRestore", nil, &r.Options, &resp)
+	return
+}
+
+// R1Soft Bare Metal Server Restore is an R1Soft disk agent designed specifically for performing full system restores from backups made with R1Soft CDP Server.
+func (r Hardware_Server) InitiateR1SoftBareMetalRestore() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "initiateR1SoftBareMetalRestore", nil, &r.Options, &resp)
+	return
+}
+
+// Issues a ping command and returns the success (true) or failure (false) of the ping command.
+func (r Hardware_Server) IsBackendPingable() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "isBackendPingable", nil, &r.Options, &resp)
+	return
+}
+
+// Issues a ping command and returns the success (true) or failure (false) of the ping command.
+func (r Hardware_Server) IsPingable() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "isPingable", nil, &r.Options, &resp)
+	return
+}
+
+// Determines if the server runs any version of the Microsoft Windows operating system. Returns ''true'' if it does and ''false'' otherwise.
+func (r Hardware_Server) IsWindowsServer() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "isWindowsServer", nil, &r.Options, &resp)
+	return
+}
+
+// You can launch firmware updates by selecting from your server list. It will bring your server offline for approximately 20 minutes while the updates are in progress.
+//
+// In the event of a hardware failure during this update, our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal.
+func (r Hardware_Server) MassFirmwareUpdate(hardwareIds []int, ipmi *bool, raidController *bool, bios *bool, harddrive *bool) (resp []datatypes.Container_Hardware_Server_Request, err error) {
+	params := []interface{}{
+		hardwareIds,
+		ipmi,
+		raidController,
+		bios,
+		harddrive,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "massFirmwareUpdate", params, &r.Options, &resp)
+	return
+}
+
+// Reloads current or customer specified operating system configuration.
+//
+// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server.
+//
+// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record.
+//
+// The reload will take AT MINIMUM 66 minutes.
+func (r Hardware_Server) MassReloadOperatingSystem(hardwareIds []string, token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) { + params := []interface{}{ + hardwareIds, + token, + config, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "massReloadOperatingSystem", params, &r.Options, &resp) + return +} + +// The ability to place multiple bare metal servers in a state where they are powered down and ports closed yet still allocated to the customer as a part of the Spare Pool program. +func (r Hardware_Server) MassSparePool(hardwareIds []string, action *string, newOrder *bool) (resp []datatypes.Container_Hardware_Server_Request, err error) { + params := []interface{}{ + hardwareIds, + action, + newOrder, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "massSparePool", params, &r.Options, &resp) + return +} + +// Issues a ping command to the server and returns the ping response. +func (r Hardware_Server) Ping() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "ping", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Hardware_Server) PopulateServer(hardwareId *int, serialString *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + hardwareId, + serialString, + } + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "populateServer", params, &r.Options, &resp) + return +} + +// Power off then power on the server via powerstrip. The power cycle command is equivalent to unplugging the server from the powerstrip and then plugging the server back into the powerstrip. This should only be used as a last resort. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_Server) PowerCycle() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "powerCycle", nil, &r.Options, &resp) + return +} + +// This method will power off the server via the server's remote management card. +func (r Hardware_Server) PowerOff() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "powerOff", nil, &r.Options, &resp) + return +} + +// Power on server via its remote management card. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. +func (r Hardware_Server) PowerOn() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Hardware_Server", "powerOn", nil, &r.Options, &resp) + return +} + +// Attempts to reboot the server by issuing a reset (soft reboot) command to the server's remote management card. If the reset (soft reboot) attempt is unsuccessful, a power cycle command will be issued via the powerstrip. The power cycle command is equivalent to unplugging the server from the powerstrip and then plugging the server back into the powerstrip. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures. 
+func (r Hardware_Server) RebootDefault() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "rebootDefault", nil, &r.Options, &resp)
+	return
+}
+
+// Reboot the server by issuing a cycle command to the server's remote management card. This is equivalent to pressing the 'Reset' button on the server. This command is issued immediately and will not wait for processes to shut down. After this command is issued, the server may take a few moments to boot up as the server may run system disk checks. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures.
+func (r Hardware_Server) RebootHard() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "rebootHard", nil, &r.Options, &resp)
+	return
+}
+
+// Reboot the server by issuing a reset command to the server's remote management card. This is a graceful reboot. The server will allow all processes to shut down gracefully before rebooting. If a reboot command has been issued successfully in the past 20 minutes, another remote management command (rebootSoft, rebootHard, powerOn, powerOff and powerCycle) will not be allowed. This is to avoid any type of server failures.
+func (r Hardware_Server) RebootSoft() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "rebootSoft", nil, &r.Options, &resp)
+	return
+}
+
+// Reloads current operating system configuration.
+//
+// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server.
+//
+// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record.
+//
+// The reload will take AT MINIMUM 66 minutes.
+func (r Hardware_Server) ReloadCurrentOperatingSystemConfiguration(token *string) (resp string, err error) {
+	params := []interface{}{
+		token,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "reloadCurrentOperatingSystemConfiguration", params, &r.Options, &resp)
+	return
+}
+
+// Reloads current or customer specified operating system configuration.
+//
+// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server.
+//
+// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the server to the current specifications on record.
+//
+// The reload will take AT MINIMUM 66 minutes.
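+//
+// A sketch of the two-step confirmation flow described above (assumes a configured
+// session and the usual generated constructor; error handling abbreviated):
+//
+//	server := services.GetHardwareServerService(sess).Id(hardwareId)
+//	token, err := server.ReloadOperatingSystem(nil, nil) // first call returns a token
+//	if err == nil {
+//		// confirm within 10 minutes by passing the token back
+//		_, err = server.ReloadOperatingSystem(&token, nil)
+//	}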
+func (r Hardware_Server) ReloadOperatingSystem(token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) {
+	params := []interface{}{
+		token,
+		config,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "reloadOperatingSystem", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to remove access to a SoftLayer_Network_Storage volume that supports host- or network-level access control.
+func (r Hardware_Server) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "removeAccessToNetworkStorage", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to remove access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control.
+func (r Hardware_Server) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "removeAccessToNetworkStorageList", params, &r.Options, &resp)
+	return
+}
+
+// You can launch a new Passmark hardware test by selecting from your server list. It will bring your server offline for approximately 20 minutes while the testing is in progress, and will publish a certificate with the results to your hardware details page.
+//
+// While the hard drives are tested for the initial deployment, the Passmark Certificate utility will not test the hard drives on your live server. This is to ensure that no data is overwritten. If you would like to test the server's hard drives, you can have the full Passmark suite installed to your server free of charge through a new Support ticket.
+//
+// While the test itself does not overwrite any data on the server, it is recommended that you make full off-server backups of all data prior to launching the test. The Passmark hardware test is designed to force any latent hardware issues to the surface, so hardware failure is possible.
+//
+// In the event of a hardware failure during this test, our datacenter engineers will be notified of the problem automatically. They will then replace any failed components to bring your server back online, and will be contacting you to ensure that impact on your server is minimal.
+func (r Hardware_Server) RunPassmarkCertificationBenchmark() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "runPassmarkCertificationBenchmark", nil, &r.Options, &resp)
+	return
+}
+
+// Changes the password that we have stored in our database for a server's operating system
+func (r Hardware_Server) SetOperatingSystemPassword(newPassword *string) (resp bool, err error) {
+	params := []interface{}{
+		newPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "setOperatingSystemPassword", params, &r.Options, &resp)
+	return
+}
+
+// Sets the private network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, 1000, and 10000. The new speed must be equal to or less than the max speed of the interface.
+//
+// It will take less than a minute to update the switch port speed. The server uplink will not be operational again until the server interface speed is updated.
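+//
+// For example (a sketch; sess and hardwareId are illustrative, and the constructor
+// is the usual generated one):
+//
+//	speed := 1000 // one of 0 (Disconnect), 10, 100, 1000, 10000
+//	ok, err := services.GetHardwareServerService(sess).Id(hardwareId).SetPrivateNetworkInterfaceSpeed(&speed)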
+func (r Hardware_Server) SetPrivateNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) {
+	params := []interface{}{
+		newSpeed,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "setPrivateNetworkInterfaceSpeed", params, &r.Options, &resp)
+	return
+}
+
+// Sets the public network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, 1000, and 10000. The new speed must be equal to or less than the max speed of the interface.
+//
+// It will take less than a minute to update the switch port speed. The server uplink will not be operational again until the server interface speed is updated.
+func (r Hardware_Server) SetPublicNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) {
+	params := []interface{}{
+		newSpeed,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "setPublicNetworkInterfaceSpeed", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Hardware_Server) SetTags(tags *string) (resp bool, err error) {
+	params := []interface{}{
+		tags,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "setTags", params, &r.Options, &resp)
+	return
+}
+
+// Sets the data that will be written to the configuration drive.
+func (r Hardware_Server) SetUserMetadata(metadata []string) (resp []datatypes.Hardware_Attribute, err error) {
+	params := []interface{}{
+		metadata,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "setUserMetadata", params, &r.Options, &resp)
+	return
+}
+
+// Shuts down the private network port
+func (r Hardware_Server) ShutdownPrivatePort() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "shutdownPrivatePort", nil, &r.Options, &resp)
+	return
+}
+
+// Shuts down the public network port
+func (r Hardware_Server) ShutdownPublicPort() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "shutdownPublicPort", nil, &r.Options, &resp)
+	return
+}
+
+// The ability to place bare metal servers in a state where they are powered down, and ports closed yet still allocated to the customer as a part of the Spare Pool program.
+func (r Hardware_Server) SparePool(action *string, newOrder *bool) (resp bool, err error) {
+	params := []interface{}{
+		action,
+		newOrder,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "sparePool", params, &r.Options, &resp)
+	return
+}
+
+// This method will update the root IPMI password on this SoftLayer_Hardware.
+func (r Hardware_Server) UpdateIpmiPassword(password *string) (resp bool, err error) {
+	params := []interface{}{
+		password,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "updateIpmiPassword", params, &r.Options, &resp)
+	return
+}
+
+// Validates a collection of partitions for an operating system
+func (r Hardware_Server) ValidatePartitionsForOperatingSystem(operatingSystem *datatypes.Software_Description, partitions []datatypes.Hardware_Component_Partition) (resp bool, err error) {
+	params := []interface{}{
+		operatingSystem,
+		partitions,
+	}
+	err = r.Session.DoRequest("SoftLayer_Hardware_Server", "validatePartitionsForOperatingSystem", params, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/layout.go b/vendor/github.com/softlayer/softlayer-go/services/layout.go
new file mode 100644
index 0000000000..1446d32b87
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/layout.go
@@ -0,0 +1,512 @@
+/**
+ * Copyright 2016 IBM Corp.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Layout_Container contains definitions for default page layouts +type Layout_Container struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutContainerService returns an instance of the Layout_Container SoftLayer service +func GetLayoutContainerService(sess *session.Session) Layout_Container { + return Layout_Container{Session: sess} +} + +func (r Layout_Container) Id(id int) Layout_Container { + r.Options.Id = &id + return r +} + +func (r Layout_Container) Mask(mask string) Layout_Container { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Container) Filter(filter string) Layout_Container { + r.Options.Filter = filter + return r +} + +func (r Layout_Container) Limit(limit int) Layout_Container { + r.Options.Limit = &limit + return r +} + +func (r Layout_Container) Offset(offset int) Layout_Container { + r.Options.Offset = &offset + return r +} + +// Use this method to retrieve all active layout containers that can be customized. 
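+//
+// For example, the option helpers defined above chain onto the service before the
+// call (a sketch; sess is an illustrative configured *session.Session, and the
+// mask fields are illustrative property names):
+//
+//	containers, err := services.GetLayoutContainerService(sess).
+//		Mask("id,name").
+//		Limit(10).
+//		GetAllObjects()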
+func (r Layout_Container) GetAllObjects() (resp []datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Container", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The type of the layout container object +func (r Layout_Container) GetLayoutContainerType() (resp datatypes.Layout_Container_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Container", "getLayoutContainerType", nil, &r.Options, &resp) + return +} + +// Retrieve The layout items assigned to this layout container +func (r Layout_Container) GetLayoutItems() (resp []datatypes.Layout_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Container", "getLayoutItems", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Container) GetObject() (resp datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Container", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Layout_Item contains definitions for default layout items +type Layout_Item struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutItemService returns an instance of the Layout_Item SoftLayer service +func GetLayoutItemService(sess *session.Session) Layout_Item { + return Layout_Item{Session: sess} +} + +func (r Layout_Item) Id(id int) Layout_Item { + r.Options.Id = &id + return r +} + +func (r Layout_Item) Mask(mask string) Layout_Item { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Item) Filter(filter string) Layout_Item { + r.Options.Filter = filter + return r +} + +func (r Layout_Item) Limit(limit int) Layout_Item { + r.Options.Limit = &limit + return r +} + +func (r Layout_Item) Offset(offset int) Layout_Item { + r.Options.Offset = &offset + return r +} + +// Retrieve The layout preferences assigned to this layout item +func (r Layout_Item) GetLayoutItemPreferences() (resp []datatypes.Layout_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Item", "getLayoutItemPreferences", nil, &r.Options, &resp) + return +} + +// Retrieve The type of the layout item object +func (r Layout_Item) GetLayoutItemType() (resp datatypes.Layout_Item_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Item", "getLayoutItemType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Item) GetObject() (resp datatypes.Layout_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Item", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Layout_Profile contains the definition of the layout profile +type Layout_Profile struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutProfileService returns an instance of the Layout_Profile SoftLayer service +func GetLayoutProfileService(sess *session.Session) Layout_Profile { + return Layout_Profile{Session: sess} +} + +func (r Layout_Profile) Id(id int) Layout_Profile { + r.Options.Id = &id + return r +} + +func (r Layout_Profile) Mask(mask string) Layout_Profile { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Profile) Filter(filter string) Layout_Profile { + r.Options.Filter = filter + return r +} + +func (r Layout_Profile) Limit(limit int) Layout_Profile { + 
r.Options.Limit = &limit + return r +} + +func (r Layout_Profile) Offset(offset int) Layout_Profile { + r.Options.Offset = &offset + return r +} + +// This method creates a new layout profile object. +func (r Layout_Profile) CreateObject(templateObject *datatypes.Layout_Profile) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "createObject", params, &r.Options, &resp) + return +} + +// This method deletes an existing layout profile and associated custom preferences +func (r Layout_Profile) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method edits an existing layout profile object by passing in a modified instance of the object. +func (r Layout_Profile) EditObject(templateObject *datatypes.Layout_Profile) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile) GetLayoutContainers() (resp []datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "getLayoutContainers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile) GetLayoutPreferences() (resp []datatypes.Layout_Profile_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "getLayoutPreferences", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Profile) GetObject() (resp datatypes.Layout_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "getObject", nil, &r.Options, &resp) + return +} + +// This method modifies an existing associated [[SoftLayer_Layout_Profile_Preference]] object. If the preference object being modified is a default value object, a new record is created to override the default value. +// +// Only preferences that are assigned to a profile may be updated. Attempts to update a non-existent preference object will result in an exception being thrown. +func (r Layout_Profile) ModifyPreference(templateObject *datatypes.Layout_Profile_Preference) (resp datatypes.Layout_Profile_Preference, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "modifyPreference", params, &r.Options, &resp) + return +} + +// Using this method, multiple [[SoftLayer_Layout_Profile_Preference]] objects may be updated at once. +// +// Refer to [[SoftLayer_Layout_Profile::modifyPreference()]] for more information. 
+func (r Layout_Profile) ModifyPreferences(layoutPreferenceObjects []datatypes.Layout_Profile_Preference) (resp []datatypes.Layout_Profile_Preference, err error) { + params := []interface{}{ + layoutPreferenceObjects, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile", "modifyPreferences", params, &r.Options, &resp) + return +} + +// no documentation yet +type Layout_Profile_Containers struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutProfileContainersService returns an instance of the Layout_Profile_Containers SoftLayer service +func GetLayoutProfileContainersService(sess *session.Session) Layout_Profile_Containers { + return Layout_Profile_Containers{Session: sess} +} + +func (r Layout_Profile_Containers) Id(id int) Layout_Profile_Containers { + r.Options.Id = &id + return r +} + +func (r Layout_Profile_Containers) Mask(mask string) Layout_Profile_Containers { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Profile_Containers) Filter(filter string) Layout_Profile_Containers { + r.Options.Filter = filter + return r +} + +func (r Layout_Profile_Containers) Limit(limit int) Layout_Profile_Containers { + r.Options.Limit = &limit + return r +} + +func (r Layout_Profile_Containers) Offset(offset int) Layout_Profile_Containers { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Layout_Profile_Containers) CreateObject(templateObject *datatypes.Layout_Profile_Containers) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Containers", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Profile_Containers) EditObject(templateObject *datatypes.Layout_Profile_Containers) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Containers", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The container to be contained +func (r Layout_Profile_Containers) GetLayoutContainerType() (resp datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Containers", "getLayoutContainerType", nil, &r.Options, &resp) + return +} + +// Retrieve The profile containing this container +func (r Layout_Profile_Containers) GetLayoutProfile() (resp datatypes.Layout_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Containers", "getLayoutProfile", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Profile_Containers) GetObject() (resp datatypes.Layout_Profile_Containers, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Containers", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Layout_Profile_Customer struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutProfileCustomerService returns an instance of the Layout_Profile_Customer SoftLayer service +func GetLayoutProfileCustomerService(sess *session.Session) Layout_Profile_Customer { + return Layout_Profile_Customer{Session: sess} +} + +func (r Layout_Profile_Customer) Id(id int) Layout_Profile_Customer { + r.Options.Id = &id + return r +} + +func (r Layout_Profile_Customer) Mask(mask string) Layout_Profile_Customer { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || 
strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Profile_Customer) Filter(filter string) Layout_Profile_Customer { + r.Options.Filter = filter + return r +} + +func (r Layout_Profile_Customer) Limit(limit int) Layout_Profile_Customer { + r.Options.Limit = &limit + return r +} + +func (r Layout_Profile_Customer) Offset(offset int) Layout_Profile_Customer { + r.Options.Offset = &offset + return r +} + +// This method creates a new layout profile object. +func (r Layout_Profile_Customer) CreateObject(templateObject *datatypes.Layout_Profile) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "createObject", params, &r.Options, &resp) + return +} + +// This method deletes an existing layout profile and associated custom preferences +func (r Layout_Profile_Customer) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method edits an existing layout profile object by passing in a modified instance of the object. +func (r Layout_Profile_Customer) EditObject(templateObject *datatypes.Layout_Profile) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Customer) GetLayoutContainers() (resp []datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "getLayoutContainers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Customer) GetLayoutPreferences() (resp []datatypes.Layout_Profile_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "getLayoutPreferences", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Profile_Customer) GetObject() (resp datatypes.Layout_Profile_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Customer) GetUserRecord() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "getUserRecord", nil, &r.Options, &resp) + return +} + +// This method modifies an existing associated [[SoftLayer_Layout_Profile_Preference]] object. If the preference object being modified is a default value object, a new record is created to override the default value. +// +// Only preferences that are assigned to a profile may be updated. Attempts to update a non-existent preference object will result in an exception being thrown. +func (r Layout_Profile_Customer) ModifyPreference(templateObject *datatypes.Layout_Profile_Preference) (resp datatypes.Layout_Profile_Preference, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "modifyPreference", params, &r.Options, &resp) + return +} + +// Using this method, multiple [[SoftLayer_Layout_Profile_Preference]] objects may be updated at once. +// +// Refer to [[SoftLayer_Layout_Profile::modifyPreference()]] for more information. 
+func (r Layout_Profile_Customer) ModifyPreferences(layoutPreferenceObjects []datatypes.Layout_Profile_Preference) (resp []datatypes.Layout_Profile_Preference, err error) { + params := []interface{}{ + layoutPreferenceObjects, + } + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Customer", "modifyPreferences", params, &r.Options, &resp) + return +} + +// The SoftLayer_Layout_Profile_Preference contains definitions for layout preferences +type Layout_Profile_Preference struct { + Session *session.Session + Options sl.Options +} + +// GetLayoutProfilePreferenceService returns an instance of the Layout_Profile_Preference SoftLayer service +func GetLayoutProfilePreferenceService(sess *session.Session) Layout_Profile_Preference { + return Layout_Profile_Preference{Session: sess} +} + +func (r Layout_Profile_Preference) Id(id int) Layout_Profile_Preference { + r.Options.Id = &id + return r +} + +func (r Layout_Profile_Preference) Mask(mask string) Layout_Profile_Preference { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Layout_Profile_Preference) Filter(filter string) Layout_Profile_Preference { + r.Options.Filter = filter + return r +} + +func (r Layout_Profile_Preference) Limit(limit int) Layout_Profile_Preference { + r.Options.Limit = &limit + return r +} + +func (r Layout_Profile_Preference) Offset(offset int) Layout_Profile_Preference { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Layout_Profile_Preference) GetLayoutContainer() (resp datatypes.Layout_Container, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Preference", "getLayoutContainer", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Preference) GetLayoutItem() (resp datatypes.Layout_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Preference", "getLayoutItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Preference) GetLayoutPreference() (resp datatypes.Layout_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Preference", "getLayoutPreference", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Layout_Profile_Preference) GetLayoutProfile() (resp datatypes.Layout_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Preference", "getLayoutProfile", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Layout_Profile_Preference) GetObject() (resp datatypes.Layout_Profile_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_Layout_Profile_Preference", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/locale.go b/vendor/github.com/softlayer/softlayer-go/services/locale.go new file mode 100644 index 0000000000..273ec82d26 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/locale.go @@ -0,0 +1,201 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Locale struct { + Session *session.Session + Options sl.Options +} + +// GetLocaleService returns an instance of the Locale SoftLayer service +func GetLocaleService(sess *session.Session) Locale { + return Locale{Session: sess} +} + +func (r Locale) Id(id int) Locale { + r.Options.Id = &id + return r +} + +func (r Locale) Mask(mask string) Locale { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Locale) Filter(filter string) Locale { + r.Options.Filter = filter + return r +} + +func (r Locale) Limit(limit int) Locale { + r.Options.Limit = &limit + return r +} + +func (r Locale) Offset(offset int) Locale { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Locale) GetClosestToLanguageTag(languageTag *string) (resp datatypes.Locale, err error) { + params := []interface{}{ + languageTag, + } + err = r.Session.DoRequest("SoftLayer_Locale", "getClosestToLanguageTag", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Locale) GetObject() (resp datatypes.Locale, err error) { + err = r.Session.DoRequest("SoftLayer_Locale", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Locale_Country struct { + Session *session.Session + Options sl.Options +} + +// GetLocaleCountryService returns an instance of the Locale_Country SoftLayer service +func GetLocaleCountryService(sess *session.Session) Locale_Country { + return Locale_Country{Session: sess} +} + +func (r Locale_Country) Id(id int) Locale_Country { + r.Options.Id = &id + return r +} + +func (r Locale_Country) Mask(mask string) Locale_Country { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Locale_Country) Filter(filter string) Locale_Country { + r.Options.Filter = filter + return r +} + +func (r Locale_Country) Limit(limit int) Locale_Country { + r.Options.Limit = &limit + return r +} + +func (r Locale_Country) Offset(offset int) Locale_Country { + r.Options.Offset = &offset + return r +} + +// Use this method to retrieve a list of countries and locale information available to the current user. +func (r Locale_Country) GetAvailableCountries() (resp []datatypes.Locale_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Locale_Country", "getAvailableCountries", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve a list of countries and locale information such as country code and state/provinces. +func (r Locale_Country) GetCountries() (resp []datatypes.Locale_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Locale_Country", "getCountries", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Locale_Country) GetObject() (resp datatypes.Locale_Country, err error) { + err = r.Session.DoRequest("SoftLayer_Locale_Country", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve States that belong to this country. 
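+//
+// A short sketch (sess and countryId are illustrative; the id is supplied through
+// the generated Id helper defined above):
+//
+//	states, err := services.GetLocaleCountryService(sess).Id(countryId).GetStates()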
+func (r Locale_Country) GetStates() (resp []datatypes.Locale_StateProvince, err error) {
+	err = r.Session.DoRequest("SoftLayer_Locale_Country", "getStates", nil, &r.Options, &resp)
+	return
+}
+
+// Each User is assigned a timezone allowing for a precise local timestamp.
+type Locale_Timezone struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetLocaleTimezoneService returns an instance of the Locale_Timezone SoftLayer service
+func GetLocaleTimezoneService(sess *session.Session) Locale_Timezone {
+	return Locale_Timezone{Session: sess}
+}
+
+func (r Locale_Timezone) Id(id int) Locale_Timezone {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Locale_Timezone) Mask(mask string) Locale_Timezone {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Locale_Timezone) Filter(filter string) Locale_Timezone {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Locale_Timezone) Limit(limit int) Locale_Timezone {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Locale_Timezone) Offset(offset int) Locale_Timezone {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve all timezone objects.
+func (r Locale_Timezone) GetAllObjects() (resp []datatypes.Locale_Timezone, err error) {
+	err = r.Session.DoRequest("SoftLayer_Locale_Timezone", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Locale_Timezone object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Locale_Timezone service.
+func (r Locale_Timezone) GetObject() (resp datatypes.Locale_Timezone, err error) {
+	err = r.Session.DoRequest("SoftLayer_Locale_Timezone", "getObject", nil, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/location.go b/vendor/github.com/softlayer/softlayer-go/services/location.go
new file mode 100644
index 0000000000..66134911fb
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/location.go
@@ -0,0 +1,866 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// Every piece of hardware and network connection owned by SoftLayer is tracked physically by location and stored in the SoftLayer_Location data type. SoftLayer locations exist in parent/child relationships, a convenient way to track equipment from its city, datacenter, server room, rack, then slot. Network backbones are tied to datacenters only, not to a room, rack, or slot.
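+//
+// A minimal usage sketch, assuming an authenticated session (username and
+// apiKey are placeholders); the object mask is illustrative and is wrapped
+// into "mask[...]" form by the Mask helper below:
+//
+//	sess := session.New(username, apiKey)
+//	datacenters, err := services.GetLocationService(sess).Mask("id,name,longName").GetDatacenters()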
+type Location struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetLocationService returns an instance of the Location SoftLayer service
+func GetLocationService(sess *session.Session) Location {
+	return Location{Session: sess}
+}
+
+func (r Location) Id(id int) Location {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Location) Mask(mask string) Location {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Location) Filter(filter string) Location {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Location) Limit(limit int) Location {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Location) Offset(offset int) Location {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Object Storage is only available in select datacenters. This method will return all the datacenters where object storage is available.
+func (r Location) GetAvailableObjectStorageDatacenters() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location", "getAvailableObjectStorageDatacenters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location) GetBackboneDependents() (resp []datatypes.Network_Backbone_Location_Dependent, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location", "getBackboneDependents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve all datacenter locations. SoftLayer's datacenters exist in various cities and each contain one or more server rooms which house network and server infrastructure.
+func (r Location) GetDatacenters() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location", "getDatacenters", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Location) GetDatacentersWithVirtualImageStoreServiceResourceRecord() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location", "getDatacentersWithVirtualImageStoreServiceResourceRecord", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A location can be a member of 1 or more groups. This will show which groups to which a location belongs.
+func (r Location) GetGroups() (resp []datatypes.Location_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location", "getGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location) GetHardwareFirewalls() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location", "getHardwareFirewalls", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A location's physical address.
+func (r Location) GetLocationAddress() (resp datatypes.Account_Address, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location", "getLocationAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A location's Dedicated Rack member
+func (r Location) GetLocationReservationMember() (resp datatypes.Location_Reservation_Rack_Member, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location", "getLocationReservationMember", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current location's status.
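+//
+// For example (a sketch; assumes an authenticated session, and 123 is a
+// placeholder location id):
+//
+//	status, err := services.GetLocationService(sess).Id(123).GetLocationStatus()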
+func (r Location) GetLocationStatus() (resp datatypes.Location_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getLocationStatus", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location) GetNetworkConfigurationAttribute() (resp datatypes.Hardware_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getNetworkConfigurationAttribute", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location) GetObject() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of users online using SoftLayer's PPTP VPN service for a location. +func (r Location) GetOnlinePptpVpnUserCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getOnlinePptpVpnUserCount", nil, &r.Options, &resp) + return +} + +// Retrieve The total number of users online using SoftLayer's SSL VPN service for a location. +func (r Location) GetOnlineSslVpnUserCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getOnlineSslVpnUserCount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location) GetPathString() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getPathString", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 or more Price Groups. This will show which groups to which a location belongs. +func (r Location) GetPriceGroups() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getPriceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 or more regions. This will show which regions to which a location belongs. +func (r Location) GetRegions() (resp []datatypes.Location_Region, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getRegions", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location) GetTimezone() (resp datatypes.Locale_Timezone, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getTimezone", nil, &r.Options, &resp) + return +} + +// Retrieve A location can be a member of 1 Bandwidth Pooling Group. This will show which group to which a location belongs. +func (r Location) GetVdrGroup() (resp datatypes.Location_Group_Location_CrossReference, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getVdrGroup", nil, &r.Options, &resp) + return +} + +// Retrieve all datacenter locations. SoftLayer's datacenters exist in various cities and each contain one or more server rooms which house network and server infrastructure. +func (r Location) GetViewableDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getViewableDatacenters", nil, &r.Options, &resp) + return +} + +// Retrieve all viewable pop and datacenter locations. +func (r Location) GetViewablePopsAndDataCenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getViewablePopsAndDataCenters", nil, &r.Options, &resp) + return +} + +// Retrieve all viewable network locations. +func (r Location) GetViewablepointOfPresence() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location", "getViewablepointOfPresence", nil, &r.Options, &resp) + return +} + +// Retrieve all point of presence locations. 
+func (r Location) GetpointOfPresence() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location", "getpointOfPresence", nil, &r.Options, &resp)
+	return
+}
+
+// SoftLayer_Location_Datacenter extends the [[SoftLayer_Location]] data type to include datacenter-specific properties.
+type Location_Datacenter struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetLocationDatacenterService returns an instance of the Location_Datacenter SoftLayer service
+func GetLocationDatacenterService(sess *session.Session) Location_Datacenter {
+	return Location_Datacenter{Session: sess}
+}
+
+func (r Location_Datacenter) Id(id int) Location_Datacenter {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Location_Datacenter) Mask(mask string) Location_Datacenter {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Location_Datacenter) Filter(filter string) Location_Datacenter {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Location_Datacenter) Limit(limit int) Location_Datacenter {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Location_Datacenter) Offset(offset int) Location_Datacenter {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve
+func (r Location_Datacenter) GetActiveItemPresaleEvents() (resp []datatypes.Sales_Presale_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getActiveItemPresaleEvents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetActivePresaleEvents() (resp []datatypes.Sales_Presale_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getActivePresaleEvents", nil, &r.Options, &resp)
+	return
+}
+
+// Object Storage is only available in select datacenters. This method will return all the datacenters where object storage is available.
+func (r Location_Datacenter) GetAvailableObjectStorageDatacenters() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getAvailableObjectStorageDatacenters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetBackboneDependents() (resp []datatypes.Network_Backbone_Location_Dependent, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getBackboneDependents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetBackendHardwareRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getBackendHardwareRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Subnets which are directly bound to one or more routers in a given datacenter, and currently allow routing.
+func (r Location_Datacenter) GetBoundSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getBoundSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve This references the relationship between brands, locations and countries associated with a user's account that are ineligible when ordering products. For example, the India datacenter may not be available on this brand for customers that live in Great Britain.
+func (r Location_Datacenter) GetBrandCountryRestrictions() (resp []datatypes.Brand_Restriction_Location_CustomerCountry, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getBrandCountryRestrictions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve all datacenter locations. SoftLayer's datacenters exist in various cities and each contain one or more server rooms which house network and server infrastructure.
+func (r Location_Datacenter) GetDatacenters() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getDatacenters", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Location_Datacenter) GetDatacentersWithVirtualImageStoreServiceResourceRecord() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getDatacentersWithVirtualImageStoreServiceResourceRecord", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetFrontendHardwareRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getFrontendHardwareRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A location can be a member of 1 or more groups. This will show which groups to which a location belongs.
+func (r Location_Datacenter) GetGroups() (resp []datatypes.Location_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetHardwareFirewalls() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getHardwareFirewalls", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetHardwareRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getHardwareRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A location's physical address.
+func (r Location_Datacenter) GetLocationAddress() (resp datatypes.Account_Address, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getLocationAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A location's Dedicated Rack member
+func (r Location_Datacenter) GetLocationReservationMember() (resp datatypes.Location_Reservation_Rack_Member, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getLocationReservationMember", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current location's status.
+func (r Location_Datacenter) GetLocationStatus() (resp datatypes.Location_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getLocationStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetNetworkConfigurationAttribute() (resp datatypes.Hardware_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getNetworkConfigurationAttribute", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Location_Datacenter) GetObject() (resp datatypes.Location_Datacenter, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total number of users online using SoftLayer's PPTP VPN service for a location.
+func (r Location_Datacenter) GetOnlinePptpVpnUserCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getOnlinePptpVpnUserCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total number of users online using SoftLayer's SSL VPN service for a location.
+func (r Location_Datacenter) GetOnlineSslVpnUserCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getOnlineSslVpnUserCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetPathString() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getPathString", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetPresaleEvents() (resp []datatypes.Sales_Presale_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getPresaleEvents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A location can be a member of 1 or more Price Groups. This will show which groups to which a location belongs.
+func (r Location_Datacenter) GetPriceGroups() (resp []datatypes.Location_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getPriceGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The regional group this datacenter belongs to.
+func (r Location_Datacenter) GetRegionalGroup() (resp datatypes.Location_Group_Regional, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getRegionalGroup", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getRegionalInternetRegistry", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A location can be a member of 1 or more regions. This will show which regions to which a location belongs.
+func (r Location_Datacenter) GetRegions() (resp []datatypes.Location_Region, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getRegions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve all subnets that are eligible to be routed; those which the account has permission to associate with a vlan.
+func (r Location_Datacenter) GetRoutableBoundSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getRoutableBoundSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a graph of a SoftLayer datacenter's last 48 hours of network activity. Statistics graphs show traffic outbound from a datacenter on top and inbound traffic on the bottom followed by a legend of the network services tracked in the graph. getStatisticsGraphImage returns a PNG image of variable width and height depending on the number of services reported in the image.
+func (r Location_Datacenter) GetStatisticsGraphImage() (resp []byte, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getStatisticsGraphImage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Location_Datacenter) GetTimezone() (resp datatypes.Locale_Timezone, err error) {
+	err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getTimezone", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A location can be a member of 1 Bandwidth Pooling Group. This will show which group to which a location belongs.
+func (r Location_Datacenter) GetVdrGroup() (resp datatypes.Location_Group_Location_CrossReference, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getVdrGroup", nil, &r.Options, &resp) + return +} + +// Retrieve all datacenter locations. SoftLayer's datacenters exist in various cities and each contain one or more server rooms which house network and server infrastructure. +func (r Location_Datacenter) GetViewableDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getViewableDatacenters", nil, &r.Options, &resp) + return +} + +// Retrieve all viewable pop and datacenter locations. +func (r Location_Datacenter) GetViewablePopsAndDataCenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getViewablePopsAndDataCenters", nil, &r.Options, &resp) + return +} + +// Retrieve all viewable network locations. +func (r Location_Datacenter) GetViewablepointOfPresence() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getViewablepointOfPresence", nil, &r.Options, &resp) + return +} + +// Retrieve all point of presence locations. +func (r Location_Datacenter) GetpointOfPresence() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Datacenter", "getpointOfPresence", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Group struct { + Session *session.Session + Options sl.Options +} + +// GetLocationGroupService returns an instance of the Location_Group SoftLayer service +func GetLocationGroupService(sess *session.Session) Location_Group { + return Location_Group{Session: sess} +} + +func (r Location_Group) Id(id int) Location_Group { + r.Options.Id = &id + return r +} + +func (r Location_Group) Mask(mask string) Location_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Group) Filter(filter string) Location_Group { + r.Options.Filter = filter + return r +} + +func (r Location_Group) Limit(limit int) Location_Group { + r.Options.Limit = &limit + return r +} + +func (r Location_Group) Offset(offset int) Location_Group { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Location_Group) GetAllObjects() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The type for this location group. +func (r Location_Group) GetLocationGroupType() (resp datatypes.Location_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group", "getLocationGroupType", nil, &r.Options, &resp) + return +} + +// Retrieve The locations in a group. 
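+//
+// A sketch of fetching one page of group members, assuming an authenticated
+// session and a hypothetical group id; Limit and Offset drive result paging:
+//
+//	group := services.GetLocationGroupService(sess).Id(7).Limit(50).Offset(0)
+//	locations, err := group.GetLocations()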
+func (r Location_Group) GetLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group", "getLocations", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Group) GetObject() (resp datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Group_Pricing struct { + Session *session.Session + Options sl.Options +} + +// GetLocationGroupPricingService returns an instance of the Location_Group_Pricing SoftLayer service +func GetLocationGroupPricingService(sess *session.Session) Location_Group_Pricing { + return Location_Group_Pricing{Session: sess} +} + +func (r Location_Group_Pricing) Id(id int) Location_Group_Pricing { + r.Options.Id = &id + return r +} + +func (r Location_Group_Pricing) Mask(mask string) Location_Group_Pricing { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Group_Pricing) Filter(filter string) Location_Group_Pricing { + r.Options.Filter = filter + return r +} + +func (r Location_Group_Pricing) Limit(limit int) Location_Group_Pricing { + r.Options.Limit = &limit + return r +} + +func (r Location_Group_Pricing) Offset(offset int) Location_Group_Pricing { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Location_Group_Pricing) GetAllObjects() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Pricing", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The type for this location group. +func (r Location_Group_Pricing) GetLocationGroupType() (resp datatypes.Location_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Pricing", "getLocationGroupType", nil, &r.Options, &resp) + return +} + +// Retrieve The locations in a group. +func (r Location_Group_Pricing) GetLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Pricing", "getLocations", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Group_Pricing) GetObject() (resp datatypes.Location_Group_Pricing, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Pricing", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The prices that this pricing location group limits. All of these prices will only be available in the locations defined by this pricing location group. 
+func (r Location_Group_Pricing) GetPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Pricing", "getPrices", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Group_Regional struct { + Session *session.Session + Options sl.Options +} + +// GetLocationGroupRegionalService returns an instance of the Location_Group_Regional SoftLayer service +func GetLocationGroupRegionalService(sess *session.Session) Location_Group_Regional { + return Location_Group_Regional{Session: sess} +} + +func (r Location_Group_Regional) Id(id int) Location_Group_Regional { + r.Options.Id = &id + return r +} + +func (r Location_Group_Regional) Mask(mask string) Location_Group_Regional { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Group_Regional) Filter(filter string) Location_Group_Regional { + r.Options.Filter = filter + return r +} + +func (r Location_Group_Regional) Limit(limit int) Location_Group_Regional { + r.Options.Limit = &limit + return r +} + +func (r Location_Group_Regional) Offset(offset int) Location_Group_Regional { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Location_Group_Regional) GetAllObjects() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenters in a group. +func (r Location_Group_Regional) GetDatacenters() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getDatacenters", nil, &r.Options, &resp) + return +} + +// Retrieve The type for this location group. +func (r Location_Group_Regional) GetLocationGroupType() (resp datatypes.Location_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getLocationGroupType", nil, &r.Options, &resp) + return +} + +// Retrieve The locations in a group. +func (r Location_Group_Regional) GetLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getLocations", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Group_Regional) GetObject() (resp datatypes.Location_Group_Regional, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The preferred datacenters of a group. 
+func (r Location_Group_Regional) GetPreferredDatacenter() (resp datatypes.Location_Datacenter, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Group_Regional", "getPreferredDatacenter", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Reservation struct { + Session *session.Session + Options sl.Options +} + +// GetLocationReservationService returns an instance of the Location_Reservation SoftLayer service +func GetLocationReservationService(sess *session.Session) Location_Reservation { + return Location_Reservation{Session: sess} +} + +func (r Location_Reservation) Id(id int) Location_Reservation { + r.Options.Id = &id + return r +} + +func (r Location_Reservation) Mask(mask string) Location_Reservation { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Reservation) Filter(filter string) Location_Reservation { + r.Options.Filter = filter + return r +} + +func (r Location_Reservation) Limit(limit int) Location_Reservation { + r.Options.Limit = &limit + return r +} + +func (r Location_Reservation) Offset(offset int) Location_Reservation { + r.Options.Offset = &offset + return r +} + +// Retrieve The account that a billing item belongs to. +func (r Location_Reservation) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Reservation) GetAccountReservations() (resp []datatypes.Location_Reservation, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getAccountReservations", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotment that the reservation belongs to. +func (r Location_Reservation) GetAllotment() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getAllotment", nil, &r.Options, &resp) + return +} + +// Retrieve The bandwidth allotment that the reservation belongs to. +func (r Location_Reservation) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenter location that the reservation belongs to. 
+func (r Location_Reservation) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve Rack information for the reservation +func (r Location_Reservation) GetLocationReservationRack() (resp datatypes.Location_Reservation_Rack, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getLocationReservationRack", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Reservation) GetObject() (resp datatypes.Location_Reservation, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Reservation_Rack struct { + Session *session.Session + Options sl.Options +} + +// GetLocationReservationRackService returns an instance of the Location_Reservation_Rack SoftLayer service +func GetLocationReservationRackService(sess *session.Session) Location_Reservation_Rack { + return Location_Reservation_Rack{Session: sess} +} + +func (r Location_Reservation_Rack) Id(id int) Location_Reservation_Rack { + r.Options.Id = &id + return r +} + +func (r Location_Reservation_Rack) Mask(mask string) Location_Reservation_Rack { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Reservation_Rack) Filter(filter string) Location_Reservation_Rack { + r.Options.Filter = filter + return r +} + +func (r Location_Reservation_Rack) Limit(limit int) Location_Reservation_Rack { + r.Options.Limit = &limit + return r +} + +func (r Location_Reservation_Rack) Offset(offset int) Location_Reservation_Rack { + r.Options.Offset = &offset + return r +} + +// Retrieve The bandwidth allotment that the reservation belongs to. +func (r Location_Reservation_Rack) GetAllotment() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack", "getAllotment", nil, &r.Options, &resp) + return +} + +// Retrieve Members of the rack. 
+func (r Location_Reservation_Rack) GetChildren() (resp []datatypes.Location_Reservation_Rack_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Reservation_Rack) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Reservation_Rack) GetLocationReservation() (resp datatypes.Location_Reservation, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack", "getLocationReservation", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Reservation_Rack) GetObject() (resp datatypes.Location_Reservation_Rack, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Location_Reservation_Rack_Member struct { + Session *session.Session + Options sl.Options +} + +// GetLocationReservationRackMemberService returns an instance of the Location_Reservation_Rack_Member SoftLayer service +func GetLocationReservationRackMemberService(sess *session.Session) Location_Reservation_Rack_Member { + return Location_Reservation_Rack_Member{Session: sess} +} + +func (r Location_Reservation_Rack_Member) Id(id int) Location_Reservation_Rack_Member { + r.Options.Id = &id + return r +} + +func (r Location_Reservation_Rack_Member) Mask(mask string) Location_Reservation_Rack_Member { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Location_Reservation_Rack_Member) Filter(filter string) Location_Reservation_Rack_Member { + r.Options.Filter = filter + return r +} + +func (r Location_Reservation_Rack_Member) Limit(limit int) Location_Reservation_Rack_Member { + r.Options.Limit = &limit + return r +} + +func (r Location_Reservation_Rack_Member) Offset(offset int) Location_Reservation_Rack_Member { + r.Options.Offset = &offset + return r +} + +// Retrieve Location relation for the rack member +func (r Location_Reservation_Rack_Member) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack_Member", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Location_Reservation_Rack_Member) GetLocationReservationRack() (resp datatypes.Location_Reservation, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack_Member", "getLocationReservationRack", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Location_Reservation_Rack_Member) GetObject() (resp datatypes.Location_Reservation_Rack_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Location_Reservation_Rack_Member", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/marketplace.go b/vendor/github.com/softlayer/softlayer-go/services/marketplace.go new file mode 100644 index 0000000000..27339a571f --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/marketplace.go @@ -0,0 +1,148 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Marketplace_Partner struct { + Session *session.Session + Options sl.Options +} + +// GetMarketplacePartnerService returns an instance of the Marketplace_Partner SoftLayer service +func GetMarketplacePartnerService(sess *session.Session) Marketplace_Partner { + return Marketplace_Partner{Session: sess} +} + +func (r Marketplace_Partner) Id(id int) Marketplace_Partner { + r.Options.Id = &id + return r +} + +func (r Marketplace_Partner) Mask(mask string) Marketplace_Partner { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Marketplace_Partner) Filter(filter string) Marketplace_Partner { + r.Options.Filter = filter + return r +} + +func (r Marketplace_Partner) Limit(limit int) Marketplace_Partner { + r.Options.Limit = &limit + return r +} + +func (r Marketplace_Partner) Offset(offset int) Marketplace_Partner { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Marketplace_Partner) GetAllObjects() (resp []datatypes.Marketplace_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Marketplace_Partner) GetAllPublishedPartners(searchTerm *string) (resp []datatypes.Marketplace_Partner, err error) { + params := []interface{}{ + searchTerm, + } + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getAllPublishedPartners", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Marketplace_Partner) GetAttachments() (resp []datatypes.Marketplace_Partner_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getAttachments", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Marketplace_Partner) GetFeaturedPartners(non *bool) (resp []datatypes.Marketplace_Partner, err error) { + params := []interface{}{ + non, + } + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getFeaturedPartners", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Marketplace_Partner) GetFile(name *string) (resp datatypes.Marketplace_Partner_File, err error) { + params := []interface{}{ + name, + } + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getFile", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Marketplace_Partner) GetLogoMedium() (resp datatypes.Marketplace_Partner_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getLogoMedium", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Marketplace_Partner) GetLogoMediumTemp() (resp datatypes.Marketplace_Partner_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", 
"getLogoMediumTemp", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Marketplace_Partner) GetLogoSmall() (resp datatypes.Marketplace_Partner_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getLogoSmall", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Marketplace_Partner) GetLogoSmallTemp() (resp datatypes.Marketplace_Partner_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getLogoSmallTemp", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Marketplace_Partner) GetObject() (resp datatypes.Marketplace_Partner, err error) { + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Marketplace_Partner) GetPartnerByUrlIdentifier(urlIdentifier *string) (resp datatypes.Marketplace_Partner, err error) { + params := []interface{}{ + urlIdentifier, + } + err = r.Session.DoRequest("SoftLayer_Marketplace_Partner", "getPartnerByUrlIdentifier", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/metric.go b/vendor/github.com/softlayer/softlayer-go/services/metric.go new file mode 100644 index 0000000000..e2721cf4c2 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/metric.go @@ -0,0 +1,234 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// Metric tracking objects provides a common interface to all metrics provided by SoftLayer. These metrics range from network component traffic for a server to aggregated Bandwidth Pooling traffic and more. Every object within SoftLayer's range of objects that has data that can be tracked over time has an associated tracking object. Use the [[SoftLayer_Metric_Tracking_Object]] service to retrieve raw and graph data from a tracking object. 
+type Metric_Tracking_Object struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetMetricTrackingObjectService returns an instance of the Metric_Tracking_Object SoftLayer service
+func GetMetricTrackingObjectService(sess *session.Session) Metric_Tracking_Object {
+	return Metric_Tracking_Object{Session: sess}
+}
+
+func (r Metric_Tracking_Object) Id(id int) Metric_Tracking_Object {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Metric_Tracking_Object) Mask(mask string) Metric_Tracking_Object {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Metric_Tracking_Object) Filter(filter string) Metric_Tracking_Object {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Metric_Tracking_Object) Limit(limit int) Metric_Tracking_Object {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Metric_Tracking_Object) Offset(offset int) Metric_Tracking_Object {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve a PNG image of the last 24 hours of bandwidth usage of one of SoftLayer's network backbones.
+func (r Metric_Tracking_Object) GetBackboneBandwidthGraph(graphTitle *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		graphTitle,
+	}
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getBackboneBandwidthGraph", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a collection of raw bandwidth data from an individual public or private network tracking object. Raw data is ideal if you wish to employ your own traffic storage and graphing systems.
+func (r Metric_Tracking_Object) GetBandwidthData(startDateTime *datatypes.Time, endDateTime *datatypes.Time, typ *string, rollupSeconds *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		typ,
+		rollupSeconds,
+	}
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getBandwidthData", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a PNG image of a bandwidth graph representing the bandwidth usage over time recorded by SoftLayer's bandwidth pollers.
+func (r Metric_Tracking_Object) GetBandwidthGraph(startDateTime *datatypes.Time, endDateTime *datatypes.Time, graphType *string, fontSize *int, graphWidth *int, graphHeight *int, doNotShowTimeZone *bool) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		graphType,
+		fontSize,
+		graphWidth,
+		graphHeight,
+		doNotShowTimeZone,
+	}
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getBandwidthGraph", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve the total amount of bandwidth recorded by a tracking object within the given date range. This method will only work on SoftLayer_Metric_Tracking_Object for SoftLayer_Hardware objects, and SoftLayer_Virtual_Guest objects.
+func (r Metric_Tracking_Object) GetBandwidthTotal(startDateTime *datatypes.Time, endDateTime *datatypes.Time, direction *string, typ *string) (resp uint, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		direction,
+		typ,
+	}
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getBandwidthTotal", params, &r.Options, &resp)
+	return
+}
+
+// Returns a graph container instance that is populated with metric data for the tracking object.
+func (r Metric_Tracking_Object) GetCustomGraphData(graphContainer *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) {
+	params := []interface{}{
+		graphContainer,
+	}
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getCustomGraphData", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a collection of detailed metric data over a date range. Ideal if you want to employ your own graphing systems. Note that not all metrics support this method; those that do not support it return null.
+func (r Metric_Tracking_Object) GetDetailsForDateRange(startDate *datatypes.Time, endDate *datatypes.Time, graphType []string) (resp []datatypes.Container_Metric_Tracking_Object_Details, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+		graphType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getDetailsForDateRange", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a PNG image of a metric in graph form.
+func (r Metric_Tracking_Object) GetGraph(startDateTime *datatypes.Time, endDateTime *datatypes.Time, graphType []string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		graphType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getGraph", params, &r.Options, &resp)
+	return
+}
+
+// Returns a collection of metric data types that can be retrieved for a metric tracking object.
+func (r Metric_Tracking_Object) GetMetricDataTypes() (resp []datatypes.Container_Metric_Data_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getMetricDataTypes", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Metric_Tracking_Object object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Metric_Tracking_Object service. You can only retrieve tracking objects that are associated with your SoftLayer account or services.
+func (r Metric_Tracking_Object) GetObject() (resp datatypes.Metric_Tracking_Object, err error) {
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a metric summary. Ideal if you want to employ your own graphing systems. Note that not all metric types contain a summary; these return null.
+func (r Metric_Tracking_Object) GetSummary(graphType *string) (resp datatypes.Container_Metric_Tracking_Object_Summary, err error) {
+	params := []interface{}{
+		graphType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getSummary", params, &r.Options, &resp)
+	return
+}
+
+// Returns summarized metric data for the date range, metric type and summary period provided.
+func (r Metric_Tracking_Object) GetSummaryData(startDateTime *datatypes.Time, endDateTime *datatypes.Time, validTypes []datatypes.Container_Metric_Data_Type, summaryPeriod *int) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		validTypes,
+		summaryPeriod,
+	}
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getSummaryData", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of data that a tracking object polls.
+func (r Metric_Tracking_Object) GetType() (resp datatypes.Metric_Tracking_Object_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// This data type provides commonly used bandwidth summary components for the current billing cycle.
+type Metric_Tracking_Object_Bandwidth_Summary struct { + Session *session.Session + Options sl.Options +} + +// GetMetricTrackingObjectBandwidthSummaryService returns an instance of the Metric_Tracking_Object_Bandwidth_Summary SoftLayer service +func GetMetricTrackingObjectBandwidthSummaryService(sess *session.Session) Metric_Tracking_Object_Bandwidth_Summary { + return Metric_Tracking_Object_Bandwidth_Summary{Session: sess} +} + +func (r Metric_Tracking_Object_Bandwidth_Summary) Id(id int) Metric_Tracking_Object_Bandwidth_Summary { + r.Options.Id = &id + return r +} + +func (r Metric_Tracking_Object_Bandwidth_Summary) Mask(mask string) Metric_Tracking_Object_Bandwidth_Summary { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Metric_Tracking_Object_Bandwidth_Summary) Filter(filter string) Metric_Tracking_Object_Bandwidth_Summary { + r.Options.Filter = filter + return r +} + +func (r Metric_Tracking_Object_Bandwidth_Summary) Limit(limit int) Metric_Tracking_Object_Bandwidth_Summary { + r.Options.Limit = &limit + return r +} + +func (r Metric_Tracking_Object_Bandwidth_Summary) Offset(offset int) Metric_Tracking_Object_Bandwidth_Summary { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Metric_Tracking_Object_Bandwidth_Summary) GetObject() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Metric_Tracking_Object_Bandwidth_Summary", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/monitoring.go b/vendor/github.com/softlayer/softlayer-go/services/monitoring.go new file mode 100644 index 0000000000..2de43d7e96 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/monitoring.go @@ -0,0 +1,685 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// A monitoring agent object contains information describing the agent. 
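+//
+// A minimal usage sketch, assuming an authenticated session and a hypothetical
+// agent id:
+//
+//	agent := services.GetMonitoringAgentService(sess).Id(42)
+//	status, err := agent.GetAgentStatus()
+//	// ... inspect the status, then for example re-enable the agent:
+//	activated, err := agent.Activate()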
+type Monitoring_Agent struct { + Session *session.Session + Options sl.Options +} + +// GetMonitoringAgentService returns an instance of the Monitoring_Agent SoftLayer service +func GetMonitoringAgentService(sess *session.Session) Monitoring_Agent { + return Monitoring_Agent{Session: sess} +} + +func (r Monitoring_Agent) Id(id int) Monitoring_Agent { + r.Options.Id = &id + return r +} + +func (r Monitoring_Agent) Mask(mask string) Monitoring_Agent { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Monitoring_Agent) Filter(filter string) Monitoring_Agent { + r.Options.Filter = filter + return r +} + +func (r Monitoring_Agent) Limit(limit int) Monitoring_Agent { + r.Options.Limit = &limit + return r +} + +func (r Monitoring_Agent) Offset(offset int) Monitoring_Agent { + r.Options.Offset = &offset + return r +} + +// This method activates a SoftLayer_Monitoring_Agent. +func (r Monitoring_Agent) Activate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "activate", nil, &r.Options, &resp) + return +} + +// This method is used to apply changes to a monitoring agent's configuration for SoftLayer_Configuration_Template_Section with the property sectionType that has a keyName of 'TEMPLATE_SECTION'. Configuration values that are passed in can be new or updated objects but must have a definitionId and profileId defined for both. Existing SoftLayer_Monitoring_Agent_Configuration_Value values can be retrieved as a property of the SoftLayer_Configuration_Template_Section_Definition's from the monitoring agent's configurationTemplate property. New values will follow the structure of SoftLayer_Monitoring_Agent_Configuration_Value. It returns a SoftLayer_Provisioning_Version1_Transaction object to track the progress of the update being applied. Some configuration sections act as a template which helps to create additional monitoring configurations. For instance, Core Resource monitoring agent lets you create monitoring configurations for different disk volumes or disk path. +func (r Monitoring_Agent) AddConfigurationProfile(configurationValues []datatypes.Monitoring_Agent_Configuration_Value) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + configurationValues, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "addConfigurationProfile", params, &r.Options, &resp) + return +} + +// This method creates a transaction used to apply changes to a monitoring agent's configuration for an array of SoftLayer_Configuration_Template_Section that have the property sectionType with a name of 'Fixed section'. Configuration values that are passed in can be new or updated objects but must have a configurationDefinitionId defined for both. Existing SoftLayer_Monitoring_Agent_Configuration_Value values can be retrieved as a property of the SoftLayer_Configuration_Template_Section_Definition from the monitoring agent's configurationTemplate property. New values will follow the structure of SoftLayer_Monitoring_Agent_Configuration_Value. This method returns a SoftLayer_Provisioning_Version1_Transaction object to track the progress of the update being applied. 
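+//
+// A hedged sketch of the call shape, assuming an authenticated session; the
+// ids and value below are placeholders, and the field names follow the
+// description above (configurationDefinitionId plus a value):
+//
+//	values := []datatypes.Monitoring_Agent_Configuration_Value{
+//		{ConfigurationDefinitionId: sl.Int(1000), Value: sl.String("80")},
+//	}
+//	txn, err := services.GetMonitoringAgentService(sess).Id(42).ApplyConfigurationValues(values)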
+func (r Monitoring_Agent) ApplyConfigurationValues(configurationValues []datatypes.Monitoring_Agent_Configuration_Value) (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	params := []interface{}{
+		configurationValues,
+	}
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "applyConfigurationValues", params, &r.Options, &resp)
+	return
+}
+
+// This method will deactivate the monitoring agent, preventing it from generating any further alarms.
+func (r Monitoring_Agent) Deactivate() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "deactivate", nil, &r.Options, &resp)
+	return
+}
+
+// This method will remove a SoftLayer_Configuration_Template_Section_Profile from a SoftLayer_Configuration_Template_Section by passing in the sectionId of the profile object and identifier of the profile. This will execute the action immediately on the server and the SoftLayer_Configuration_Template_Section returning a boolean true if successful.
+func (r Monitoring_Agent) DeleteConfigurationProfile(sectionId *int, profileId *int) (resp bool, err error) {
+	params := []interface{}{
+		sectionId,
+		profileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "deleteConfigurationProfile", params, &r.Options, &resp)
+	return
+}
+
+// Initialize a monitoring agent and deploy it with the SoftLayer_Configuration_Template with the same identifier as the $configurationTemplateId parameter. If no configuration template identifier is provided, the current configuration template will be used. When executing this method, the existing configuration values will be lost. '''Warning''' Reporting data may be lost as a result of executing this method.
+func (r Monitoring_Agent) DeployMonitoringAgent(configurationTemplateId *int) (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	params := []interface{}{
+		configurationTemplateId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "deployMonitoringAgent", params, &r.Options, &resp)
+	return
+}
+
+// This method retrieves an array of SoftLayer_Notification_User_Subscriber objects belonging to the SoftLayer_Monitoring_Agent which are able to receive alarm notifications.
+func (r Monitoring_Agent) GetActiveAlarmSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getActiveAlarmSubscribers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current status of the corresponding agent
+func (r Monitoring_Agent) GetAgentStatus() (resp datatypes.Monitoring_Agent_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getAgentStatus", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns an array of available SoftLayer_Configuration_Template objects for this monitoring agent.
+func (r Monitoring_Agent) GetAvailableConfigurationTemplates() (resp []datatypes.Configuration_Template, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getAvailableConfigurationTemplates", nil, &r.Options, &resp)
+	return
+}
+
+// Returns an array of available configuration values that are specific to a server or a virtual guest that this monitoring agent is running on. For example, invoking this method against "Network Traffic Monitoring Agent" will return all available network adapters on your system.
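+//
+// A minimal sketch (the agent and definition ids are placeholders; the second
+// argument may be nil when no seed values are needed):
+//
+//	agent := services.GetMonitoringAgentService(sess).Id(42)
+//	available, err := agent.GetAvailableConfigurationValues(sl.Int(1000), nil)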
+func (r Monitoring_Agent) GetAvailableConfigurationValues(configurationDefinitionId *int, configValues []datatypes.Monitoring_Agent_Configuration_Value) (resp []datatypes.Monitoring_Agent_Configuration_Value, err error) {
+	params := []interface{}{
+		configurationDefinitionId,
+		configValues,
+	}
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getAvailableConfigurationValues", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve All custom configuration profiles associated with the corresponding agent.
+func (r Monitoring_Agent) GetConfigurationProfiles() (resp []datatypes.Configuration_Template_Section_Profile, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getConfigurationProfiles", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A template of an agent's current configuration which contains information about the structure of the configuration values.
+func (r Monitoring_Agent) GetConfigurationTemplate() (resp datatypes.Configuration_Template, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getConfigurationTemplate", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The values associated with the corresponding agent configuration.
+func (r Monitoring_Agent) GetConfigurationValues() (resp []datatypes.Monitoring_Agent_Configuration_Value, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getConfigurationValues", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns an array of SoftLayer_User_Customer objects, representing those who are allowed to be used as alarm subscribers. (The "Subscibers" spelling below matches the method name in the upstream SoftLayer API.)
+func (r Monitoring_Agent) GetEligibleAlarmSubscibers() (resp []datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getEligibleAlarmSubscibers", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns a SoftLayer_Container_Monitoring_Graph_Outputs object containing a base64 PNG string graph of the provided configuration values for the given begin and end dates.
+func (r Monitoring_Agent) GetGraph(configurationValues []datatypes.Monitoring_Agent_Configuration_Value, beginDate *datatypes.Time, endDate *datatypes.Time) (resp datatypes.Container_Monitoring_Graph_Outputs, err error) {
+	params := []interface{}{
+		configurationValues,
+		beginDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getGraph", params, &r.Options, &resp)
+	return
+}
+
+// This method returns the metric data for each of the configuration values provided during the given time range.
+func (r Monitoring_Agent) GetGraphData(metricDataTypes []datatypes.Container_Metric_Data_Type, startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		metricDataTypes,
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getGraphData", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve SoftLayer hardware related to the agent.
+func (r Monitoring_Agent) GetHardware() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getHardware", nil, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a monitoring agent whose identifier corresponds to the value provided in the initialization parameter passed to the SoftLayer_Monitoring_Agent service.
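+//
+// For example, a hypothetical fetch using the fluent option setters defined
+// above (the id and mask properties are illustrative; a mask containing a
+// comma is automatically wrapped as "mask[...]" by the Mask helper):
+//
+//	agent, err := GetMonitoringAgentService(sess).
+//		Id(1234).
+//		Mask("id,statusName").
+//		GetObject()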
+func (r Monitoring_Agent) GetObject() (resp datatypes.Monitoring_Agent, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Contains general information relating to a single SoftLayer product.
+func (r Monitoring_Agent) GetProductItem() (resp datatypes.Product_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getProductItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A description for a specific installation of a Software Component.
+func (r Monitoring_Agent) GetSoftwareDescription() (resp datatypes.Software_Description, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getSoftwareDescription", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Monitoring agent status name.
+func (r Monitoring_Agent) GetStatusName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getStatusName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Virtual_Guest object related to the monitoring agent. This is the virtual guest that the running agent resides on.
+func (r Monitoring_Agent) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "getVirtualGuest", nil, &r.Options, &resp)
+	return
+}
+
+// Use this method to remove an active subscriber from the monitoring agent. The agent subscribers can be managed within the portal from the "Alarm Subscribers" tab of the monitoring agent configuration.
+func (r Monitoring_Agent) RemoveActiveAlarmSubscriber(userRecordId *int) (resp bool, err error) {
+	params := []interface{}{
+		userRecordId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "removeActiveAlarmSubscriber", params, &r.Options, &resp)
+	return
+}
+
+// Use this method to remove all subscribers from the monitoring agent. The agent subscribers can be managed within the portal from the "Alarm Subscribers" tab of the monitoring agent configuration.
+func (r Monitoring_Agent) RemoveAllAlarmSubscribers() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "removeAllAlarmSubscribers", nil, &r.Options, &resp)
+	return
+}
+
+// This method restarts a monitoring agent and sets the agent's status to 'ACTIVE'.
+func (r Monitoring_Agent) RestartMonitoringAgent() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "restartMonitoringAgent", nil, &r.Options, &resp)
+	return
+}
+
+// This method assigns a user to receive the alerts generated by this SoftLayer_Monitoring_Agent.
+func (r Monitoring_Agent) SetActiveAlarmSubscriber(userRecordId *int) (resp bool, err error) {
+	params := []interface{}{
+		userRecordId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent", "setActiveAlarmSubscriber", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Monitoring_Agent_Configuration_Template_Group class consists of configuration templates for agents in a monitoring package.
+type Monitoring_Agent_Configuration_Template_Group struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetMonitoringAgentConfigurationTemplateGroupService returns an instance of the Monitoring_Agent_Configuration_Template_Group SoftLayer service
+func GetMonitoringAgentConfigurationTemplateGroupService(sess *session.Session) Monitoring_Agent_Configuration_Template_Group {
+	return Monitoring_Agent_Configuration_Template_Group{Session: sess}
+}
+
+func (r Monitoring_Agent_Configuration_Template_Group) Id(id int) Monitoring_Agent_Configuration_Template_Group {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Monitoring_Agent_Configuration_Template_Group) Mask(mask string) Monitoring_Agent_Configuration_Template_Group {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Monitoring_Agent_Configuration_Template_Group) Filter(filter string) Monitoring_Agent_Configuration_Template_Group {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Monitoring_Agent_Configuration_Template_Group) Limit(limit int) Monitoring_Agent_Configuration_Template_Group {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Monitoring_Agent_Configuration_Template_Group) Offset(offset int) Monitoring_Agent_Configuration_Template_Group {
+	r.Options.Offset = &offset
+	return r
+}
+
+// This method creates a SoftLayer_Monitoring_Agent_Configuration_Template_Group using the values provided in the template object. The template object's accountId will be overridden to use the active user's accountId as it shows on their associated SoftLayer_User_Customer object.
+func (r Monitoring_Agent_Configuration_Template_Group) CreateObject(templateObject *datatypes.Monitoring_Agent_Configuration_Template_Group) (resp datatypes.Monitoring_Agent_Configuration_Template_Group, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Deletes a customer configuration template group.
+func (r Monitoring_Agent_Configuration_Template_Group) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// This method edits an existing SoftLayer_Monitoring_Agent_Configuration_Template_Group using the values passed in the $object parameter. The $object parameter should use the same structure as a SoftLayer_Monitoring_Agent_Configuration_Template_Group object.
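+//
+// A minimal edit sketch (the id and the Name field are assumptions used for
+// illustration, not verified against the datatype definition):
+//
+//	svc := GetMonitoringAgentConfigurationTemplateGroupService(sess).Id(42)
+//	ok, err := svc.EditObject(&datatypes.Monitoring_Agent_Configuration_Template_Group{
+//		Name: sl.String("renamed-template-group"),
+//	})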
+func (r Monitoring_Agent_Configuration_Template_Group) EditObject(templateObject *datatypes.Monitoring_Agent_Configuration_Template_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Monitoring_Agent_Configuration_Template_Group) GetAllObjects() (resp []datatypes.Monitoring_Agent_Configuration_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getAllObjects", nil, &r.Options, &resp) + return +} + +// This method retrieves an array of SoftLayer_Monitoring_Agent_Configuration_Template_Group objects that are available to the active user's account. The packageId parameter is not currently used. +func (r Monitoring_Agent_Configuration_Template_Group) GetConfigurationGroups(packageId *int) (resp []datatypes.Monitoring_Agent_Configuration_Template_Group, err error) { + params := []interface{}{ + packageId, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getConfigurationGroups", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group) GetConfigurationTemplateReferences() (resp []datatypes.Monitoring_Agent_Configuration_Template_Group_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getConfigurationTemplateReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group) GetConfigurationTemplates() (resp []datatypes.Configuration_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getConfigurationTemplates", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getItem", nil, &r.Options, &resp) + return +} + +// This method retrieves a monitoring agent configuration template group whose identifier corresponds to the value provided in the initialization parameter passed to the SoftLayer_Monitoring_Agent_Configuration_Template_Group service. +func (r Monitoring_Agent_Configuration_Template_Group) GetObject() (resp datatypes.Monitoring_Agent_Configuration_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group", "getObject", nil, &r.Options, &resp) + return +} + +// SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference class holds the reference information, essentially a SQL join, between a monitoring configuration group and agent configuration templates. 
+type Monitoring_Agent_Configuration_Template_Group_Reference struct { + Session *session.Session + Options sl.Options +} + +// GetMonitoringAgentConfigurationTemplateGroupReferenceService returns an instance of the Monitoring_Agent_Configuration_Template_Group_Reference SoftLayer service +func GetMonitoringAgentConfigurationTemplateGroupReferenceService(sess *session.Session) Monitoring_Agent_Configuration_Template_Group_Reference { + return Monitoring_Agent_Configuration_Template_Group_Reference{Session: sess} +} + +func (r Monitoring_Agent_Configuration_Template_Group_Reference) Id(id int) Monitoring_Agent_Configuration_Template_Group_Reference { + r.Options.Id = &id + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group_Reference) Mask(mask string) Monitoring_Agent_Configuration_Template_Group_Reference { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group_Reference) Filter(filter string) Monitoring_Agent_Configuration_Template_Group_Reference { + r.Options.Filter = filter + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group_Reference) Limit(limit int) Monitoring_Agent_Configuration_Template_Group_Reference { + r.Options.Limit = &limit + return r +} + +func (r Monitoring_Agent_Configuration_Template_Group_Reference) Offset(offset int) Monitoring_Agent_Configuration_Template_Group_Reference { + r.Options.Offset = &offset + return r +} + +// This method creates a monitoring agent configuration template group reference by passing in an object with the SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference structure as the $templateObject parameter. +func (r Monitoring_Agent_Configuration_Template_Group_Reference) CreateObject(templateObject *datatypes.Monitoring_Agent_Configuration_Template_Group_Reference) (resp datatypes.Monitoring_Agent_Configuration_Template_Group_Reference, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "createObject", params, &r.Options, &resp) + return +} + +// This method creates monitoring agent configuration template group references by passing in an array of objects with the SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference structure as the $templateObjects parameter. Setting the $bulkCommit parameter to true will commit the changes in one transaction, false will commit after each object is created. +func (r Monitoring_Agent_Configuration_Template_Group_Reference) CreateObjects(templateObjects []datatypes.Monitoring_Agent_Configuration_Template_Group_Reference) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "createObjects", params, &r.Options, &resp) + return +} + +// This method updates a SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference record by passing in a modified instance of the object. 
+func (r Monitoring_Agent_Configuration_Template_Group_Reference) EditObject(templateObject *datatypes.Monitoring_Agent_Configuration_Template_Group_Reference) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "editObject", params, &r.Options, &resp) + return +} + +// This method updates a set of SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference records by passing in an array of modified instances of the objects. Setting the $bulkCommit parameter to true will commit the changes in one transaction, false will commit after each object is updated. +func (r Monitoring_Agent_Configuration_Template_Group_Reference) EditObjects(templateObjects []datatypes.Monitoring_Agent_Configuration_Template_Group_Reference) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "editObjects", params, &r.Options, &resp) + return +} + +// This method retrieves all SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference objects accessible to the active user. +func (r Monitoring_Agent_Configuration_Template_Group_Reference) GetAllObjects() (resp []datatypes.Monitoring_Agent_Configuration_Template_Group_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group_Reference) GetConfigurationTemplate() (resp datatypes.Configuration_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "getConfigurationTemplate", nil, &r.Options, &resp) + return +} + +// This method retrieves a monitoring agent configuration template group reference whose identifier corresponds to the value provided in the initialization parameter passed to the SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference service. 
+func (r Monitoring_Agent_Configuration_Template_Group_Reference) GetObject() (resp datatypes.Monitoring_Agent_Configuration_Template_Group_Reference, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Template_Group_Reference) GetTemplateGroup() (resp datatypes.Monitoring_Agent_Configuration_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Template_Group_Reference", "getTemplateGroup", nil, &r.Options, &resp) + return +} + +// Monitoring agent configuration value +type Monitoring_Agent_Configuration_Value struct { + Session *session.Session + Options sl.Options +} + +// GetMonitoringAgentConfigurationValueService returns an instance of the Monitoring_Agent_Configuration_Value SoftLayer service +func GetMonitoringAgentConfigurationValueService(sess *session.Session) Monitoring_Agent_Configuration_Value { + return Monitoring_Agent_Configuration_Value{Session: sess} +} + +func (r Monitoring_Agent_Configuration_Value) Id(id int) Monitoring_Agent_Configuration_Value { + r.Options.Id = &id + return r +} + +func (r Monitoring_Agent_Configuration_Value) Mask(mask string) Monitoring_Agent_Configuration_Value { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Monitoring_Agent_Configuration_Value) Filter(filter string) Monitoring_Agent_Configuration_Value { + r.Options.Filter = filter + return r +} + +func (r Monitoring_Agent_Configuration_Value) Limit(limit int) Monitoring_Agent_Configuration_Value { + r.Options.Limit = &limit + return r +} + +func (r Monitoring_Agent_Configuration_Value) Offset(offset int) Monitoring_Agent_Configuration_Value { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Value) GetDefinition() (resp datatypes.Configuration_Template_Section_Definition, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Value", "getDefinition", nil, &r.Options, &resp) + return +} + +// Retrieve The metric data type used to retrieve metric data currently being tracked. 
+func (r Monitoring_Agent_Configuration_Value) GetMetricDataType() (resp datatypes.Container_Metric_Data_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Value", "getMetricDataType", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Value) GetMonitoringAgent() (resp datatypes.Monitoring_Agent, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Value", "getMonitoringAgent", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Monitoring_Agent_Configuration_Value) GetObject() (resp datatypes.Monitoring_Agent_Configuration_Value, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Value", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Monitoring_Agent_Configuration_Value) GetProfile() (resp datatypes.Configuration_Template_Section_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Configuration_Value", "getProfile", nil, &r.Options, &resp) + return +} + +// Monitoring agent status +type Monitoring_Agent_Status struct { + Session *session.Session + Options sl.Options +} + +// GetMonitoringAgentStatusService returns an instance of the Monitoring_Agent_Status SoftLayer service +func GetMonitoringAgentStatusService(sess *session.Session) Monitoring_Agent_Status { + return Monitoring_Agent_Status{Session: sess} +} + +func (r Monitoring_Agent_Status) Id(id int) Monitoring_Agent_Status { + r.Options.Id = &id + return r +} + +func (r Monitoring_Agent_Status) Mask(mask string) Monitoring_Agent_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Monitoring_Agent_Status) Filter(filter string) Monitoring_Agent_Status { + r.Options.Filter = filter + return r +} + +func (r Monitoring_Agent_Status) Limit(limit int) Monitoring_Agent_Status { + r.Options.Limit = &limit + return r +} + +func (r Monitoring_Agent_Status) Offset(offset int) Monitoring_Agent_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Monitoring_Agent_Status) GetObject() (resp datatypes.Monitoring_Agent_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Monitoring_Agent_Status", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Monitoring_Robot data type contains general information relating to a monitoring robot. 
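+//
+// A hypothetical connectivity-check workflow using the methods defined on
+// this service (sess is an authenticated *session.Session; the robot id is
+// illustrative):
+//
+//	robot := GetMonitoringRobotService(sess).Id(99)
+//	ok, err := robot.CheckConnection()
+//	if err == nil && !ok {
+//		// TCP ports 48000-48002 may be blocked; once connectivity is
+//		// restored, the robot status can be reset to "Active":
+//		_, _ = robot.ResetStatus()
+//	}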
+type Monitoring_Robot struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetMonitoringRobotService returns an instance of the Monitoring_Robot SoftLayer service
+func GetMonitoringRobotService(sess *session.Session) Monitoring_Robot {
+	return Monitoring_Robot{Session: sess}
+}
+
+func (r Monitoring_Robot) Id(id int) Monitoring_Robot {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Monitoring_Robot) Mask(mask string) Monitoring_Robot {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Monitoring_Robot) Filter(filter string) Monitoring_Robot {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Monitoring_Robot) Limit(limit int) Monitoring_Robot {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Monitoring_Robot) Offset(offset int) Monitoring_Robot {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Checks if a monitoring robot can communicate with the SoftLayer monitoring management system via the private network.
+//
+// TCP ports 48000-48002 must be open on your server or your virtual server in order for this test to succeed.
+func (r Monitoring_Robot) CheckConnection() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "checkConnection", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Monitoring_Robot) DeployMonitoringAgents(configurationTemplateGroup *datatypes.Monitoring_Agent_Configuration_Template_Group) (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	params := []interface{}{
+		configurationTemplateGroup,
+	}
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "deployMonitoringAgents", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account associated with the corresponding robot.
+func (r Monitoring_Robot) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Returns available configuration template groups for this monitoring agent.
+func (r Monitoring_Robot) GetAvailableConfigurationGroups() (resp []datatypes.Monitoring_Agent_Configuration_Template_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getAvailableConfigurationGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The programs (monitoring agents) that gather details of a system or application, report the metric data, and trigger alarms for predefined events.
+func (r Monitoring_Robot) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getMonitoringAgents", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Monitoring_Robot) GetObject() (resp datatypes.Monitoring_Robot, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current status of the robot.
+func (r Monitoring_Robot) GetRobotStatus() (resp datatypes.Monitoring_Robot_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getRobotStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Software_Component that corresponds to the robot installation on the server.
+func (r Monitoring_Robot) GetSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "getSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// If our monitoring management system is not able to connect to your monitoring robot, it sets the robot status to "Limited Connectivity". Robots in this status will not be processed by our monitoring management system, and you cannot manage their monitoring agents either.
+//
+// Use this method to reset the monitoring robot status to "Active" to indicate that the connection issue is resolved.
+func (r Monitoring_Robot) ResetStatus() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Monitoring_Robot", "resetStatus", nil, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/network.go b/vendor/github.com/softlayer/softlayer-go/services/network.go
new file mode 100644
index 0000000000..addbee26dd
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/network.go
@@ -0,0 +1,13667 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// no documentation yet
+type Network struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkService returns an instance of the Network SoftLayer service
+func GetNetworkService(sess *session.Session) Network {
+	return Network{Session: sess}
+}
+
+func (r Network) Id(id int) Network {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network) Mask(mask string) Network {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network) Filter(filter string) Network {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network) Limit(limit int) Network {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network) Offset(offset int) Network {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Provide a template containing the following properties to create a Network:
+// * networkIdentifier
+// * cidr
+// * name
+//
+//
+// The ``networkIdentifier`` must be an IP address within RFC 1918 blocks:
+// * 192.168.0.0/16
+// * 172.16.0.0/12
+// * 10.0.0.0/8
+// The ``cidr`` must be an integer between 16 and 24, inclusive. The ``networkIdentifier``/``cidr`` must represent a valid subnet specification. The ``name`` must not be empty, but otherwise can contain up to 50 characters of user specified information to identify the Network.
+//
+// The subnet specification of the Network bounds the IP address space which can be utilized and constrains the creation of Subnets within the Network.
+// +// Example networkIdentifier/CIDR combinations: +// * 192.168.0.0/16 +// * 192.168.0.0/17 +// * 172.16.0.0/16 +// * 172.31.0.0/16 +// * 10.0.0.0/16 +// * 10.255.0.0/16 +func (r Network) CreateObject(templateObject *datatypes.Network) (resp datatypes.Network, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network", "createObject", params, &r.Options, &resp) + return +} + +// Creation of a Subnet is necessary prior to provisioning compute resources into a Network. In order to create a Subnet, both a [[SoftLayer_Network_Subnet|Subnet]] and [[SoftLayer_Network_Pod|Pod]] must be specified. The Pod determines where the Subnet will be available for use by compute resources. +// +// Provide a Subnet template containing the following properties: +// * networkIdentifier +// * cidr +// The ``networkIdentifier`` must represent an IP address within that specified by the Network. The ``cidr`` must be an integer between 24 and 29, inclusive, and represent a subnet size smaller than the Network's. The ``networkIdentifier``/``cidr`` must represent a valid subnet specification. +// +// Provide a Pod template containing the following property: +// * name +// The ``name`` must represent a valid Pod e.g. sjc01.pod02. See [[SoftLayer_Network_Pod (type)]] for more information. +// +// The following constraints apply to Subnet creation: +// * It must fit within the bounds of the Network. +// * It must be no larger than /24 and no smaller than /29. +// * Its size must not equal that of the Network. This implies that a fully +// utilized Network will have a minimum of two Subnets. +// * The Pod must support the ability to create Networks by having the +// SUPPORTS_CUSTOMER_DEFINED_NETWORK capability. See [[SoftLayer_Network_Pod/getCapabilities]]. +func (r Network) CreateSubnet(subnet *datatypes.Network_Subnet, pod *datatypes.Network_Pod) (resp datatypes.Network_Subnet, err error) { + params := []interface{}{ + subnet, + pod, + } + err = r.Session.DoRequest("SoftLayer_Network", "createSubnet", params, &r.Options, &resp) + return +} + +// Remove the specified Network along with any Subnets. +func (r Network) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network", "deleteObject", nil, &r.Options, &resp) + return +} + +// +// +// Provide a Subnet template containing the following properties: +// * networkIdentifier +// * cidr +// The ``networkIdentifier`` must represent an IP address within that specified by the Network. The ``cidr`` must be an integer between 24 and 29, inclusive, and represent a subnet size smaller than the Network's. The ``networkIdentifier``/``cidr`` must represent a valid subnet specification. Or: +// * id +// The ``id`` must identify a Subnet in the Network. If the ``id`` is provided, the ``networkIdentifier``/``cidr`` will be ignored. +// +// Subnets may only be removed when no compute resources are utilizing them. +func (r Network) DeleteSubnet(subnet *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnet, + } + err = r.Session.DoRequest("SoftLayer_Network", "deleteSubnet", params, &r.Options, &resp) + return +} + +// Modify either the ``name`` or ``notes`` properties of a Network. 
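+//
+// For context, a hypothetical end-to-end creation sketch for the methods
+// documented above (the names, addresses, and pod identifier are
+// illustrative, and the Go field names are assumed to mirror the documented
+// template properties):
+//
+//	netSvc := GetNetworkService(sess)
+//	network, err := netSvc.CreateObject(&datatypes.Network{
+//		Name:              sl.String("app-network"),
+//		NetworkIdentifier: sl.String("10.0.0.0"),
+//		Cidr:              sl.Int(16),
+//	})
+//	if err == nil && network.Id != nil {
+//		subnet, _ := netSvc.Id(*network.Id).CreateSubnet(
+//			&datatypes.Network_Subnet{
+//				NetworkIdentifier: sl.String("10.0.1.0"),
+//				Cidr:              sl.Int(24),
+//			},
+//			&datatypes.Network_Pod{Name: sl.String("sjc01.pod02")},
+//		)
+//		_ = subnet
+//	}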
+func (r Network) EditObject(templateObject *datatypes.Network) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network) GetAllObjects() (resp []datatypes.Network, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The size of the Network specified in CIDR notation. Specified in conjunction with the ``networkIdentifier`` to describe the bounding subnet size for the Network. Required for creation. See [[SoftLayer_Network/createObject]] documentation for creation details.
+func (r Network) GetCidr() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network", "getCidr", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A name for the Network. This is required during creation of a Network and is entirely user defined.
+func (r Network) GetName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network", "getName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The starting IP address of the Network. Specified in conjunction with the ``cidr`` property to specify the bounding IP address space for the Network. Required for creation. See [[SoftLayer_Network/createObject]] documentation for creation details.
+func (r Network) GetNetworkIdentifier() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network", "getNetworkIdentifier", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Notes, or a description of the Network. This is entirely user defined.
+func (r Network) GetNotes() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network", "getNotes", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network) GetObject() (resp datatypes.Network, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Subnets within the Network. These represent the realized segments of the Network and reside within a [[SoftLayer_Network_Pod|Pod]]. A Subnet must be specified when provisioning a compute resource within a Network.
+func (r Network) GetSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network", "getSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_Application_Delivery_Controller data type models a single instance of an application delivery controller. Local properties are read only, except for a ''notes'' property, which can be used to describe your application delivery controller service. The type's relational properties provide more information about the service's function, as well as login information for the controller's backend management if advanced view is enabled.
+type Network_Application_Delivery_Controller struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkApplicationDeliveryControllerService returns an instance of the Network_Application_Delivery_Controller SoftLayer service
+func GetNetworkApplicationDeliveryControllerService(sess *session.Session) Network_Application_Delivery_Controller {
+	return Network_Application_Delivery_Controller{Session: sess}
+}
+
+func (r Network_Application_Delivery_Controller) Id(id int) Network_Application_Delivery_Controller {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Application_Delivery_Controller) Mask(mask string) Network_Application_Delivery_Controller {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Application_Delivery_Controller) Filter(filter string) Network_Application_Delivery_Controller {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Application_Delivery_Controller) Limit(limit int) Network_Application_Delivery_Controller {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Application_Delivery_Controller) Offset(offset int) Network_Application_Delivery_Controller {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Create or add to an application delivery controller based load balancer service. The loadBalancer parameter must have its ''name'', ''type'', ''sourcePort'', and ''virtualIpAddress'' properties populated. Changes are reflected immediately in the application delivery controller.
+func (r Network_Application_Delivery_Controller) CreateLiveLoadBalancer(loadBalancer *datatypes.Network_LoadBalancer_VirtualIpAddress) (resp bool, err error) {
+	params := []interface{}{
+		loadBalancer,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "createLiveLoadBalancer", params, &r.Options, &resp)
+	return
+}
+
+// Remove a virtual IP address from an application delivery controller based load balancer. Only the ''name'' property in the loadBalancer parameter must be populated. Changes are reflected immediately in the application delivery controller.
+func (r Network_Application_Delivery_Controller) DeleteLiveLoadBalancer(loadBalancer *datatypes.Network_LoadBalancer_VirtualIpAddress) (resp bool, err error) {
+	params := []interface{}{
+		loadBalancer,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "deleteLiveLoadBalancer", params, &r.Options, &resp)
+	return
+}
+
+// Remove an entire load balancer service, including all virtual IP addresses, from an application delivery controller based load balancer. The ''name'' property of the service parameter, and the ''name'' property within its ''vip'' property, must be provided. Changes are reflected immediately in the application delivery controller.
+func (r Network_Application_Delivery_Controller) DeleteLiveLoadBalancerService(service *datatypes.Network_LoadBalancer_Service) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		service,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "deleteLiveLoadBalancerService", params, &r.Options, &resp)
+	return
+}
+
+// Edit an application delivery controller record. Currently only a controller's notes property is editable.
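+//
+// As an illustration of the load balancer creation call documented above, a
+// hypothetical sketch (the property values are made up, and the Go field
+// names are assumed from the documented ''name''/''type''/''sourcePort''/
+// ''virtualIpAddress'' properties):
+//
+//	adc := GetNetworkApplicationDeliveryControllerService(sess).Id(321)
+//	ok, err := adc.CreateLiveLoadBalancer(&datatypes.Network_LoadBalancer_VirtualIpAddress{
+//		Name:             sl.String("web-vip"),
+//		Type:             sl.String("HTTP"),
+//		SourcePort:       sl.Int(80),
+//		VirtualIpAddress: sl.String("203.0.113.10"),
+//	})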
+func (r Network_Application_Delivery_Controller) EditObject(templateObject *datatypes.Network_Application_Delivery_Controller) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer customer account that owns an application delivery controller record.
+func (r Network_Application_Delivery_Controller) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The average daily public bandwidth usage for the current billing cycle.
+func (r Network_Application_Delivery_Controller) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Application_Delivery_Controller) GetBandwidthDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, networkType *string) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		networkType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getBandwidthDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Use this method when you need a bandwidth image for a single application delivery controller. It will gather the correct input parameters for the generic graphing utility based on the provided date range, as sketched further below.
+func (r Network_Application_Delivery_Controller) GetBandwidthImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, networkType *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		networkType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getBandwidthImageByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The billing item for an Application Delivery Controller.
+func (r Network_Application_Delivery_Controller) GetBillingItem() (resp datatypes.Billing_Item_Network_Application_Delivery_Controller, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Previous configurations for an Application Delivery Controller.
+func (r Network_Application_Delivery_Controller) GetConfigurationHistory() (resp []datatypes.Network_Application_Delivery_Controller_Configuration_History, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getConfigurationHistory", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a bandwidth graph by date.
+func (r Network_Application_Delivery_Controller) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) {
+	params := []interface{}{
+		graphData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getCustomBandwidthDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The datacenter that the application delivery controller resides in.
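+//
+// The sketch referenced above (hypothetical; requires the standard "time"
+// package, and the "public" network type string is an assumption):
+//
+//	end := datatypes.Time{Time: time.Now()}
+//	start := datatypes.Time{Time: end.AddDate(0, 0, -7)}
+//	img, err := adc.GetBandwidthImageByDate(&start, &end, sl.String("public"))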
+func (r Network_Application_Delivery_Controller) GetDatacenter() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getDatacenter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A brief description of an application delivery controller record.
+func (r Network_Application_Delivery_Controller) GetDescription() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getDescription", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The date on which the license for this application delivery controller will expire.
+func (r Network_Application_Delivery_Controller) GetLicenseExpirationDate() (resp datatypes.Time, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getLicenseExpirationDate", nil, &r.Options, &resp)
+	return
+}
+
+// Get the graph image for an application delivery controller service based on the supplied graph type and metric. The available graph types are 'connections' and 'status', and the available metrics are 'day', 'week' and 'month'.
+//
+// This method returns the raw binary image data.
+func (r Network_Application_Delivery_Controller) GetLiveLoadBalancerServiceGraphImage(service *datatypes.Network_LoadBalancer_Service, graphType *string, metric *string) (resp []byte, err error) {
+	params := []interface{}{
+		service,
+		graphType,
+		metric,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getLiveLoadBalancerServiceGraphImage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The virtual IP address records that belong to an application delivery controller based load balancer.
+func (r Network_Application_Delivery_Controller) GetLoadBalancers() (resp []datatypes.Network_LoadBalancer_VirtualIpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getLoadBalancers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A flag indicating that this Application Delivery Controller is a managed resource.
+func (r Network_Application_Delivery_Controller) GetManagedResourceFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getManagedResourceFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An application delivery controller's management IP address.
+func (r Network_Application_Delivery_Controller) GetManagementIpAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getManagementIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network VLAN that an application delivery controller resides on.
+func (r Network_Application_Delivery_Controller) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getNetworkVlan", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network VLANs that an application delivery controller resides on.
+func (r Network_Application_Delivery_Controller) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getNetworkVlans", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Application_Delivery_Controller object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Application_Delivery_Controller service. You can only retrieve application delivery controllers that are associated with your SoftLayer customer account. +func (r Network_Application_Delivery_Controller) GetObject() (resp datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for the current billing cycle. +func (r Network_Application_Delivery_Controller) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The password used to connect to an application delivery controller's management interface when it is operating in advanced view mode. +func (r Network_Application_Delivery_Controller) GetPassword() (resp datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getPassword", nil, &r.Options, &resp) + return +} + +// Retrieve An application delivery controller's primary public IP address. +func (r Network_Application_Delivery_Controller) GetPrimaryIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getPrimaryIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The projected public outbound bandwidth for the current billing cycle. +func (r Network_Application_Delivery_Controller) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve A network application controller's subnets. 
A subnet is a group of IP addresses
+func (r Network_Application_Delivery_Controller) GetSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Application_Delivery_Controller) GetTagReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getTagReferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Application_Delivery_Controller) GetType() (resp datatypes.Network_Application_Delivery_Controller_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Application_Delivery_Controller) GetVirtualIpAddresses() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "getVirtualIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// Restore an application delivery controller's base configuration state. The configuration will be set to what it was when initially provisioned.
+func (r Network_Application_Delivery_Controller) RestoreBaseConfiguration() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "restoreBaseConfiguration", nil, &r.Options, &resp)
+	return
+}
+
+// Restore an application delivery controller's configuration state.
+func (r Network_Application_Delivery_Controller) RestoreConfiguration(configurationHistoryId *int) (resp bool, err error) {
+	params := []interface{}{
+		configurationHistoryId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "restoreConfiguration", params, &r.Options, &resp)
+	return
+}
+
+// Save an application delivery controller's configuration state. The notes property for this method is optional.
+func (r Network_Application_Delivery_Controller) SaveCurrentConfiguration(notes *string) (resp datatypes.Network_Application_Delivery_Controller_Configuration_History, err error) {
+	params := []interface{}{
+		notes,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "saveCurrentConfiguration", params, &r.Options, &resp)
+	return
+}
+
+// Update the virtual IP address interface within an application delivery controller based load balancer identified by the ''name'' property in the loadBalancer parameter. You only need to set the properties in the loadBalancer parameter that you wish to change. Any virtual IP properties omitted or left empty are ignored. Changes are reflected immediately in the application delivery controller.
+func (r Network_Application_Delivery_Controller) UpdateLiveLoadBalancer(loadBalancer *datatypes.Network_LoadBalancer_VirtualIpAddress) (resp bool, err error) {
+	params := []interface{}{
+		loadBalancer,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "updateLiveLoadBalancer", params, &r.Options, &resp)
+	return
+}
+
+// Update the NetScaler VPX License.
+//
+// This service will create a transaction to update a NetScaler VPX License. After the license is updated, the load balancer will reboot in order to apply the newly issued license.
+//
+// The load balancer will be unavailable during the reboot.
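+//
+// A cautious usage sketch (hypothetical; saving a configuration snapshot
+// first is optional, and the notes string is illustrative):
+//
+//	hist, _ := adc.SaveCurrentConfiguration(sl.String("pre-license-update"))
+//	txn, err := adc.UpdateNetScalerLicense()
+//	// txn tracks the license update; the device reboots to apply it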
+func (r Network_Application_Delivery_Controller) UpdateNetScalerLicense() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller", "updateNetScalerLicense", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Application_Delivery_Controller_Configuration_History data type models a single instance of a configuration history entry for an application delivery controller. The configuration history entries are used to support creating backups of an application delivery controller's configuration state in order to restore them later if needed. +type Network_Application_Delivery_Controller_Configuration_History struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerConfigurationHistoryService returns an instance of the Network_Application_Delivery_Controller_Configuration_History SoftLayer service +func GetNetworkApplicationDeliveryControllerConfigurationHistoryService(sess *session.Session) Network_Application_Delivery_Controller_Configuration_History { + return Network_Application_Delivery_Controller_Configuration_History{Session: sess} +} + +func (r Network_Application_Delivery_Controller_Configuration_History) Id(id int) Network_Application_Delivery_Controller_Configuration_History { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_Configuration_History) Mask(mask string) Network_Application_Delivery_Controller_Configuration_History { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_Configuration_History) Filter(filter string) Network_Application_Delivery_Controller_Configuration_History { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_Configuration_History) Limit(limit int) Network_Application_Delivery_Controller_Configuration_History { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_Configuration_History) Offset(offset int) Network_Application_Delivery_Controller_Configuration_History { + r.Options.Offset = &offset + return r +} + +// deleteObject permanently removes a configuration history record +func (r Network_Application_Delivery_Controller_Configuration_History) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_Configuration_History", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The application delivery controller that a configuration history record belongs to. 
+func (r Network_Application_Delivery_Controller_Configuration_History) GetController() (resp datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_Configuration_History", "getController", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_Configuration_History) GetObject() (resp datatypes.Network_Application_Delivery_Controller_Configuration_History, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_Configuration_History", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerHealthAttributeService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerHealthAttributeService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + return Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) GetHealthCheck() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute", "getHealthCheck", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute) GetType() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type, err error) { + err = 
r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerHealthAttributeTypeService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerHealthAttributeTypeService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + return Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) GetAllObjects() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Check struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerHealthCheckService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Health_Check SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerHealthCheckService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + return Network_Application_Delivery_Controller_LoadBalancer_Health_Check{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) Id(id 
int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) GetAttributes() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check", "getAttributes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of scale load balancers that use this health check. 
+func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) GetScaleLoadBalancers() (resp []datatypes.Scale_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check", "getScaleLoadBalancers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) GetServices() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check", "getServices", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check) GetType() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerHealthCheckTypeService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerHealthCheckTypeService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + return Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) GetAllObjects() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type, err error) { + err = 
r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Health_Check_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Routing_Method struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerRoutingMethodService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Routing_Method SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerRoutingMethodService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + return Network_Application_Delivery_Controller_LoadBalancer_Routing_Method{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Method { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) GetAllObjects() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Routing_Method", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Method) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Routing_Method", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Routing_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerRoutingTypeService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Routing_Type SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerRoutingTypeService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + return Network_Application_Delivery_Controller_LoadBalancer_Routing_Type{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + r.Options.Id = &id + return r +} + +func (r 
Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Routing_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) GetAllObjects() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Routing_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Routing_Type) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Routing_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Service struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerServiceService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Service SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerServiceService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Service { + return Network_Application_Delivery_Controller_LoadBalancer_Service{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Service { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Service { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Service { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Service { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Service { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) DeleteObject() (err error) { + var resp datatypes.Void 
+ err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "deleteObject", nil, &r.Options, &resp) + return +} + +// Get the graph image for a load balancer service based on the supplied graph type and metric. The available graph types are: 'connections' and 'status', and the available metrics are: 'day', 'week' and 'month'. +// +// This method returns the raw binary image data. +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetGraphImage(graphType *string, metric *string) (resp []byte, err error) { + params := []interface{}{ + graphType, + metric, + } + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getGraphImage", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetGroupReferences() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getGroupReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetGroups() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getGroups", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetHealthCheck() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getHealthCheck", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetHealthChecks() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getHealthChecks", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getIpAddress", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) GetServiceGroup() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "getServiceGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Service) ToggleStatus() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service", "toggleStatus", nil, &r.Options, &resp) + return +} + +// no 
documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_Service_Group struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerServiceGroupService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_Service_Group SoftLayer service +func GetNetworkApplicationDeliveryControllerLoadBalancerServiceGroupService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + return Network_Application_Delivery_Controller_LoadBalancer_Service_Group{Session: sess} +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + r.Options.Id = &id + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + r.Options.Filter = filter + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + r.Options.Limit = &limit + return r +} + +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_Service_Group { + r.Options.Offset = &offset + return r +} + +// Get the graph image for a load balancer service group based on the supplied graph type and metric. The only available graph type currently is: 'connections', and the available metrics are: 'day', 'week' and 'month'. +// +// This method returns the raw binary image data. 
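+//
+// A minimal usage sketch (illustrative, not from the upstream docs). It
+// assumes "sess" is an authenticated *session.Session and that service group
+// 4321 exists; sl.String is the pointer helper from the softlayer-go "sl"
+// package. The same calling pattern applies to the per-service GetGraphImage
+// method above.
+//
+//     group := services.GetNetworkApplicationDeliveryControllerLoadBalancerServiceGroupService(sess).Id(4321)
+//     png, err := group.GetGraphImage(sl.String("connections"), sl.String("day"))
+//     if err == nil {
+//         err = ioutil.WriteFile("connections-day.png", png, 0644)
+//     }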
+func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetGraphImage(graphType *string, metric *string) (resp []byte, err error) { + params := []interface{}{ + graphType, + metric, + } + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getGraphImage", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetRoutingMethod() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getRoutingMethod", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetRoutingType() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getRoutingType", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetServiceReferences() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group_CrossReference, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getServiceReferences", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetServices() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getServices", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetVirtualServer() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getVirtualServer", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) GetVirtualServers() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "getVirtualServers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_Service_Group) KickAllConnections() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_Service_Group", "kickAllConnections", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService returns an 
instance of the Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress SoftLayer service
+func GetNetworkApplicationDeliveryControllerLoadBalancerVirtualIpAddressService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress {
+	return Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress{Session: sess}
+}
+
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Like any other API object, the load balancers can have their exposed properties edited by passing in a modified version of the object. The load balancer object can also modify its services in this way. Simply request the load balancer object you wish to edit, then modify the objects in the services array and pass the modified object to this function. WARNING: Services cannot be deleted in this manner; you must call deleteObject() on each service to physically remove it from the load balancer.
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) EditObject(templateObject *datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A virtual IP address's associated application delivery controller.
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetApplicationDeliveryController() (resp datatypes.Network_Application_Delivery_Controller, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getApplicationDeliveryController", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A virtual IP address's associated application delivery controllers.
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetApplicationDeliveryControllers() (resp []datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getApplicationDeliveryControllers", nil, &r.Options, &resp) + return +} + +// Yields a list of the SSL/TLS encryption ciphers that are currently supported on this virtual IP address instance. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetAvailableSecureTransportCiphers() (resp []datatypes.Security_SecureTransportCipher, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getAvailableSecureTransportCiphers", nil, &r.Options, &resp) + return +} + +// Yields a list of the secure communication protocols that are currently supported on this virtual IP address instance. The list of supported ciphers for each protocol is culled to match availability. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetAvailableSecureTransportProtocols() (resp []datatypes.Security_SecureTransportProtocol, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getAvailableSecureTransportProtocols", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for the load balancer virtual IP. This is only valid when dedicatedFlag is false. This is an independent virtual IP, and if canceled, will only affect the associated virtual IP. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for the load balancing device housing the virtual IP. This billing item represents a device which could contain other virtual IPs. Caution should be taken when canceling. This is only valid when dedicatedFlag is true. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetDedicatedBillingItem() (resp datatypes.Billing_Item_Network_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getDedicatedBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve Denotes whether the virtual IP is configured within a high availability cluster. 
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetHighAvailabilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getHighAvailabilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetLoadBalancerHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getLoadBalancerHardware", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the load balancer is a managed resource. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The list of security ciphers enabled for this virtual IP address +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetSecureTransportCiphers() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportCipher, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getSecureTransportCiphers", nil, &r.Options, &resp) + return +} + +// Retrieve The list of secure transport protocols enabled for this virtual IP address +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetSecureTransportProtocols() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress_SecureTransportProtocol, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getSecureTransportProtocols", nil, &r.Options, &resp) + return +} + +// Retrieve The SSL certificate currently associated with the VIP. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetSecurityCertificate() (resp datatypes.Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getSecurityCertificate", nil, &r.Options, &resp) + return +} + +// Retrieve The SSL certificate currently associated with the VIP. Provides chosen certificate visibility to unprivileged users. 
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetSecurityCertificateEntry() (resp datatypes.Security_Certificate_Entry, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getSecurityCertificateEntry", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) GetVirtualServers() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "getVirtualServers", nil, &r.Options, &resp)
+	return
+}
+
+// Start SSL acceleration on all SSL virtual services (those with a type of HTTPS). This action should be taken only after configuring an SSL certificate for the virtual IP.
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) StartSsl() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "startSsl", nil, &r.Options, &resp)
+	return
+}
+
+// Stop SSL acceleration on all SSL virtual services (those with a type of HTTPS).
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) StopSsl() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "stopSsl", nil, &r.Options, &resp)
+	return
+}
+
+// Upgrades the connection limit on the Virtual IP Address to the next, higher connection limit of the same product.
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress) UpgradeConnectionLimit() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress", "upgradeConnectionLimit", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Network_Application_Delivery_Controller_LoadBalancer_VirtualServer struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkApplicationDeliveryControllerLoadBalancerVirtualServerService returns an instance of the Network_Application_Delivery_Controller_LoadBalancer_VirtualServer SoftLayer service
+func GetNetworkApplicationDeliveryControllerLoadBalancerVirtualServerService(sess *session.Session) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer {
+	return Network_Application_Delivery_Controller_LoadBalancer_VirtualServer{Session: sess}
+}
+
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) Id(id int) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) Mask(mask string) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) Filter(filter string) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) Limit(limit int) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r 
Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) Offset(offset int) Network_Application_Delivery_Controller_LoadBalancer_VirtualServer { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) DeleteObject() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) GetObject() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) GetRoutingMethod() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "getRoutingMethod", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of scale load balancers this virtual server applies to. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) GetScaleLoadBalancers() (resp []datatypes.Scale_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "getScaleLoadBalancers", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) GetServiceGroups() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_Service_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "getServiceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) GetVirtualIpAddress() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "getVirtualIpAddress", nil, &r.Options, &resp) + return +} + +// Start SSL acceleration on all SSL virtual services (those with a type of HTTPS). This action should be taken only after configuring an SSL certificate for the virtual IP. +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) StartSsl() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "startSsl", nil, &r.Options, &resp) + return +} + +// Stop SSL acceleration on all SSL virtual services (those with a type of HTTPS). +func (r Network_Application_Delivery_Controller_LoadBalancer_VirtualServer) StopSsl() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Application_Delivery_Controller_LoadBalancer_VirtualServer", "stopSsl", nil, &r.Options, &resp) + return +} + +// A SoftLayer_Network_Backbone represents a single backbone connection from SoftLayer to the public Internet, from the Internet to the SoftLayer private network, or a link that connects the private networks between SoftLayer's datacenters. 
The SoftLayer_Network_Backbone data type is a collection of data associated with one of those connections. +type Network_Backbone struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkBackboneService returns an instance of the Network_Backbone SoftLayer service +func GetNetworkBackboneService(sess *session.Session) Network_Backbone { + return Network_Backbone{Session: sess} +} + +func (r Network_Backbone) Id(id int) Network_Backbone { + r.Options.Id = &id + return r +} + +func (r Network_Backbone) Mask(mask string) Network_Backbone { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Backbone) Filter(filter string) Network_Backbone { + r.Options.Filter = filter + return r +} + +func (r Network_Backbone) Limit(limit int) Network_Backbone { + r.Options.Limit = &limit + return r +} + +func (r Network_Backbone) Offset(offset int) Network_Backbone { + r.Options.Offset = &offset + return r +} + +// Retrieve a list of all SoftLayer backbone connections. Use this method if you need all backbones or don't know the id number of a specific backbone. +func (r Network_Backbone) GetAllBackbones() (resp []datatypes.Network_Backbone, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getAllBackbones", nil, &r.Options, &resp) + return +} + +// Retrieve a list of all SoftLayer backbone connections for a location name. +func (r Network_Backbone) GetBackbonesForLocationName(locationName *string) (resp []datatypes.Network_Backbone, err error) { + params := []interface{}{ + locationName, + } + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getBackbonesForLocationName", params, &r.Options, &resp) + return +} + +// Retrieve a graph of a SoftLayer backbone's last 24 hours of activity. getGraphImage returns a PNG image measuring 827 pixels by 293 pixels. +func (r Network_Backbone) GetGraphImage() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getGraphImage", nil, &r.Options, &resp) + return +} + +// Retrieve A backbone's status. +func (r Network_Backbone) GetHealth() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getHealth", nil, &r.Options, &resp) + return +} + +// Retrieve Which of the SoftLayer datacenters a backbone is connected to. +func (r Network_Backbone) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve A backbone's primary network component. +func (r Network_Backbone) GetNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve an individual SoftLayer_Network_Backbone record. Use the getAllBackbones() method to retrieve a list of all SoftLayer network backbones. 
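+//
+// Hedged example (assumes an authenticated *session.Session named "sess"):
+// list every backbone, then re-fetch the first one by its id via GetObject.
+//
+//     svc := services.GetNetworkBackboneService(sess)
+//     backbones, err := svc.GetAllBackbones()
+//     if err == nil && len(backbones) > 0 {
+//         backbone, _ := svc.Id(*backbones[0].Id).GetObject()
+//         _ = backbone
+//     }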
+func (r Network_Backbone) GetObject() (resp datatypes.Network_Backbone, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Backbone_Location_Dependent struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkBackboneLocationDependentService returns an instance of the Network_Backbone_Location_Dependent SoftLayer service +func GetNetworkBackboneLocationDependentService(sess *session.Session) Network_Backbone_Location_Dependent { + return Network_Backbone_Location_Dependent{Session: sess} +} + +func (r Network_Backbone_Location_Dependent) Id(id int) Network_Backbone_Location_Dependent { + r.Options.Id = &id + return r +} + +func (r Network_Backbone_Location_Dependent) Mask(mask string) Network_Backbone_Location_Dependent { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Backbone_Location_Dependent) Filter(filter string) Network_Backbone_Location_Dependent { + r.Options.Filter = filter + return r +} + +func (r Network_Backbone_Location_Dependent) Limit(limit int) Network_Backbone_Location_Dependent { + r.Options.Limit = &limit + return r +} + +func (r Network_Backbone_Location_Dependent) Offset(offset int) Network_Backbone_Location_Dependent { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Backbone_Location_Dependent) GetAllObjects() (resp []datatypes.Network_Backbone_Location_Dependent, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone_Location_Dependent", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Backbone_Location_Dependent) GetDependentLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone_Location_Dependent", "getDependentLocation", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Backbone_Location_Dependent) GetObject() (resp datatypes.Network_Backbone_Location_Dependent, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone_Location_Dependent", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Backbone_Location_Dependent) GetSourceDependentsByName(locationName *string) (resp datatypes.Location, err error) { + params := []interface{}{ + locationName, + } + err = r.Session.DoRequest("SoftLayer_Network_Backbone_Location_Dependent", "getSourceDependentsByName", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Backbone_Location_Dependent) GetSourceLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Backbone_Location_Dependent", "getSourceLocation", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Bandwidth_Version1_Allotment class provides methods and data structures necessary to work with an array of hardware objects associated with a single Bandwidth Pooling. 
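+//
+// A short usage sketch (an assumption-laden illustration, not upstream
+// documentation): wrap the service, point it at allotment 555, and list the
+// hardware pooled under it. "sess" is assumed to be an authenticated
+// *session.Session.
+//
+//     pool := services.GetNetworkBandwidthVersion1AllotmentService(sess).Id(555)
+//     hardware, err := pool.GetHardware()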
+type Network_Bandwidth_Version1_Allotment struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkBandwidthVersion1AllotmentService returns an instance of the Network_Bandwidth_Version1_Allotment SoftLayer service
+func GetNetworkBandwidthVersion1AllotmentService(sess *session.Session) Network_Bandwidth_Version1_Allotment {
+	return Network_Bandwidth_Version1_Allotment{Session: sess}
+}
+
+func (r Network_Bandwidth_Version1_Allotment) Id(id int) Network_Bandwidth_Version1_Allotment {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Bandwidth_Version1_Allotment) Mask(mask string) Network_Bandwidth_Version1_Allotment {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Bandwidth_Version1_Allotment) Filter(filter string) Network_Bandwidth_Version1_Allotment {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Bandwidth_Version1_Allotment) Limit(limit int) Network_Bandwidth_Version1_Allotment {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Bandwidth_Version1_Allotment) Offset(offset int) Network_Bandwidth_Version1_Allotment {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Create an allotment for servers to pool bandwidth and avoid overages in billing if they use more than their allocated bandwidth.
+func (r Network_Bandwidth_Version1_Allotment) CreateObject(templateObject *datatypes.Network_Bandwidth_Version1_Allotment) (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Edit a bandwidth allotment's local properties. Currently you may only change an allotment's name. Use the [[SoftLayer_Network_Bandwidth_Version1_Allotment::reassignServers|reassignServers()]] and [[SoftLayer_Network_Bandwidth_Version1_Allotment::unassignServers|unassignServers()]] methods to move servers in and out of your allotments.
+func (r Network_Bandwidth_Version1_Allotment) EditObject(templateObject *datatypes.Network_Bandwidth_Version1_Allotment) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account associated with this virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The bandwidth allotment detail records associated with this virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetActiveDetails() (resp []datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getActiveDetails", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Application Delivery Controller contained within a virtual rack. 
+func (r Network_Bandwidth_Version1_Allotment) GetApplicationDeliveryControllers() (resp []datatypes.Network_Application_Delivery_Controller, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getApplicationDeliveryControllers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The average daily public bandwidth usage for the current billing cycle.
+func (r Network_Bandwidth_Version1_Allotment) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// This method recurses through all servers on a Bandwidth Pool for a 24 hour time span starting at a given date/time. To get the private data set for all servers on a Bandwidth Pool from midnight Feb 1st, 2008 to 23:59 on Feb 1st, you would pass a parameter of '02/01/2008 0:00'. The ending date/time is calculated for you to prevent requesting data from the server for periods larger than 24 hours, as this method requires processing a lot of data records and can get slow at times.
+func (r Network_Bandwidth_Version1_Allotment) GetBackendBandwidthByHour(date *datatypes.Time) (resp []datatypes.Container_Network_Bandwidth_Version1_Usage, err error) {
+	params := []interface{}{
+		date,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBackendBandwidthByHour", params, &r.Options, &resp)
+	return
+}
+
+// This method recurses through all servers on a Bandwidth Pool between the given start and end dates to retrieve private (backend) bandwidth data.
+func (r Network_Bandwidth_Version1_Allotment) GetBackendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBackendBandwidthUse", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The bandwidth allotment type of this virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetBandwidthAllotmentType() (resp datatypes.Network_Bandwidth_Version1_Allotment_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBandwidthAllotmentType", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a collection of bandwidth data from an individual public or private network tracking object. This data is ideal if you wish to employ your own traffic storage and graphing systems.
+func (r Network_Bandwidth_Version1_Allotment) GetBandwidthForDateRange(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBandwidthForDateRange", params, &r.Options, &resp)
+	return
+}
+
+// This method recurses through all servers on a Bandwidth Pool for a given snapshot range, gathers the necessary parameters, and then calls the bandwidth graphing server. The return result is a container that includes the min and max dates for all servers to be used in the query, as well as an image in PNG format. This method uses the new and improved drawing routines which should return in a reasonable time frame now that the new backend data warehouse is used. 
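+//
+// Illustrative call (assumptions: "sess" is an authenticated
+// *session.Session, allotment 555 exists, and "public"/"day" are typical
+// networkType and snapshotRange values; sl.String and sl.Bool are the
+// softlayer-go pointer helpers). Passing nil for both dates is assumed to
+// let the snapshot range drive the time window.
+//
+//     pool := services.GetNetworkBandwidthVersion1AllotmentService(sess).Id(555)
+//     graph, err := pool.GetBandwidthImage(sl.String("public"), sl.String("day"), sl.Bool(true), nil, nil)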
+func (r Network_Bandwidth_Version1_Allotment) GetBandwidthImage(networkType *string, snapshotRange *string, draw *bool, dateSpecified *datatypes.Time, dateSpecifiedEnd *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + networkType, + snapshotRange, + draw, + dateSpecified, + dateSpecifiedEnd, + } + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBandwidthImage", params, &r.Options, &resp) + return +} + +// Retrieve The bare metal server instances contained within a virtual rack. +func (r Network_Bandwidth_Version1_Allotment) GetBareMetalInstances() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBareMetalInstances", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual rack's raw bandwidth usage data for an account's current billing cycle. One object is returned for each network this server is attached to. +func (r Network_Bandwidth_Version1_Allotment) GetBillingCycleBandwidthUsage() (resp []datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBillingCycleBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual rack's raw private network bandwidth usage data for an account's current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetBillingCyclePrivateBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBillingCyclePrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual rack's raw public network bandwidth usage data for an account's current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetBillingCyclePublicBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBillingCyclePublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public bandwidth used in this virtual rack for an account's current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetBillingCyclePublicUsageTotal() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBillingCyclePublicUsageTotal", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual rack's billing item. +func (r Network_Bandwidth_Version1_Allotment) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve An object that provides commonly used bandwidth summary components for the current billing cycle. +func (r Network_Bandwidth_Version1_Allotment) GetCurrentBandwidthSummary() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getCurrentBandwidthSummary", nil, &r.Options, &resp) + return +} + +// Retrieve bandwidth graph by date. 
+func (r Network_Bandwidth_Version1_Allotment) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) {
+	params := []interface{}{
+		graphData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getCustomBandwidthDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The bandwidth allotment detail records associated with this virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetDetails() (resp []datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getDetails", nil, &r.Options, &resp)
+	return
+}
+
+// This method recurses through all servers on a Bandwidth Pool for a 24 hour time span starting at a given date/time. To get the public data set for all servers on a Bandwidth Pool from midnight Feb 1st, 2008 to 23:59 on Feb 1st, you would pass a parameter of '02/01/2008 0:00'. The ending date/time is calculated for you to prevent requesting data from the server for periods larger than 24 hours, as this method requires processing a lot of data records and can get slow at times.
+func (r Network_Bandwidth_Version1_Allotment) GetFrontendBandwidthByHour(date *datatypes.Time) (resp []datatypes.Container_Network_Bandwidth_Version1_Usage, err error) {
+	params := []interface{}{
+		date,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getFrontendBandwidthByHour", params, &r.Options, &resp)
+	return
+}
+
+// This method recurses through all servers on a Bandwidth Pool between the given start and end dates to retrieve public bandwidth data.
+func (r Network_Bandwidth_Version1_Allotment) GetFrontendBandwidthUse(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Network_Bandwidth_Version1_Usage_Detail, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getFrontendBandwidthUse", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hardware contained within a virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total public inbound bandwidth used in this virtual rack for an account's current billing cycle.
+func (r Network_Bandwidth_Version1_Allotment) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The location group associated with this virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetLocationGroup() (resp datatypes.Location_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getLocationGroup", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The managed bare metal server instances contained within a virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetManagedBareMetalInstances() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getManagedBareMetalInstances", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The managed hardware contained within a virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetManagedHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getManagedHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The managed Virtual Server instances contained within a virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetManagedVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getManagedVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A virtual rack's metric tracking object. This object records all periodic polled data available to this rack.
+func (r Network_Bandwidth_Version1_Allotment) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object_VirtualDedicatedRack, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getMetricTrackingObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The metric tracking object id for this allotment.
+func (r Network_Bandwidth_Version1_Allotment) GetMetricTrackingObjectId() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getMetricTrackingObjectId", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Network_Bandwidth_Version1_Allotment object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Bandwidth_Version1_Allotment service. You can only retrieve an allotment associated with the account that your portal user is assigned to. (See the usage sketch further below.)
+func (r Network_Bandwidth_Version1_Allotment) GetObject() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The total public outbound bandwidth used in this virtual rack for an account's current billing cycle.
+func (r Network_Bandwidth_Version1_Allotment) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether the bandwidth usage for this bandwidth pool for the current billing cycle exceeds the allocation.
+func (r Network_Bandwidth_Version1_Allotment) GetOverBandwidthAllocationFlag() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getOverBandwidthAllocationFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The private network only hardware contained within a virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetPrivateNetworkOnlyHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getPrivateNetworkOnlyHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether the bandwidth usage for this bandwidth pool for the current billing cycle is projected to exceed the allocation.
+func (r Network_Bandwidth_Version1_Allotment) GetProjectedOverBandwidthAllocationFlag() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getProjectedOverBandwidthAllocationFlag", nil, &r.Options, &resp)
+	return
+}
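+
+// Editor's usage sketch (not part of the generated API): retrieving a single
+// allotment by ID with an object mask limiting the returned properties. The
+// ID and mask properties are placeholders.
+//
+//	svc := GetNetworkBandwidthVersion1AllotmentService(sess)
+//	allotment, err := svc.Id(12345).Mask("id,bandwidthAllotmentTypeId").GetObject()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+
+// Retrieve The projected public outbound bandwidth for this virtual server for the current billing cycle.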
+func (r Network_Bandwidth_Version1_Allotment) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Bandwidth_Version1_Allotment) GetServiceProvider() (resp datatypes.Service_Provider, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getServiceProvider", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The combined allocated bandwidth for all servers in a virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetTotalBandwidthAllocated() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getTotalBandwidthAllocated", nil, &r.Options, &resp)
+	return
+}
+
+// Gets the monthly recurring fee of a pooled server.
+func (r Network_Bandwidth_Version1_Allotment) GetVdrMemberRecurringFee() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getVdrMemberRecurringFee", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Virtual Server instances contained within a virtual rack.
+func (r Network_Bandwidth_Version1_Allotment) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "getVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// This method will reassign a collection of SoftLayer hardware to a bandwidth allotment (Bandwidth Pool). (See the usage sketch further below.)
+func (r Network_Bandwidth_Version1_Allotment) ReassignServers(templateObjects []datatypes.Hardware, newAllotmentId *int) (resp bool, err error) {
+	params := []interface{}{
+		templateObjects,
+		newAllotmentId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "reassignServers", params, &r.Options, &resp)
+	return
+}
+
+// This will remove a bandwidth pooling from a customer's allotments by cancelling the billing item. All servers in that allotment will get moved to the account's virtual private rack (VPR).
+func (r Network_Bandwidth_Version1_Allotment) RequestVdrCancellation() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "requestVdrCancellation", nil, &r.Options, &resp)
+	return
+}
+
+// This will move servers into a bandwidth pool, removing them from their previous bandwidth pool, and optionally removing the previous bandwidth pool on completion.
+func (r Network_Bandwidth_Version1_Allotment) RequestVdrContentUpdates(hardwareToAdd []datatypes.Hardware, hardwareToRemove []datatypes.Hardware, cloudsToAdd []datatypes.Virtual_Guest, cloudsToRemove []datatypes.Virtual_Guest, optionalAllotmentId *int, adcToAdd []datatypes.Network_Application_Delivery_Controller, adcToRemove []datatypes.Network_Application_Delivery_Controller) (resp bool, err error) {
+	params := []interface{}{
+		hardwareToAdd,
+		hardwareToRemove,
+		cloudsToAdd,
+		cloudsToRemove,
+		optionalAllotmentId,
+		adcToAdd,
+		adcToRemove,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "requestVdrContentUpdates", params, &r.Options, &resp)
+	return
+}
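+
+// Editor's usage sketch (not part of the generated API): moving two servers
+// into another bandwidth pool with ReassignServers. The hardware IDs and the
+// target allotment ID are placeholders; sl.Int is the softlayer-go pointer
+// helper.
+//
+//	servers := []datatypes.Hardware{
+//		{Id: sl.Int(111111)},
+//		{Id: sl.Int(222222)},
+//	}
+//	ok, err := GetNetworkBandwidthVersion1AllotmentService(sess).
+//		ReassignServers(servers, sl.Int(67890))
+
+// This will update the bandwidth pool to the servers provided. Servers currently in the bandwidth pool not provided on update will be removed. Servers provided on update not currently in the bandwidth pool will be added. If all servers are removed, this removes the bandwidth pool on completion.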
+func (r Network_Bandwidth_Version1_Allotment) SetVdrContent(hardware []datatypes.Hardware, bareMetalServers []datatypes.Hardware, virtualServerInstance []datatypes.Virtual_Guest, adc []datatypes.Network_Application_Delivery_Controller, optionalAllotmentId *int) (resp bool, err error) {
+	params := []interface{}{
+		hardware,
+		bareMetalServers,
+		virtualServerInstance,
+		adc,
+		optionalAllotmentId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "setVdrContent", params, &r.Options, &resp)
+	return
+}
+
+// This method will reassign a collection of SoftLayer hardware to the virtual private rack.
+func (r Network_Bandwidth_Version1_Allotment) UnassignServers(templateObjects []datatypes.Hardware) (resp bool, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "unassignServers", params, &r.Options, &resp)
+	return
+}
+
+// This method will void a pending server removal from this bandwidth pooling. Pass in the id of the hardware object or virtual guest you wish to update. Assuming that object is currently pending removal from the bandwidth pool at the start of the next billing cycle, the bandwidth pool member status will be restored and the pending cancellation removed.
+func (r Network_Bandwidth_Version1_Allotment) VoidPendingServerMove(id *int, typ *string) (resp bool, err error) {
+	params := []interface{}{
+		id,
+		typ,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "voidPendingServerMove", params, &r.Options, &resp)
+	return
+}
+
+// This method will void a pending cancellation on a bandwidth pool. Note, however, that any servers that belonged to the rack will have to be restored individually using the method voidPendingServerMove($id, $type).
+func (r Network_Bandwidth_Version1_Allotment) VoidPendingVdrCancellation() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Bandwidth_Version1_Allotment", "voidPendingVdrCancellation", nil, &r.Options, &resp)
+	return
+}
+
+// Every piece of hardware running in SoftLayer's datacenters connected to the public, private, or management networks (where applicable) has a corresponding network component. These network components are modeled by the SoftLayer_Network_Component data type. These data types reflect the servers' local ethernet and remote management interfaces.
+type Network_Component struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkComponentService returns an instance of the Network_Component SoftLayer service
+func GetNetworkComponentService(sess *session.Session) Network_Component {
+	return Network_Component{Session: sess}
+}
+
+func (r Network_Component) Id(id int) Network_Component {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Component) Mask(mask string) Network_Component {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Component) Filter(filter string) Network_Component {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Component) Limit(limit int) Network_Component {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Component) Offset(offset int) Network_Component {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Add VLANs as trunks to a network component. The VLANs given must be assigned to your account, and on the router to which this network component is connected. The current native VLAN (networkVlanId/networkVlan) cannot be added as a trunk. This method should be called on a network component attached directly to customer-assigned hardware, though all trunking operations will occur on the uplinkComponent. A current list of VLAN trunks for a network component on a customer server can be found at 'uplinkComponent->networkVlanTrunks'. (See the usage sketch further below.)
+//
+// This method returns an array of SoftLayer_Network_Vlans which were added as trunks. Any requested trunks which are already trunked will be silently ignored, and will not be returned.
+//
+// Configuration of network hardware is done asynchronously; do not depend on the return of this call as an indication that the newly trunked VLANs will be accessible.
+func (r Network_Component) AddNetworkVlanTrunks(networkVlans []datatypes.Network_Vlan) (resp []datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		networkVlans,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "addNetworkVlanTrunks", params, &r.Options, &resp)
+	return
+}
+
+// This method will remove all VLANs trunked to this network component. The native VLAN (networkVlanId/networkVlan) will remain active, and cannot be removed via the API. Returns a list of SoftLayer_Network_Vlan objects for which the trunks were removed.
+func (r Network_Component) ClearNetworkVlanTrunks() (resp []datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "clearNetworkVlanTrunks", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) command currently being executed by the server's remote management card.
+func (r Network_Component) GetActiveCommand() (resp datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getActiveCommand", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve bandwidth graph by date.
+func (r Network_Component) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) {
+	params := []interface{}{
+		graphData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getCustomBandwidthDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network component linking this object to a child device.
+func (r Network_Component) GetDownlinkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getDownlinkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The duplex mode of a network component.
+func (r Network_Component) GetDuplexMode() (resp datatypes.Network_Component_Duplex_Mode, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getDuplexMode", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hardware that a network component resides in.
+func (r Network_Component) GetHardware() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Component) GetHighAvailabilityFirewallFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getHighAvailabilityFirewallFlag", nil, &r.Options, &resp)
+	return
+}
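+
+// Editor's usage sketch (not part of the generated API): trunking two VLANs to
+// a server-facing network component. The component and VLAN IDs are
+// placeholders. Because configuration is asynchronous, the returned list only
+// confirms which trunks were accepted for processing, not that they are live.
+//
+//	vlans := []datatypes.Network_Vlan{
+//		{Id: sl.Int(1234)},
+//		{Id: sl.Int(5678)},
+//	}
+//	added, err := GetNetworkComponentService(sess).
+//		Id(987654).
+//		AddNetworkVlanTrunks(vlans)
+
+// Retrieve A hardware switch's interface to the bandwidth pod.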
+func (r Network_Component) GetInterface() (resp datatypes.Network_Bandwidth_Version1_Interface, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getInterface", nil, &r.Options, &resp) + return +} + +// Retrieve The records of all IP addresses bound to a network component. +func (r Network_Component) GetIpAddressBindings() (resp []datatypes.Network_Component_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getIpAddressBindings", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Component) GetIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve Last reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) command issued to the server's remote management card. +func (r Network_Component) GetLastCommand() (resp datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getLastCommand", nil, &r.Options, &resp) + return +} + +// Retrieve The metric tracking object for this network component. +func (r Network_Component) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve The upstream network component firewall. +func (r Network_Component) GetNetworkComponentFirewall() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getNetworkComponentFirewall", nil, &r.Options, &resp) + return +} + +// Retrieve A network component's associated group. +func (r Network_Component) GetNetworkComponentGroup() (resp datatypes.Network_Component_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getNetworkComponentGroup", nil, &r.Options, &resp) + return +} + +// Retrieve All network devices in SoftLayer's network hierarchy that this device is connected to. +func (r Network_Component) GetNetworkHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getNetworkHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The VLAN that a network component's subnet is associated with. +func (r Network_Component) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// Retrieve The VLANs that are trunked to this network component. +func (r Network_Component) GetNetworkVlanTrunks() (resp []datatypes.Network_Component_Network_Vlan_Trunk, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getNetworkVlanTrunks", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Component) GetObject() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Component", "getObject", nil, &r.Options, &resp) + return +} + +// +// **DEPRECATED - This operation will cease to function after April 4th, 2016 and will be removed from v3.2** +// Retrieve various network statistics. The network statistics are retrieved from the network device using snmpget. 
+// Below is a list of statistics retrieved:
+// * Administrative Status
+// * Operational Status
+// * Maximum Transmission Unit
+// * In Octets
+// * Out Octets
+// * In Unicast Packets
+// * Out Unicast Packets
+// * In Multicast Packets
+// * Out Multicast Packets
+func (r Network_Component) GetPortStatistics() (resp datatypes.Container_Network_Port_Statistic, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getPortStatistics", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The primary IPv4 Address record for a network component.
+func (r Network_Component) GetPrimaryIpAddressRecord() (resp datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getPrimaryIpAddressRecord", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The subnet of the primary IP address assigned to this network component.
+func (r Network_Component) GetPrimarySubnet() (resp datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getPrimarySubnet", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The primary IPv6 Address record for a network component.
+func (r Network_Component) GetPrimaryVersion6IpAddressRecord() (resp datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getPrimaryVersion6IpAddressRecord", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The last five reboot/power (rebootDefault, rebootSoft, rebootHard, powerOn, powerOff and powerCycle) commands issued to the server's remote management card.
+func (r Network_Component) GetRecentCommands() (resp []datatypes.Hardware_Component_RemoteManagement_Command_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getRecentCommands", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates whether the network component is participating in a group of two or more components capable of being operationally redundant, if enabled.
+func (r Network_Component) GetRedundancyCapableFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getRedundancyCapableFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates whether the network component is participating in a group of two or more components which is actively providing link redundancy.
+func (r Network_Component) GetRedundancyEnabledFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getRedundancyEnabledFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve User(s) credentials to issue commands and/or interact with the server's remote management card.
+func (r Network_Component) GetRemoteManagementUsers() (resp []datatypes.Hardware_Component_RemoteManagement_User, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getRemoteManagementUsers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A network component's routers.
+func (r Network_Component) GetRouter() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getRouter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether a network component's primary ip address is from a storage network subnet or not.
+func (r Network_Component) GetStorageNetworkFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getStorageNetworkFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A network component's subnets.
+// A subnet is a group of IP addresses.
+func (r Network_Component) GetSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network component linking this object to its parent.
+func (r Network_Component) GetUplinkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getUplinkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The duplex mode of the uplink network component linking to this object.
+func (r Network_Component) GetUplinkDuplexMode() (resp datatypes.Network_Component_Duplex_Mode, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "getUplinkDuplexMode", nil, &r.Options, &resp)
+	return
+}
+
+// Remove one or more VLANs currently attached to the uplinkComponent of this networkComponent. The VLANs given must be assigned to your account, and on the router the network component is connected to. If any VLANs not currently trunked are given, they will be silently ignored.
+//
+// This method should be called on a network component attached directly to customer-assigned hardware, though all trunking operations will occur on the uplinkComponent. A current list of VLAN trunks for a network component on a customer server can be found at 'uplinkComponent->networkVlanTrunks'.
+//
+// Configuration of network hardware is done asynchronously; do not depend on the return of this call as an indication that the removed VLANs will be inaccessible.
+func (r Network_Component) RemoveNetworkVlanTrunks(networkVlans []datatypes.Network_Vlan) (resp []datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		networkVlans,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Component", "removeNetworkVlanTrunks", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_Component_Firewall data type contains general information relating to a single SoftLayer network component firewall. This is the object which ties the running rules to a specific downstream server. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request.
+type Network_Component_Firewall struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkComponentFirewallService returns an instance of the Network_Component_Firewall SoftLayer service
+func GetNetworkComponentFirewallService(sess *session.Session) Network_Component_Firewall {
+	return Network_Component_Firewall{Session: sess}
+}
+
+func (r Network_Component_Firewall) Id(id int) Network_Component_Firewall {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Component_Firewall) Mask(mask string) Network_Component_Firewall {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Component_Firewall) Filter(filter string) Network_Component_Firewall {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Component_Firewall) Limit(limit int) Network_Component_Firewall {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Component_Firewall) Offset(offset int) Network_Component_Firewall {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve The additional subnets linked to this network component firewall that inherit rules from the host that the context slot is attached to.
+func (r Network_Component_Firewall) GetApplyServerRuleSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getApplyServerRuleSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The billing item for a Hardware Firewall (Dedicated).
+func (r Network_Component_Firewall) GetBillingItem() (resp datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network component of the guest virtual server that this network component firewall belongs to.
+func (r Network_Component_Firewall) GetGuestNetworkComponent() (resp datatypes.Virtual_Guest_Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getGuestNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network component of the switch interface that this network component firewall belongs to.
+func (r Network_Component_Firewall) GetNetworkComponent() (resp datatypes.Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The update requests made for this firewall.
+func (r Network_Component_Firewall) GetNetworkFirewallUpdateRequest() (resp []datatypes.Network_Firewall_Update_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getNetworkFirewallUpdateRequest", nil, &r.Options, &resp)
+	return
+}
+
+// getObject returns a SoftLayer_Network_Firewall_Module_Context_Interface_AccessControlList_Network_Component object. You can only get objects for servers attached to your account that have a network firewall enabled.
+func (r Network_Component_Firewall) GetObject() (resp datatypes.Network_Component_Firewall, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getObject", nil, &r.Options, &resp)
+	return
+}
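+
+// Editor's note (not part of the generated API): Mask accepts either a bare
+// property list or a preformatted object mask. A minimal sketch of the
+// wrapping behavior implemented above, with placeholder property names:
+//
+//	svc := GetNetworkComponentFirewallService(sess)
+//	svc = svc.Mask("id,rules")       // contains a comma: sent as "mask[id,rules]"
+//	svc = svc.Mask("mask[id,rules]") // already wrapped: sent unchanged
+//	svc = svc.Mask("id")             // single plain property: sent as-is
+
+// Retrieve The currently running rule set of this network component firewall.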
+func (r Network_Component_Firewall) GetRules() (resp []datatypes.Network_Component_Firewall_Rule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getRules", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The additional subnets linked to this network component firewall.
+func (r Network_Component_Firewall) GetSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Component_Firewall", "getSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_ContentDelivery_Account data type models an individual CDN account. CDN accounts contain references to the SoftLayer customer account they belong to, login credentials for upload services, and a CDN account's status. Please contact SoftLayer sales to purchase or cancel a CDN account.
+type Network_ContentDelivery_Account struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkContentDeliveryAccountService returns an instance of the Network_ContentDelivery_Account SoftLayer service
+func GetNetworkContentDeliveryAccountService(sess *session.Session) Network_ContentDelivery_Account {
+	return Network_ContentDelivery_Account{Session: sess}
+}
+
+func (r Network_ContentDelivery_Account) Id(id int) Network_ContentDelivery_Account {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_ContentDelivery_Account) Mask(mask string) Network_ContentDelivery_Account {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_ContentDelivery_Account) Filter(filter string) Network_ContentDelivery_Account {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_ContentDelivery_Account) Limit(limit int) Network_ContentDelivery_Account {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_ContentDelivery_Account) Offset(offset int) Network_ContentDelivery_Account {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Internap servers attempt to validate a token before serving protected content. SoftLayer customers do not need to invoke this method. Please refer to the [[SoftLayer_Network_ContentDelivery_Authentication_Token|Authentication Token]] object for more details on the Content Authentication Service.
+func (r Network_ContentDelivery_Account) AuthenticateResourceRequest(parameter *datatypes.Container_Network_ContentDelivery_Authentication_Parameter) (resp bool, err error) {
+	params := []interface{}{
+		parameter,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "authenticateResourceRequest", params, &r.Options, &resp)
+	return
+}
+
+// You can further organize your contents on the CDN FTP server by creating sub directories. This method creates a directory on the CDN FTP server. A user must have the CDN_FILE_MANAGE privilege to use this method. A directory name must be an absolute path and you can only create sub directories in the /media folder.
+func (r Network_ContentDelivery_Account) CreateDirectory(directoryName *string) (resp bool, err error) {
+	params := []interface{}{
+		directoryName,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "createDirectory", params, &r.Options, &resp)
+	return
+}
+
+// This method allows you to create a default CDN FTP user record on the ftp.cdnlayer.service.softlayer.com server. With a CDN FTP user account, you can upload contents to the CDN host server through the SoftLayer private network. SoftLayer currently allows only one FTP user for each CDN account. Your default CDN FTP user record is created upon successful creation of a CDN account. You may not need to use this method at all. This is provided in support of previous CDN customers. SoftLayer may offer multiple CDN FTP users for a single CDN account in the future.
+//
+// Optionally, you can provide a new password when invoking this method; a new password must follow the rules below:
+// * ...must be between 8 and 20 characters long
+// * ...must be an alphanumeric value
+// * ...can contain these characters: - _ ! % # $ ^ & *
+func (r Network_ContentDelivery_Account) CreateFtpUser(newPassword *string) (resp bool, err error) {
+	params := []interface{}{
+		newPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "createFtpUser", params, &r.Options, &resp)
+	return
+}
+
+// With Origin Pull, content is pulled from your origin server as needed and then delivered to visitors. You do not need to upload your files to the CDN FTP: you can utilize the files that currently exist on your origin server. It will take 10 to 15 minutes for this to take effect after you create an Origin Pull rule. Origin Pull is only supported for the HTTP protocol, and you would continue to use the CDN FTP for Flash and Windows Media streaming services. (See the usage sketch further below.)
+//
+// A valid origin host can include directory information. You may include an authentication username and password along with an origin host. If you set an authentication username and password, CDN servers will include an "Authorization:" header in every request. You may use the "Authorization:" header to grant access to CDN servers or you may use it to distinguish CDN servers from normal visitors. Here is a list of valid origin hosts:
+// * www.website.com
+// * www.website.com/cdn_content
+// * cdn_user:password@www.website.com
+// * cdn_user:password@www.website.com/images
+//
+// An authentication username should be an alphanumeric string and allowed special characters are . - _
+// An authentication password should be an alphanumeric string and allowed special characters are . - _ ! # $ % ^ & *
+// Both username and password must be between 3 and 10 characters long.
+//
+// CDN nodes will cache your contents and you can control the cache lifetime by modifying your web server's configuration. This method also creates an FTP directory restriction upon successful Origin Pull setup. You will not be able to access the /media/http directory since contents will be pulled from your origin server. An origin domain must be a valid domain name and it can contain path information. This can help you organize contents on your origin server. For example, you could set an origin domain as: mydomain.com/cdn_contents
+//
+// A CNAME record allows you to have a customized URL. You can get rid of your CDN account name from the URL. A valid CNAME for the Origin Pull method must point to .http.cdn.softlayer.net.
+//
+// There are 2 types of origin pull mappings: one with a CNAME record and one without, and they work very differently.
+//
+// gzip is supported if your web server sends a proper gzip header. For more details, visit our [http://knowledgelayer.softlayer.com/topic/cdn KnowledgeLayer]
+func (r Network_ContentDelivery_Account) CreateOriginPullMapping(mappingObject *datatypes.Container_Network_ContentDelivery_OriginPull_Mapping) (resp bool, err error) {
+	params := []interface{}{
+		mappingObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "createOriginPullMapping", params, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated, please use the [[SoftLayer_Network_ContentDelivery_Account::createOriginPullMapping|createOriginPullMapping]] method instead.
+func (r Network_ContentDelivery_Account) CreateOriginPullRule(originDomain *string, cnameRecord *string) (resp bool, err error) {
+	params := []interface{}{
+		originDomain,
+		cnameRecord,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "createOriginPullRule", params, &r.Options, &resp)
+	return
+}
+
+// You need to specify a directory on your CDN FTP or on your origin host in which your secure content resides to enable token authentication. It will take about 30 minutes for a newly configured token authentication directory to take effect.
+func (r Network_ContentDelivery_Account) CreateTokenAuthenticationDirectory(directory *string, mediaType *string) (resp bool, err error) {
+	params := []interface{}{
+		directory,
+		mediaType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "createTokenAuthenticationDirectory", params, &r.Options, &resp)
+	return
+}
+
+// This method deletes your FTP user record on the ftp.cdnlayer.service.softlayer.com server. Refer to the service overview of the [[SoftLayer_Network_ContentDelivery_Account::createFtpUser|createFtpUser]] method for more information on the CDN FTP server.
+func (r Network_ContentDelivery_Account) DeleteFtpUser() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "deleteFtpUser", nil, &r.Options, &resp)
+	return
+}
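+
+// Editor's usage sketch (not part of the generated API): creating an HTTP
+// Origin Pull mapping. The origin host and CNAME are placeholders, and the
+// container field names shown are assumptions based on the softlayer-go
+// datatypes package; treat them as illustrative.
+//
+//	mapping := datatypes.Container_Network_ContentDelivery_OriginPull_Mapping{
+//		MediaType: sl.String("HTTP"),
+//		OriginUrl: sl.String("www.website.com/cdn_content"),
+//		Cname:     sl.String("cdn.website.com"),
+//	}
+//	ok, err := GetNetworkContentDeliveryAccountService(sess).
+//		Id(98765).
+//		CreateOriginPullMapping(&mapping)
+
+// This method removes an Origin Pull domain rule. Once an Origin Pull rule is removed, you will be able to access the /media/http directory. It will take 10 to 15 minutes for this to take effect after you remove your Origin Pull rule. Cached contents on CDN POPs may live longer than 15 minutes.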
+func (r Network_ContentDelivery_Account) DeleteOriginPullRule(originMappingId *string) (resp bool, err error) {
+	params := []interface{}{
+		originMappingId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "deleteOriginPullRule", params, &r.Options, &resp)
+	return
+}
+
+// This method disables the CDN access log.
+func (r Network_ContentDelivery_Account) DisableLogging() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "disableLogging", nil, &r.Options, &resp)
+	return
+}
+
+// This method enables the CDN access log.
+func (r Network_ContentDelivery_Account) EnableLogging() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "enableLogging", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The customer account that a CDN account belongs to.
+func (r Network_ContentDelivery_Account) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns bandwidth data for each POP. The [[SoftLayer_Container_Network_ContentDelivery_Bandwidth_PointsOfPresence_Summary|POP Bandwidth]] object contains a starting time, ending time, total bytes, POP name and bandwidth unit.
+//
+// POP bandwidth data is updated every day at 22:50 CST (or CDT). It queries and stores POP data from the day before. It is a more resource intensive process than a regular CDN bandwidth update, thus we run this once a day. Since the POP bandwidth data is delayed for a day, there is no correction process for POP data. The POP bandwidth is not associated with any billing process and is mainly used to generate a POP bandwidth graph.
+func (r Network_ContentDelivery_Account) GetAllPopsBandwidthData(beginDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Container_Network_ContentDelivery_Bandwidth_PointsOfPresence_Summary, err error) {
+	params := []interface{}{
+		beginDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAllPopsBandwidthData", params, &r.Options, &resp)
+	return
+}
+
+// This method returns a bandwidth graph for every POP wrapped in a [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object. A POP bandwidth graph shows bandwidth consumption per POP in a bar graph. The [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object contains a begin time, end time, title of the graph, binary data, and in and outbound total bandwidth in bytes.
+func (r Network_ContentDelivery_Account) GetAllPopsBandwidthImage(title *string, beginDateTime *datatypes.Time, endDateTime *datatypes.Time, unit *string) (resp datatypes.Container_Bandwidth_GraphOutputsExtended, err error) {
+	params := []interface{}{
+		title,
+		beginDateTime,
+		endDateTime,
+		unit,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAllPopsBandwidthImage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The CDN account id that this CDN account is associated with.
+func (r Network_ContentDelivery_Account) GetAssociatedCdnAccountId() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAssociatedCdnAccountId", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The IP addresses that are used for the content authentication service.
+func (r Network_ContentDelivery_Account) GetAuthenticationIpAddresses() (resp []datatypes.Network_ContentDelivery_Authentication_Address, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAuthenticationIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// CDN servers will invoke a Web Service method to validate a content authentication token. This method returns all token validation web service endpoints set for a CDN account. You can override the default web service by calling the [[SoftLayer_Network_ContentDelivery_Authentication_Token|setContentAuthenticationWsdl]] method.
+func (r Network_ContentDelivery_Account) GetAuthenticationServiceEndpoints() (resp []datatypes.Container_Network_ContentDelivery_Authentication_ServiceEndpoint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getAuthenticationServiceEndpoints", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns bandwidth data for a given time range. It returns an array of [[SoftLayer_Container_Network_ContentDelivery_Bandwidth_Summary|bandwidth summary]] objects. A [[SoftLayer_Container_Network_ContentDelivery_Bandwidth_Summary|bandwidth summary]] object contains a beginning time, ending time, and bandwidth in bytes. (See the usage sketch further below.)
+//
+// The beginning and ending date parameters have to be timestamps in "yyyy-mm-dd HH24:mi:ss" format, and the time is assumed to be in the Central Standard Time (CST) or Central Daylight Time (CDT) time zone. CDN bandwidth data is stored in Greenwich Mean Time (GMT) internally, and the beginning and ending times are converted to GMT before querying.
+//
+// Unlike server bandwidth, CDN bandwidth returns total bytes consumed within an hour. For example, if you pass "2008-10-10 00:00:00" for a beginning time and "2008-10-10 05:00:00" for an ending time, your return value will have 6 elements of bandwidth summary objects. The first bandwidth summary object will have the total bytes consumed between 2008-10-10 00:00:00 and 2008-10-10 00:59:59, and the last object will have the bandwidth consumed between 2008-10-10 05:00:00 and 2008-10-10 05:59:59. The bandwidth data is updated at 10 minutes after every hour. The queried data is on a two hour time delay. The two hour delay is required to gather bandwidth data from each POP and that is the minimum delay required to create a feasible graph. It usually takes about 8 hours to reconcile all the data from every CDN POP. This hourly data is corrected after 24 hours if necessary. If you consume a large amount of bandwidth, your bandwidth data will be updated the next day.
+func (r Network_ContentDelivery_Account) GetBandwidthData(beginDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Container_Network_ContentDelivery_Bandwidth_Summary, err error) {
+	params := []interface{}{
+		beginDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getBandwidthData", params, &r.Options, &resp)
+	return
+}
+
+// This method returns bandwidth data for a given time range. It returns an array of [[SoftLayer_Container_Network_ContentDelivery_Report_Usage|bandwidth usage report]] objects.
+//
+// These will be first sorted by timestamp, and there will be one entry with that timestamp for each enabled region. The region type 'NONE' is provided only when non-region-specific data is returned. [[SoftLayer_Container_Network_ContentDelivery_Report_Usage|Bandwidth usage report]] objects with a region will never contain non-region-specific data. Non-region-specific values are standardTotal and sslTotal; standardTotal is computed by adding the HTTP Large, Windows Media, Flash and Application Delivery Network bandwidth. The sslTotal is computed by adding the HTTP Large SSL bandwidth and the Application Delivery Network SSL bandwidth.
+//
+// The beginning and ending date parameters have to be timestamps in "yyyy-mm-dd HH24:mi:ss" format, and the time is assumed to be in the Central Standard Time (CST) or Central Daylight Time (CDT) time zone. CDN bandwidth data is stored in Greenwich Mean Time (GMT) internally, and the beginning and ending times are converted to GMT before querying.
+//
+// Unlike server bandwidth, CDN bandwidth returns total bytes consumed within an hour. For example, if you pass "2008-10-10 00:00:00" for a beginning time and "2008-10-10 05:00:00" for an ending time, your return value will have 6 elements of bandwidth summary objects. The first bandwidth summary object will have the total bytes consumed between 2008-10-10 00:00:00 and 2008-10-10 00:59:59, and the last object will have the bandwidth consumed between 2008-10-10 05:00:00 and 2008-10-10 05:59:59. The bandwidth data is updated at 10 minutes after every hour. The queried data is on a two hour time delay. The two hour delay is required to gather bandwidth data from each POP and that is the minimum delay required to create a feasible graph. It usually takes about 8 hours to reconcile all the data from every CDN POP. This hourly data is corrected after 24 hours if necessary. If you consume a large amount of bandwidth, your bandwidth data will be updated the next day.
+func (r Network_ContentDelivery_Account) GetBandwidthDataWithTypes(beginDateTime *datatypes.Time, endDateTime *datatypes.Time, period *string) (resp []datatypes.Container_Network_ContentDelivery_Report_Usage, err error) {
+	params := []interface{}{
+		beginDateTime,
+		endDateTime,
+		period,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getBandwidthDataWithTypes", params, &r.Options, &resp)
+	return
+}
+
+// This method returns a bandwidth graph wrapped in a [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object. The [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object contains a starting time, ending time, graph title, graph binary data, and in and outbound total bytes.
+func (r Network_ContentDelivery_Account) GetBandwidthImage(title *string, beginDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputsExtended, err error) {
+	params := []interface{}{
+		title,
+		beginDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getBandwidthImage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current billing item for a CDN account.
+func (r Network_ContentDelivery_Account) GetBillingItem() (resp datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The name of a CDN account.
+func (r Network_ContentDelivery_Account) GetCdnAccountName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getCdnAccountName", nil, &r.Options, &resp)
+	return
+}
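+
+// Editor's usage sketch (not part of the generated API): pulling five hours of
+// CDN bandwidth summaries. The API interprets the timestamps as CST/CDT; the
+// account ID is a placeholder.
+//
+//	loc, _ := time.LoadLocation("America/Chicago")
+//	begin := datatypes.Time{Time: time.Date(2008, 10, 10, 0, 0, 0, 0, loc)}
+//	end := datatypes.Time{Time: time.Date(2008, 10, 10, 5, 0, 0, 0, loc)}
+//	summaries, err := GetNetworkContentDeliveryAccountService(sess).
+//		Id(98765).
+//		GetBandwidthData(&begin, &end)
+
+// Retrieve A brief note on a CDN account.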
+func (r Network_ContentDelivery_Account) GetCdnAccountNote() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getCdnAccountNote", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The solution type of a CDN account.
+func (r Network_ContentDelivery_Account) GetCdnSolutionName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getCdnSolutionName", nil, &r.Options, &resp)
+	return
+}
+
+// An origin pull mapping is a combination of your customer origin record and an optional CNAME record. You can keep track of your customer origin records separately from your CNAME records. This service returns your customer origin records.
+func (r Network_ContentDelivery_Account) GetCustomerOrigins(mediaType *string) (resp []datatypes.Container_Network_ContentDelivery_OriginPull_Mapping, err error) {
+	params := []interface{}{
+		mediaType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getCustomerOrigins", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates if the CDN account is dependent on another service. If set, this CDN account is limited to these services: createOriginPullMapping, deleteOriginPullRule, getOriginPullMappingInformation, getCdnUrls, purgeCache, loadContent, manageHttpCompression
+func (r Network_ContentDelivery_Account) GetDependantServiceFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getDependantServiceFlag", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns an array of [[SoftLayer_Container_Network_Directory_Listing|Directory Listing]] objects. You must have the CDN_FILE_MANAGE privilege and you can only retrieve directory information within the /media directory. A [[SoftLayer_Container_Network_Directory_Listing|Directory Listing]] object contains type (indicating whether it is a file or a directory), name and file count if it is a directory.
+func (r Network_ContentDelivery_Account) GetDirectoryInformation(directoryName *string) (resp []datatypes.Container_Network_Directory_Listing, err error) {
+	params := []interface{}{
+		directoryName,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getDirectoryInformation", params, &r.Options, &resp)
+	return
+}
+
+// This method returns disk space usage data for your CDN FTP.
+func (r Network_ContentDelivery_Account) GetDiskSpaceUsageDataByDate(beginDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		beginDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getDiskSpaceUsageDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// This method returns a disk usage graph wrapped in a [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object. The [[SoftLayer_Container_Bandwidth_GraphOutputsExtended|Bandwidth Graph]] object contains a starting time, ending time, graph title, graph binary data, and in and outbound total bytes.
+func (r Network_ContentDelivery_Account) GetDiskSpaceUsageImageByDate(beginDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		beginDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getDiskSpaceUsageImageByDate", params, &r.Options, &resp)
+	return
+}
+
+// This method returns your login credentials to the CDN FTP server (the ftp.cdnlayer.service.softlayer.com server). You must have the CDN_FILE_MANAGE privilege. Refer to the service overview of the [[SoftLayer_Network_ContentDelivery_Account::createFtpUser|createFtpUser]] method for more information on the CDN FTP server.
+//
+// If you want to download raw log files, prefix the username with "LOGS-" (without quotes) when logging in. SoftLayer designed CDN accounts so they can have multiple CDN FTP users. However, this method returns the default CDN FTP user information: multi-user support may be implemented in the future.
+func (r Network_ContentDelivery_Account) GetFtpAttributes() (resp datatypes.Container_Network_Authentication_Data, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getFtpAttributes", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates if it is a legacy CDN or not
+func (r Network_ContentDelivery_Account) GetLegacyCdnFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getLegacyCdnFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates if CDN logging is enabled.
+func (r Network_ContentDelivery_Account) GetLogEnabledFlag() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getLogEnabledFlag", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns CDN URLs for static file (http), Flash streaming (rtmp) and Windows Media (mms) streaming services. You can generate your CDN URLs based on the information retrieved by this method.
+func (r Network_ContentDelivery_Account) GetMediaUrls() (resp []datatypes.Container_Network_ContentDelivery_SupportedProtocol, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getMediaUrls", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Network_ContentDelivery_Account object whose ID number corresponds to the ID number of the initial parameter passed to the SoftLayer_Network_ContentDelivery_Account service. You can only retrieve CDN accounts assigned to your SoftLayer customer account.
+func (r Network_ContentDelivery_Account) GetObject() (resp datatypes.Network_ContentDelivery_Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns a list of origin pull configuration data.
+func (r Network_ContentDelivery_Account) GetOriginPullMappingInformation() (resp []datatypes.Container_Network_ContentDelivery_OriginPull_Mapping, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getOriginPullMappingInformation", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns CDN URLs that support Origin Pull mappings.
+func (r Network_ContentDelivery_Account) GetOriginPullSupportedMediaUrls() (resp []datatypes.Container_Network_ContentDelivery_SupportedProtocol, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getOriginPullSupportedMediaUrls", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns the domain name of your Origin Pull rule. It assumes you have already set up an Origin Pull rule. Otherwise, it will throw an exception. The return value is the value of the first parameter (origin pull domain) you provided to the [[SoftLayer_Network_ContentDelivery_Account::createOriginPullRule|createOriginPullRule]] method. See the Error Handling section below for possible errors.
+func (r Network_ContentDelivery_Account) GetOriginPullUrl() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getOriginPullUrl", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns an array of CDN POP (Points of Presence) objects. A [[SoftLayer_Container_Network_ContentDelivery_PointsOfPresence|POP object]] contains the POP id and name.
+func (r Network_ContentDelivery_Account) GetPopNames() (resp []datatypes.Container_Network_ContentDelivery_PointsOfPresence, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getPopNames", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates if the customer is allowed to access the CDN provider's management portal.
+func (r Network_ContentDelivery_Account) GetProviderPortalAccessFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getProviderPortalAccessFlag", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns your login credentials to the CDN provider portal.
+func (r Network_ContentDelivery_Account) GetProviderPortalCredentials() (resp datatypes.Container_Network_Authentication_Data, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getProviderPortalCredentials", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A CDN account's status presented in a more detailed data type.
+func (r Network_ContentDelivery_Account) GetStatus() (resp datatypes.Network_ContentDelivery_Account_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getStatus", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns all token authentication directories.
+func (r Network_ContentDelivery_Account) GetTokenAuthenticationDirectories() (resp []datatypes.Container_Network_Directory_Listing, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getTokenAuthenticationDirectories", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates if the token authentication service is enabled or not.
+func (r Network_ContentDelivery_Account) GetTokenAuthenticationEnabledFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getTokenAuthenticationEnabledFlag", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns your login credentials to the public CDN FTP.
+func (r Network_ContentDelivery_Account) GetVendorFtpAttributes() (resp datatypes.Container_Network_Authentication_Data, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "getVendorFtpAttributes", nil, &r.Options, &resp)
+	return
+}
+
+// Whether you are using Origin Pull or POP Pull, your content will be transferred and cached on a CDN POP (node) on the initial request. If you wish to load your content to all CDN POPs, you may use this service to accomplish that. Please keep in mind that it will take about 10 to 15 minutes to load content to all CDN POPs depending on the load.
+//
+// You can only specify 5 URLs at a time.
+func (r Network_ContentDelivery_Account) LoadContent(objectUrls []string) (resp bool, err error) {
+	params := []interface{}{
+		objectUrls,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "loadContent", params, &r.Options, &resp)
+	return
+}
+
+// HTTP Compression is used to reduce the bandwidth used to deliver an object. You can specify a list of content types that need to be compressed. If you omit the content type parameter, these values will be used by default:
+// * text/plain
+// * text/html
+// * text/css
+// * application/x-javascript
+// * text/javascript
+//
+//
+// Note that files larger than 1MB will never be served with compression regardless of whether their content-type is enabled for compression.
+func (r Network_ContentDelivery_Account) ManageHttpCompression(enableFlag *bool, mimeTypes []string) (resp bool, err error) {
+	params := []interface{}{
+		enableFlag,
+		mimeTypes,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "manageHttpCompression", params, &r.Options, &resp)
+	return
+}
+
+// CDN's cache mechanism works similarly to that of web browsers. When CDN pulls a file from your origin server or from your CDN FTP directory for the first time, it creates a cache file on itself. CDN re-uses cache files to save trips to the origin server and thus speeds up delivering content to visitors. This method removes cached objects on every server in the CDN network. If you see stale content or a file that sends an incorrect header, purging the cache will correct the issue. CDN will pull fresh content from your origin server or your CDN FTP.
+//
+// This method takes an array of URLs. A URL must be exactly as it is requested by clients. An example URL may look like this:
+// * http://.http.cdn.softlayer.net/mycdnname/some_file.txt
+//
+//
+// If you created a CNAME that points to the CDN host, use your CNAME URL instead.
+// * http://image.mydomain.com/some_file.txt
+//
+//
+// It takes approximately 3-5 minutes from submission for the system to delete the requested object on every CDN server.
+func (r Network_ContentDelivery_Account) PurgeCache(objectUrls []string) (resp []datatypes.Container_Network_ContentDelivery_PurgeService_Response, err error) {
+	params := []interface{}{
+		objectUrls,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "purgeCache", params, &r.Options, &resp)
+	return
+}
+
+// If you want to turn off the token authentication, use this method to remove a directory from the token authentication directory.
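+//
+// An illustrative sketch of the call (cdn as in the sketches above; the
+// directory and media type values are hypothetical, and sl.String is the
+// pointer helper from the sl package):
+//
+//	ok, err := cdn.Id(1234).RemoveAuthenticationDirectory(
+//		sl.String("/media/http/secure"), sl.String("HTTP"))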
+func (r Network_ContentDelivery_Account) RemoveAuthenticationDirectory(directory *string, mediaType *string) (resp bool, err error) {
+	params := []interface{}{
+		directory,
+		mediaType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "removeAuthenticationDirectory", params, &r.Options, &resp)
+	return
+}
+
+// With this method you can remove a file or a directory on the CDN FTP server. If a source name ends with a slash (/), this method assumes it is a directory. A source name must be an absolute path. It does not check to see if a file or directory exists before deletion. You can only remove files and directories that are in the /media folder. Be sure to catch an exception for details on an error.
+func (r Network_ContentDelivery_Account) RemoveFile(source *string) (resp bool, err error) {
+	params := []interface{}{
+		source,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "removeFile", params, &r.Options, &resp)
+	return
+}
+
+// CDN servers will invoke a Web Service method to validate a content authentication token. CDN uses the default Web Service provided by SoftLayer to validate a token. A customer can use their own implementation of the token authentication Web Service. A valid SOAP WSDL will look similar to [https://manage.softlayer.com/CdnService/authenticationWsdlExample/wsdl this].
+func (r Network_ContentDelivery_Account) SetAuthenticationServiceEndpoint(webserviceEndpoint *string, protocol *string) (resp bool, err error) {
+	params := []interface{}{
+		webserviceEndpoint,
+		protocol,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "setAuthenticationServiceEndpoint", params, &r.Options, &resp)
+	return
+}
+
+// With a CDN FTP, you can upload content to the CDN host server. Once you have uploaded content, it will be fetched by the CDN POP (Points of Presence) servers as needed.
+//
+// CDN supports three protocols: Flash streaming (rtmp), Windows Media streaming (mms) and HTTP. Once you log in to the CDN FTP server, you will see three directories under the /media directory. You have to upload your content to the proper directory to use the different services. Refer to the [[SoftLayer_Network_ContentDelivery_Account|CDN Account]] service overview for details on the CDN FTP server. "gzip" is supported if you compress your content before uploading, but you have to change its extension to ".gz". [[SoftLayer_Network_ContentDelivery_Account::createOriginPullRule|Origin Pull]] also supports "gzip" content, and you don't have to modify the file extension with Origin Pull. Once uploaded, your content should be available almost immediately to visitors. However, it may take about 30 minutes to propagate files to the entire CDN network after uploading. For more details, visit our [http://knowledgelayer.softlayer.com/topic/cdn KnowledgeLayer].
+//
+// This method updates the password for your CDN FTP account on the ftp.cdnlayer.service.softlayer.com server. You must provide an alphanumeric value for the new password. The characters - _ ! % # $ ^ & * are allowed in addition to alphanumeric characters.
+func (r Network_ContentDelivery_Account) SetFtpPassword(newPassword *string) (resp bool, err error) {
+	params := []interface{}{
+		newPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "setFtpPassword", params, &r.Options, &resp)
+	return
+}
+
+// This method allows you to edit the CDN account note. The maximum length for a CDN account note is 30 characters.
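+//
+// For example (a sketch; cdn as above and the note text is hypothetical):
+//
+//	ok, err := cdn.Id(1234).UpdateNote(sl.String("production CDN"))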
+func (r Network_ContentDelivery_Account) UpdateNote(note *string) (resp bool, err error) {
+	params := []interface{}{
+		note,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "updateNote", params, &r.Options, &resp)
+	return
+}
+
+// With this method, you can upload binary data to the CDN FTP server. This method supports files up to 20 megabytes. You need to use the CDN FTP (ftp.cdnlayer.service.softlayer.com) to upload files larger than 20 MB. This method takes a [[SoftLayer_Container_Utility_File_Attachment]] as its first parameter. A target name must be an absolute path and you can only upload a file to a directory that is in the /media folder.
+func (r Network_ContentDelivery_Account) UploadStream(source *datatypes.Container_Utility_File_Attachment, target *string) (resp bool, err error) {
+	params := []interface{}{
+		source,
+		target,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Account", "uploadStream", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_ContentDelivery_Authentication_Address data type models an individual IP address that CDN allows or denies access from.
+type Network_ContentDelivery_Authentication_Address struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkContentDeliveryAuthenticationAddressService returns an instance of the Network_ContentDelivery_Authentication_Address SoftLayer service
+func GetNetworkContentDeliveryAuthenticationAddressService(sess *session.Session) Network_ContentDelivery_Authentication_Address {
+	return Network_ContentDelivery_Authentication_Address{Session: sess}
+}
+
+func (r Network_ContentDelivery_Authentication_Address) Id(id int) Network_ContentDelivery_Authentication_Address {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_ContentDelivery_Authentication_Address) Mask(mask string) Network_ContentDelivery_Authentication_Address {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_ContentDelivery_Authentication_Address) Filter(filter string) Network_ContentDelivery_Authentication_Address {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_ContentDelivery_Authentication_Address) Limit(limit int) Network_ContentDelivery_Authentication_Address {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_ContentDelivery_Authentication_Address) Offset(offset int) Network_ContentDelivery_Authentication_Address {
+	r.Options.Offset = &offset
+	return r
+}
+
+// This method creates an authentication IP record. Required parameters are
+//
+//
+// * cdnAccountId - A CDN account id that belongs to your SoftLayer Account
+// * ipAddress - An IP address or an IP range
+// * accessType - It can be "ALLOW" or "DENY"
+func (r Network_ContentDelivery_Authentication_Address) CreateObject(templateObject *datatypes.Network_ContentDelivery_Authentication_Address) (resp datatypes.Network_ContentDelivery_Authentication_Address, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Address", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// This method deletes an authentication IP address.
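+//
+// A sketch of the call, assuming 5678 is the id of an authentication IP
+// record on one of your CDN accounts:
+//
+//	addrSvc := GetNetworkContentDeliveryAuthenticationAddressService(sess)
+//	ok, err := addrSvc.Id(5678).DeleteObject()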
+func (r Network_ContentDelivery_Authentication_Address) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Address", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// This method lets you edit an authentication IP object by passing a modified object.
+func (r Network_ContentDelivery_Authentication_Address) EditObject(templateObject *datatypes.Network_ContentDelivery_Authentication_Address) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Address", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Network_ContentDelivery_Authentication_Address object whose ID number corresponds to the ID number of the initial parameter passed to the SoftLayer_Network_ContentDelivery_Authentication_Address service. You can only retrieve authentication IP addresses assigned to one of your CDN accounts.
+func (r Network_ContentDelivery_Authentication_Address) GetObject() (resp datatypes.Network_ContentDelivery_Authentication_Address, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Address", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// The authentication IP address match occurs from the highest priority IP to the lowest. This method will be helpful if you want to modify the order (priority) of the authentication IP addresses. You can use this method instead of editing individual authentication IP addresses.
+//
+// You can retrieve authentication IP addresses using the [[SoftLayer_Network_ContentDelivery_Account::getAuthenticationIpAddresses|getAuthenticationIpAddresses]] method. Then, rearrange the authentication IP addresses and pass them to this method. When creating template objects as parameters, make sure to include the id of each authentication IP address. You must provide every authentication IP address. New priorities will be assigned to each authentication IP address in the order they are passed.
+func (r Network_ContentDelivery_Authentication_Address) RearrangeAuthenticationIp(cdnAccountId *int, templateObjects []datatypes.Network_ContentDelivery_Authentication_Address) (resp bool, err error) {
+	params := []interface{}{
+		cdnAccountId,
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Address", "rearrangeAuthenticationIp", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_ContentDelivery_Authentication_Token data type models an individual authentication token for a CDN account.
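+//
+// A hypothetical sketch of the non-deprecated workflow using the getTimedToken
+// method defined below (all argument values are illustrative):
+//
+//	tok := GetNetworkContentDeliveryAuthenticationTokenService(sess)
+//	token, err := tok.GetTimedToken(sl.Int(1234), sl.Int(3600),
+//		sl.String("211.37."), sl.String("example.com"), sl.String("HTTP"))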
+type Network_ContentDelivery_Authentication_Token struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkContentDeliveryAuthenticationTokenService returns an instance of the Network_ContentDelivery_Authentication_Token SoftLayer service
+func GetNetworkContentDeliveryAuthenticationTokenService(sess *session.Session) Network_ContentDelivery_Authentication_Token {
+	return Network_ContentDelivery_Authentication_Token{Session: sess}
+}
+
+func (r Network_ContentDelivery_Authentication_Token) Id(id int) Network_ContentDelivery_Authentication_Token {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_ContentDelivery_Authentication_Token) Mask(mask string) Network_ContentDelivery_Authentication_Token {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_ContentDelivery_Authentication_Token) Filter(filter string) Network_ContentDelivery_Authentication_Token {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_ContentDelivery_Authentication_Token) Limit(limit int) Network_ContentDelivery_Authentication_Token {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_ContentDelivery_Authentication_Token) Offset(offset int) Network_ContentDelivery_Authentication_Token {
+	r.Options.Offset = &offset
+	return r
+}
+
+// This method is deprecated! Use the [[SoftLayer_Network_ContentDelivery_Authentication_Token::getTimedToken|getTimedToken]] method.
+//
+// This method creates a managed authentication token. When passing a parameter, the only required value is your CDN account id, which can be obtained from the [[SoftLayer_Account::getCdnAccounts|getCdnAccounts]] method. There are 3 optional parameters you can pass:
+//
+//
+// * name - This helps you keep track of managed tokens.
+// * referrer - If set, the token validation will check the client's referrer. Keep in mind that if a client doesn't have the referrer information, the token validation will fail.
+// * clientIp - If set, the token validation will check the client's IP address.
+//
+//
+func (r Network_ContentDelivery_Authentication_Token) CreateObject(templateObject *datatypes.Network_ContentDelivery_Authentication_Token) (resp datatypes.Network_ContentDelivery_Authentication_Token, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated!
+//
+// This method returns all managed tokens for a CDN account.
+func (r Network_ContentDelivery_Authentication_Token) GetAllManagedTokens(cdnAccountId *int) (resp []datatypes.Network_ContentDelivery_Authentication_Token, err error) {
+	params := []interface{}{
+		cdnAccountId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "getAllManagedTokens", params, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated!
+//
+// getObject retrieves the SoftLayer_Network_ContentDelivery_Authentication_Token object whose ID number corresponds to the ID number of the initial parameter passed to the SoftLayer_Network_ContentDelivery_Authentication_Token service. You can only retrieve managed tokens assigned to one of your CDN accounts.
+func (r Network_ContentDelivery_Authentication_Token) GetObject() (resp datatypes.Network_ContentDelivery_Authentication_Token, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns an authentication token that expires after the number of seconds you specify. You can provide a number of seconds to manage the token life; this parameter sets the expiration time for a token. A valid lifetime must be an integer between 60 and 604800 (1 week). A customer can also provide client IP and/or referrer information. If used, only a client from the same IP and referrer can view the protected content.
+//
+// A valid IP address must be in IPv4 format or an IP block. If you want to specify the IP block 211.37.0.0/16, you can enter "211.37." instead. IP blocks can be specified in multiples of 8 bits ("8 bits times n").
+//
+// The referrer is the URL of the previous webpage from which a link was followed. A referrer should not include the "http://" prefix, and it can be a maximum of 30 characters.
+func (r Network_ContentDelivery_Authentication_Token) GetTimedToken(cdnAccountId *int, tokenLife *int, clientIp *string, referrer *string, mediaType *string) (resp string, err error) {
+	params := []interface{}{
+		cdnAccountId,
+		tokenLife,
+		clientIp,
+		referrer,
+		mediaType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "getTimedToken", params, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated!
+//
+// This method revokes all managed tokens belonging to a CDN account.
+func (r Network_ContentDelivery_Authentication_Token) RevokeAllManagedTokens(cdnAccountId *int) (resp bool, err error) {
+	params := []interface{}{
+		cdnAccountId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "revokeAllManagedTokens", params, &r.Options, &resp)
+	return
+}
+
+// This method revokes all tokens belonging to a CDN account. Valid media types are "HTTP", "FLASH" and "WM".
+func (r Network_ContentDelivery_Authentication_Token) RevokeAllTokens(cdnAccountId *int, mediaType *string) (resp bool, err error) {
+	params := []interface{}{
+		cdnAccountId,
+		mediaType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "revokeAllTokens", params, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated!
+//
+// Revokes a managed token. If you revoke a token, the token will be removed from SoftLayer's system, but it will not remove your content from the CDN FTP. The content that requires token validation will not be available to the visitor who is using a revoked token.
+func (r Network_ContentDelivery_Authentication_Token) RevokeManagedToken(cdnAccountId *int, token *string) (resp bool, err error) {
+	params := []interface{}{
+		cdnAccountId,
+		token,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "revokeManagedToken", params, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated!
+//
+// Deletes multiple managed tokens
+func (r Network_ContentDelivery_Authentication_Token) RevokeManagedTokens(templateObjects []datatypes.Network_ContentDelivery_Authentication_Token) (resp bool, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_ContentDelivery_Authentication_Token", "revokeManagedTokens", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_Customer_Subnet data type contains general information relating to a single customer subnet (remote).
+type Network_Customer_Subnet struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkCustomerSubnetService returns an instance of the Network_Customer_Subnet SoftLayer service
+func GetNetworkCustomerSubnetService(sess *session.Session) Network_Customer_Subnet {
+	return Network_Customer_Subnet{Session: sess}
+}
+
+func (r Network_Customer_Subnet) Id(id int) Network_Customer_Subnet {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Customer_Subnet) Mask(mask string) Network_Customer_Subnet {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Customer_Subnet) Filter(filter string) Network_Customer_Subnet {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Customer_Subnet) Limit(limit int) Network_Customer_Subnet {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Customer_Subnet) Offset(offset int) Network_Customer_Subnet {
+	r.Options.Offset = &offset
+	return r
+}
+
+// For IPSec network tunnels, customers can create their local subnets using this method. After the customer subnet is created successfully, it can then be added to the IPSec network tunnel.
+func (r Network_Customer_Subnet) CreateObject(templateObject *datatypes.Network_Customer_Subnet) (resp datatypes.Network_Customer_Subnet, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Customer_Subnet", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve All IP addresses associated with a subnet.
+func (r Network_Customer_Subnet) GetIpAddresses() (resp []datatypes.Network_Customer_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Customer_Subnet", "getIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Network_Customer_Subnet object whose ID number corresponds to the ID number of the initial parameter passed to the SoftLayer_Network_Customer_Subnet service. You can only retrieve the subnet whose account matches the account that your portal user is assigned to.
+func (r Network_Customer_Subnet) GetObject() (resp datatypes.Network_Customer_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Customer_Subnet", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_Firewall_AccessControlList data type contains general information relating to a single SoftLayer firewall access control list. This is the object which ties the running rules to a specific context. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request.
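+//
+// An illustrative sketch of reading the currently running rules (the ACL id
+// 91011 is hypothetical):
+//
+//	acl := GetNetworkFirewallAccessControlListService(sess)
+//	rules, err := acl.Id(91011).GetRules()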
+type Network_Firewall_AccessControlList struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkFirewallAccessControlListService returns an instance of the Network_Firewall_AccessControlList SoftLayer service +func GetNetworkFirewallAccessControlListService(sess *session.Session) Network_Firewall_AccessControlList { + return Network_Firewall_AccessControlList{Session: sess} +} + +func (r Network_Firewall_AccessControlList) Id(id int) Network_Firewall_AccessControlList { + r.Options.Id = &id + return r +} + +func (r Network_Firewall_AccessControlList) Mask(mask string) Network_Firewall_AccessControlList { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Firewall_AccessControlList) Filter(filter string) Network_Firewall_AccessControlList { + r.Options.Filter = filter + return r +} + +func (r Network_Firewall_AccessControlList) Limit(limit int) Network_Firewall_AccessControlList { + r.Options.Limit = &limit + return r +} + +func (r Network_Firewall_AccessControlList) Offset(offset int) Network_Firewall_AccessControlList { + r.Options.Offset = &offset + return r +} + +// Retrieve The update requests made for this firewall. +func (r Network_Firewall_AccessControlList) GetNetworkFirewallUpdateRequests() (resp []datatypes.Network_Firewall_Update_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_AccessControlList", "getNetworkFirewallUpdateRequests", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Firewall_AccessControlList) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_AccessControlList", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// getObject returns a SoftLayer_Network_Firewall_AccessControlList object. You can only get objects for servers attached to your account that have a network firewall enabled. +func (r Network_Firewall_AccessControlList) GetObject() (resp datatypes.Network_Firewall_AccessControlList, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_AccessControlList", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The currently running rule set of this context access control list firewall. +func (r Network_Firewall_AccessControlList) GetRules() (resp []datatypes.Network_Vlan_Firewall_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_AccessControlList", "getRules", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Firewall_Interface data type contains general information relating to a single SoftLayer firewall interface. This is the object which ties the firewall context access control list to a firewall. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request. 
+type Network_Firewall_Interface struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkFirewallInterfaceService returns an instance of the Network_Firewall_Interface SoftLayer service +func GetNetworkFirewallInterfaceService(sess *session.Session) Network_Firewall_Interface { + return Network_Firewall_Interface{Session: sess} +} + +func (r Network_Firewall_Interface) Id(id int) Network_Firewall_Interface { + r.Options.Id = &id + return r +} + +func (r Network_Firewall_Interface) Mask(mask string) Network_Firewall_Interface { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Firewall_Interface) Filter(filter string) Network_Firewall_Interface { + r.Options.Filter = filter + return r +} + +func (r Network_Firewall_Interface) Limit(limit int) Network_Firewall_Interface { + r.Options.Limit = &limit + return r +} + +func (r Network_Firewall_Interface) Offset(offset int) Network_Firewall_Interface { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Network_Firewall_Interface) GetFirewallContextAccessControlLists() (resp []datatypes.Network_Firewall_AccessControlList, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Interface", "getFirewallContextAccessControlLists", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Firewall_Interface) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Interface", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// getObject returns a SoftLayer_Network_Firewall_Interface object. You can only get objects for servers attached to your account that have a network firewall enabled. 
+func (r Network_Firewall_Interface) GetObject() (resp datatypes.Network_Firewall_Interface, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Interface", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Network_Firewall_Module_Context_Interface struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkFirewallModuleContextInterfaceService returns an instance of the Network_Firewall_Module_Context_Interface SoftLayer service
+func GetNetworkFirewallModuleContextInterfaceService(sess *session.Session) Network_Firewall_Module_Context_Interface {
+	return Network_Firewall_Module_Context_Interface{Session: sess}
+}
+
+func (r Network_Firewall_Module_Context_Interface) Id(id int) Network_Firewall_Module_Context_Interface {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Firewall_Module_Context_Interface) Mask(mask string) Network_Firewall_Module_Context_Interface {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Firewall_Module_Context_Interface) Filter(filter string) Network_Firewall_Module_Context_Interface {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Firewall_Module_Context_Interface) Limit(limit int) Network_Firewall_Module_Context_Interface {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Firewall_Module_Context_Interface) Offset(offset int) Network_Firewall_Module_Context_Interface {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve
+func (r Network_Firewall_Module_Context_Interface) GetFirewallContextAccessControlLists() (resp []datatypes.Network_Firewall_AccessControlList, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Module_Context_Interface", "getFirewallContextAccessControlLists", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Firewall_Module_Context_Interface) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Module_Context_Interface", "getNetworkVlan", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Firewall_Module_Context_Interface) GetObject() (resp datatypes.Network_Firewall_Module_Context_Interface, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Module_Context_Interface", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_Firewall_Template type contains general information for a SoftLayer network firewall template.
+//
+// Firewall templates are recommended rule sets for use with SoftLayer Hardware Firewall (Dedicated). These optimized templates are designed to balance security restriction with application availability. The templates given may be altered to provide custom network security, or may be used as-is for basic security. At least one rule set MUST be applied for the firewall to block traffic. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request.
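+//
+// For example, listing every available template (a sketch):
+//
+//	tmpl := GetNetworkFirewallTemplateService(sess)
+//	templates, err := tmpl.GetAllObjects()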
+type Network_Firewall_Template struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkFirewallTemplateService returns an instance of the Network_Firewall_Template SoftLayer service +func GetNetworkFirewallTemplateService(sess *session.Session) Network_Firewall_Template { + return Network_Firewall_Template{Session: sess} +} + +func (r Network_Firewall_Template) Id(id int) Network_Firewall_Template { + r.Options.Id = &id + return r +} + +func (r Network_Firewall_Template) Mask(mask string) Network_Firewall_Template { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Firewall_Template) Filter(filter string) Network_Firewall_Template { + r.Options.Filter = filter + return r +} + +func (r Network_Firewall_Template) Limit(limit int) Network_Firewall_Template { + r.Options.Limit = &limit + return r +} + +func (r Network_Firewall_Template) Offset(offset int) Network_Firewall_Template { + r.Options.Offset = &offset + return r +} + +// Get all available firewall template objects. +// +// ''getAllObjects'' returns an array of SoftLayer_Network_Firewall_Template objects upon success. +func (r Network_Firewall_Template) GetAllObjects() (resp []datatypes.Network_Firewall_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Template", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getObject returns a SoftLayer_Network_Firewall_Template object. You can retrieve all available firewall templates. getAllObjects returns an array of all available SoftLayer_Network_Firewall_Template objects. You can use these templates to generate a [[SoftLayer Network Firewall Update Request]]. +// +// @SLDNDocumentation Service See Also SoftLayer_Network_Firewall_Update_Request +func (r Network_Firewall_Template) GetObject() (resp datatypes.Network_Firewall_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Template", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The rule set that belongs to this firewall rules template. +func (r Network_Firewall_Template) GetRules() (resp []datatypes.Network_Firewall_Template_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Template", "getRules", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Firewall_Update_Request data type contains information relating to a SoftLayer network firewall update request. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. 
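+//
+// A minimal sketch of submitting an update request; the field names follow
+// the SoftLayer_Network_Firewall_Update_Request data types, and every value
+// shown is hypothetical:
+//
+//	req := GetNetworkFirewallUpdateRequestService(sess)
+//	created, err := req.CreateObject(&datatypes.Network_Firewall_Update_Request{
+//		NetworkComponentFirewallId: sl.Int(112233),
+//		Rules: []datatypes.Network_Firewall_Update_Request_Rule{{
+//			OrderValue:                sl.Int(1),
+//			Action:                    sl.String("permit"),
+//			Protocol:                  sl.String("tcp"),
+//			SourceIpAddress:           sl.String("0.0.0.0"),
+//			DestinationIpAddress:      sl.String("any"),
+//			DestinationPortRangeStart: sl.Int(443),
+//			DestinationPortRangeEnd:   sl.Int(443),
+//		}},
+//	})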
+type Network_Firewall_Update_Request struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkFirewallUpdateRequestService returns an instance of the Network_Firewall_Update_Request SoftLayer service +func GetNetworkFirewallUpdateRequestService(sess *session.Session) Network_Firewall_Update_Request { + return Network_Firewall_Update_Request{Session: sess} +} + +func (r Network_Firewall_Update_Request) Id(id int) Network_Firewall_Update_Request { + r.Options.Id = &id + return r +} + +func (r Network_Firewall_Update_Request) Mask(mask string) Network_Firewall_Update_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Firewall_Update_Request) Filter(filter string) Network_Firewall_Update_Request { + r.Options.Filter = filter + return r +} + +func (r Network_Firewall_Update_Request) Limit(limit int) Network_Firewall_Update_Request { + r.Options.Limit = &limit + return r +} + +func (r Network_Firewall_Update_Request) Offset(offset int) Network_Firewall_Update_Request { + r.Options.Offset = &offset + return r +} + +// Create a new firewall update request. The SoftLayer_Network_Firewall_Update_Request object passed to this function must have at least one rule. +// +// ''createObject'' returns a Boolean ''true'' on successful object creation or ''false'' if your firewall update request was unable to be created. +func (r Network_Firewall_Update_Request) CreateObject(templateObject *datatypes.Network_Firewall_Update_Request) (resp datatypes.Network_Firewall_Update_Request, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "createObject", params, &r.Options, &resp) + return +} + +// Retrieve The user that authorized this firewall update request. +func (r Network_Firewall_Update_Request) GetAuthorizingUser() (resp datatypes.User_Interface, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getAuthorizingUser", nil, &r.Options, &resp) + return +} + +// Get the possible attribute values for a firewall update request rule. These are the valid values which may be submitted as rule parameters for a firewall update request. +// +// ''getFirewallUpdateRequestRuleAttributes'' returns a SoftLayer_Container_Utility_Network_Firewall_Rule_Attribute object upon success. +func (r Network_Firewall_Update_Request) GetFirewallUpdateRequestRuleAttributes() (resp datatypes.Container_Utility_Network_Firewall_Rule_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getFirewallUpdateRequestRuleAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve The downstream virtual server that the rule set will be applied to. +func (r Network_Firewall_Update_Request) GetGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getGuest", nil, &r.Options, &resp) + return +} + +// Retrieve The downstream server that the rule set will be applied to. +func (r Network_Firewall_Update_Request) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The network component firewall that the rule set will be applied to. 
+func (r Network_Firewall_Update_Request) GetNetworkComponentFirewall() (resp datatypes.Network_Component_Firewall, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getNetworkComponentFirewall", nil, &r.Options, &resp)
+	return
+}
+
+// ''getObject'' returns a SoftLayer_Network_Firewall_Update_Request object. You can only get historical objects for servers attached to your account that have a network firewall enabled. ''createObject'' inserts a new SoftLayer_Network_Firewall_Update_Request object. You can only insert requests for servers attached to your account that have a network firewall enabled. ''getFirewallUpdateRequestRuleAttributes'' gets the possible attribute values for a firewall update request rule.
+func (r Network_Firewall_Update_Request) GetObject() (resp datatypes.Network_Firewall_Update_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The group of rules contained within the update request.
+func (r Network_Firewall_Update_Request) GetRules() (resp []datatypes.Network_Firewall_Update_Request_Rule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "getRules", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Firewall_Update_Request) UpdateRuleNote(fwRule *datatypes.Network_Component_Firewall_Rule, note *string) (resp bool, err error) {
+	params := []interface{}{
+		fwRule,
+		note,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request", "updateRuleNote", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_Firewall_Update_Request_Rule type contains information relating to a SoftLayer network firewall update request rule. This rule is a member of a [[SoftLayer Network Firewall Update Request]]. Use the [[SoftLayer Network Component Firewall]] service to view current rules. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates.
+type Network_Firewall_Update_Request_Rule struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkFirewallUpdateRequestRuleService returns an instance of the Network_Firewall_Update_Request_Rule SoftLayer service
+func GetNetworkFirewallUpdateRequestRuleService(sess *session.Session) Network_Firewall_Update_Request_Rule {
+	return Network_Firewall_Update_Request_Rule{Session: sess}
+}
+
+func (r Network_Firewall_Update_Request_Rule) Id(id int) Network_Firewall_Update_Request_Rule {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Firewall_Update_Request_Rule) Mask(mask string) Network_Firewall_Update_Request_Rule {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Firewall_Update_Request_Rule) Filter(filter string) Network_Firewall_Update_Request_Rule {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Firewall_Update_Request_Rule) Limit(limit int) Network_Firewall_Update_Request_Rule {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Firewall_Update_Request_Rule) Offset(offset int) Network_Firewall_Update_Request_Rule {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Create a new firewall update request. The SoftLayer_Network_Firewall_Update_Request object passed to this function must have at least one rule.
+//
+// ''createObject'' returns a Boolean ''true'' on successful object creation or ''false'' if your firewall update request was unable to be created.
+func (r Network_Firewall_Update_Request_Rule) CreateObject(templateObject *datatypes.Network_Firewall_Update_Request_Rule) (resp datatypes.Network_Firewall_Update_Request_Rule, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request_Rule", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The update request that this rule belongs to.
+func (r Network_Firewall_Update_Request_Rule) GetFirewallUpdateRequest() (resp datatypes.Network_Firewall_Update_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request_Rule", "getFirewallUpdateRequest", nil, &r.Options, &resp)
+	return
+}
+
+// getObject returns a SoftLayer_Network_Firewall_Update_Request_Rule object. You can only get historical objects for servers attached to your account that have a network firewall enabled. createObject inserts a new SoftLayer_Network_Firewall_Update_Request_Rule object. Use the SoftLayer_Network_Firewall_Update_Request to create groups of rules for an update request.
+func (r Network_Firewall_Update_Request_Rule) GetObject() (resp datatypes.Network_Firewall_Update_Request_Rule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request_Rule", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Validate the supplied firewall request rule against the object it will apply to. For IPv4 rules, pass in an instance of SoftLayer_Network_Firewall_Update_Request_Rule. For IPv6 rules, pass in an instance of SoftLayer_Network_Firewall_Update_Request_Rule_Version6. The ID of the applied-to object can either be applyToComponentId (an ID of a SoftLayer_Network_Component_Firewall) or applyToAclId (an ID of a SoftLayer_Network_Firewall_Module_Context_Interface_AccessControlList). One, and only one, of applyToComponentId and applyToAclId can be specified.
+//
+// If validation is successful, nothing is returned. If validation is unsuccessful, an exception is thrown explaining the nature of the validation error.
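+//
+// A sketch of validating an IPv4 rule against a component firewall (the rule
+// values and the component firewall id 112233 are hypothetical):
+//
+//	rule := datatypes.Network_Firewall_Update_Request_Rule{
+//		OrderValue: sl.Int(1),
+//		Action:     sl.String("permit"),
+//		Protocol:   sl.String("tcp"),
+//	}
+//	ruleSvc := GetNetworkFirewallUpdateRequestRuleService(sess)
+//	err := ruleSvc.ValidateRule(&rule, sl.Int(112233), nil)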
+func (r Network_Firewall_Update_Request_Rule) ValidateRule(rule *datatypes.Network_Firewall_Update_Request_Rule, applyToComponentId *int, applyToAclId *int) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		rule,
+		applyToComponentId,
+		applyToAclId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Firewall_Update_Request_Rule", "validateRule", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Network_Gateway struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkGatewayService returns an instance of the Network_Gateway SoftLayer service
+func GetNetworkGatewayService(sess *session.Session) Network_Gateway {
+	return Network_Gateway{Session: sess}
+}
+
+func (r Network_Gateway) Id(id int) Network_Gateway {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Gateway) Mask(mask string) Network_Gateway {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Gateway) Filter(filter string) Network_Gateway {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Gateway) Limit(limit int) Network_Gateway {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Gateway) Offset(offset int) Network_Gateway {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Start the asynchronous process to bypass all VLANs. Any VLANs that are already bypassed will be ignored. The status field can be checked for progress.
+func (r Network_Gateway) BypassAllVlans() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "bypassAllVlans", nil, &r.Options, &resp)
+	return
+}
+
+// Start the asynchronous process to bypass the provided VLANs. The VLANs must already be attached. Any VLANs that are already bypassed will be ignored. The status field can be checked for progress.
+func (r Network_Gateway) BypassVlans(vlans []datatypes.Network_Gateway_Vlan) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		vlans,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "bypassVlans", params, &r.Options, &resp)
+	return
+}
+
+// Create and return a new gateway. This object can be created with any number of members or VLANs, but they all must be in the same pod. Creating a gateway with members and/or VLANs attached is equivalent to individually calling their createObject methods, except this will start a single asynchronous process to set up the gateway. The status of this process can be checked using the status field.
+func (r Network_Gateway) CreateObject(templateObject *datatypes.Network_Gateway) (resp datatypes.Network_Gateway, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Edit this gateway. Currently, the only value that can be edited is the name.
+func (r Network_Gateway) EditObject(templateObject *datatypes.Network_Gateway) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account for this gateway.
+func (r Network_Gateway) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All VLANs trunked to this gateway.
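+//
+// For example (a sketch; the gateway id 4321 is hypothetical):
+//
+//	gw := GetNetworkGatewayService(sess)
+//	vlans, err := gw.Id(4321).GetInsideVlans()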
+func (r Network_Gateway) GetInsideVlans() (resp []datatypes.Network_Gateway_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getInsideVlans", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The members for this gateway.
+func (r Network_Gateway) GetMembers() (resp []datatypes.Network_Gateway_Member, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getMembers", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Gateway) GetObject() (resp datatypes.Network_Gateway, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Get all VLANs that can become inside VLANs on this gateway. This means the VLAN must not already be an inside VLAN, must be on the same router as this gateway, must not be a gateway transit VLAN, and must not be firewalled.
+func (r Network_Gateway) GetPossibleInsideVlans() (resp []datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPossibleInsideVlans", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The private gateway IP address.
+func (r Network_Gateway) GetPrivateIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPrivateIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The private VLAN for accessing this gateway.
+func (r Network_Gateway) GetPrivateVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPrivateVlan", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The public gateway IP address.
+func (r Network_Gateway) GetPublicIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPublicIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The public gateway IPv6 address.
+func (r Network_Gateway) GetPublicIpv6Address() (resp datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPublicIpv6Address", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The public VLAN for accessing this gateway.
+func (r Network_Gateway) GetPublicVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getPublicVlan", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current status of the gateway.
+func (r Network_Gateway) GetStatus() (resp datatypes.Network_Gateway_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "getStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Start the asynchronous process to unbypass all VLANs. Any VLANs that are already unbypassed will be ignored. The status field can be checked for progress.
+func (r Network_Gateway) UnbypassAllVlans() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway", "unbypassAllVlans", nil, &r.Options, &resp)
+	return
+}
+
+// Start the asynchronous process to unbypass the provided VLANs. The VLANs must already be attached. Any VLANs that are already unbypassed will be ignored. The status field can be checked for progress.
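+//
+// A sketch of routing previously fetched attachments back through the gateway
+// (gw and vlans come from the getInsideVlans sketch above):
+//
+//	err = gw.Id(4321).UnbypassVlans(vlans)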
+func (r Network_Gateway) UnbypassVlans(vlans []datatypes.Network_Gateway_Vlan) (err error) { + var resp datatypes.Void + params := []interface{}{ + vlans, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway", "unbypassVlans", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Gateway_Member struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkGatewayMemberService returns an instance of the Network_Gateway_Member SoftLayer service +func GetNetworkGatewayMemberService(sess *session.Session) Network_Gateway_Member { + return Network_Gateway_Member{Session: sess} +} + +func (r Network_Gateway_Member) Id(id int) Network_Gateway_Member { + r.Options.Id = &id + return r +} + +func (r Network_Gateway_Member) Mask(mask string) Network_Gateway_Member { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Gateway_Member) Filter(filter string) Network_Gateway_Member { + r.Options.Filter = filter + return r +} + +func (r Network_Gateway_Member) Limit(limit int) Network_Gateway_Member { + r.Options.Limit = &limit + return r +} + +func (r Network_Gateway_Member) Offset(offset int) Network_Gateway_Member { + r.Options.Offset = &offset + return r +} + +// Create a new hardware member on the gateway. This also asynchronously sets up the network for this member. Progress of this process can be monitored via the gateway status. All members created with this object must have no VLANs attached. +func (r Network_Gateway_Member) CreateObject(templateObject *datatypes.Network_Gateway_Member) (resp datatypes.Network_Gateway_Member, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Member", "createObject", params, &r.Options, &resp) + return +} + +// Create multiple new hardware members on the gateway. This also asynchronously sets up the network for the members. Progress of this process can be monitored via the gateway status. All members created with this object must have no VLANs attached. +func (r Network_Gateway_Member) CreateObjects(templateObjects []datatypes.Network_Gateway_Member) (resp []datatypes.Network_Gateway_Member, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Member", "createObjects", params, &r.Options, &resp) + return +} + +// Retrieve The device for this member. +func (r Network_Gateway_Member) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Member", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway this member belongs to. 
+func (r Network_Gateway_Member) GetNetworkGateway() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Member", "getNetworkGateway", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Gateway_Member) GetObject() (resp datatypes.Network_Gateway_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Member", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Gateway_Status struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkGatewayStatusService returns an instance of the Network_Gateway_Status SoftLayer service +func GetNetworkGatewayStatusService(sess *session.Session) Network_Gateway_Status { + return Network_Gateway_Status{Session: sess} +} + +func (r Network_Gateway_Status) Id(id int) Network_Gateway_Status { + r.Options.Id = &id + return r +} + +func (r Network_Gateway_Status) Mask(mask string) Network_Gateway_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Gateway_Status) Filter(filter string) Network_Gateway_Status { + r.Options.Filter = filter + return r +} + +func (r Network_Gateway_Status) Limit(limit int) Network_Gateway_Status { + r.Options.Limit = &limit + return r +} + +func (r Network_Gateway_Status) Offset(offset int) Network_Gateway_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Gateway_Status) GetObject() (resp datatypes.Network_Gateway_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Status", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Gateway_Vlan struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkGatewayVlanService returns an instance of the Network_Gateway_Vlan SoftLayer service +func GetNetworkGatewayVlanService(sess *session.Session) Network_Gateway_Vlan { + return Network_Gateway_Vlan{Session: sess} +} + +func (r Network_Gateway_Vlan) Id(id int) Network_Gateway_Vlan { + r.Options.Id = &id + return r +} + +func (r Network_Gateway_Vlan) Mask(mask string) Network_Gateway_Vlan { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Gateway_Vlan) Filter(filter string) Network_Gateway_Vlan { + r.Options.Filter = filter + return r +} + +func (r Network_Gateway_Vlan) Limit(limit int) Network_Gateway_Vlan { + r.Options.Limit = &limit + return r +} + +func (r Network_Gateway_Vlan) Offset(offset int) Network_Gateway_Vlan { + r.Options.Offset = &offset + return r +} + +// Start the asynchronous process to bypass/unroute the VLAN from this gateway. +func (r Network_Gateway_Vlan) Bypass() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "bypass", nil, &r.Options, &resp) + return +} + +// Create a new VLAN attachment. If the bypassFlag is false, this will also create an asynchronous process to route the VLAN through the gateway. 
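+//
+// A sketch of attaching and routing a VLAN; the field names follow the
+// SoftLayer_Network_Gateway_Vlan data type, and the ids are hypothetical:
+//
+//	gvlan := GetNetworkGatewayVlanService(sess)
+//	att, err := gvlan.CreateObject(&datatypes.Network_Gateway_Vlan{
+//		NetworkGatewayId: sl.Int(4321),
+//		NetworkVlanId:    sl.Int(8765),
+//		BypassFlag:       sl.Bool(false),
+//	})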
+func (r Network_Gateway_Vlan) CreateObject(templateObject *datatypes.Network_Gateway_Vlan) (resp datatypes.Network_Gateway_Vlan, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Create multiple new VLAN attachments. If the bypassFlag is false, this will also create an asynchronous process to route the VLANs through the gateway.
+func (r Network_Gateway_Vlan) CreateObjects(templateObjects []datatypes.Network_Gateway_Vlan) (resp []datatypes.Network_Gateway_Vlan, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "createObjects", params, &r.Options, &resp)
+	return
+}
+
+// Start the asynchronous process to detach this VLAN from the gateway.
+func (r Network_Gateway_Vlan) DeleteObject() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// Detach several VLANs. This will not detach them right away, but rather start an asynchronous process to detach them.
+func (r Network_Gateway_Vlan) DeleteObjects(templateObjects []datatypes.Network_Gateway_Vlan) (resp bool, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "deleteObjects", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The gateway this VLAN is attached to.
+func (r Network_Gateway_Vlan) GetNetworkGateway() (resp datatypes.Network_Gateway, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "getNetworkGateway", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network VLAN record.
+func (r Network_Gateway_Vlan) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "getNetworkVlan", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Gateway_Vlan) GetObject() (resp datatypes.Network_Gateway_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Start the asynchronous process to route the VLAN to this gateway.
+func (r Network_Gateway_Vlan) Unbypass() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Network_Gateway_Vlan", "unbypass", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_LBaaS_Listener type presents a data structure for a load balancer's listener, also called a frontend.
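+//
+// A hypothetical sketch of defining a frontend/backend pair via the
+// updateLoadBalancerProtocols method defined below; the field names follow
+// the SoftLayer_Network_LBaaS_LoadBalancerProtocolConfiguration container,
+// and the UUID and ports are illustrative:
+//
+//	lis := GetNetworkLBaaSListenerService(sess)
+//	lb, err := lis.UpdateLoadBalancerProtocols(
+//		sl.String("11111111-2222-3333-4444-555555555555"),
+//		[]datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration{{
+//			FrontendProtocol: sl.String("HTTP"),
+//			FrontendPort:     sl.Int(80),
+//			BackendProtocol:  sl.String("HTTP"),
+//			BackendPort:      sl.Int(8080),
+//		}})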
+type Network_LBaaS_Listener struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLBaaSListenerService returns an instance of the Network_LBaaS_Listener SoftLayer service +func GetNetworkLBaaSListenerService(sess *session.Session) Network_LBaaS_Listener { + return Network_LBaaS_Listener{Session: sess} +} + +func (r Network_LBaaS_Listener) Id(id int) Network_LBaaS_Listener { + r.Options.Id = &id + return r +} + +func (r Network_LBaaS_Listener) Mask(mask string) Network_LBaaS_Listener { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LBaaS_Listener) Filter(filter string) Network_LBaaS_Listener { + r.Options.Filter = filter + return r +} + +func (r Network_LBaaS_Listener) Limit(limit int) Network_LBaaS_Listener { + r.Options.Limit = &limit + return r +} + +func (r Network_LBaaS_Listener) Offset(offset int) Network_LBaaS_Listener { + r.Options.Offset = &offset + return r +} + +// Delete a load balancer's front- and backend protocols and return the load balancer object with listeners (frontend), pools (backend), server instances (members) and datacenter populated. +func (r Network_LBaaS_Listener) DeleteLoadBalancerProtocols(loadBalancerUuid *string, listenerUuids []string) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + listenerUuids, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Listener", "deleteLoadBalancerProtocols", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_LBaaS_Listener) GetDefaultPool() (resp datatypes.Network_LBaaS_Pool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Listener", "getDefaultPool", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_LBaaS_Listener) GetLoadBalancer() (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Listener", "getLoadBalancer", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_LBaaS_Listener) GetObject() (resp datatypes.Network_LBaaS_Listener, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Listener", "getObject", nil, &r.Options, &resp) + return +} + +// Update (create) a load balancer's front- and backend protocols and return the load balancer object with listeners (frontend), pools (backend), server instances (members) and datacenter populated. Note that if a protocolConfiguration has no listenerUuid set, this function will create the specified front- and backend accordingly. Otherwise the given front- and backend will be updated with the new protocol and port. +func (r Network_LBaaS_Listener) UpdateLoadBalancerProtocols(loadBalancerUuid *string, protocolConfigurations []datatypes.Network_LBaaS_LoadBalancerProtocolConfiguration) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + protocolConfigurations, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Listener", "updateLoadBalancerProtocols", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LBaaS_LoadBalancer type presents a structure containing attributes of a load balancer, and its related objects including listeners, pools and members.
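+//
+// A minimal retrieval sketch; the uuid value is hypothetical:
+//
+//     lbSvc := services.GetNetworkLBaaSLoadBalancerService(sess)
+//     uuid := "11111111-2222-3333-4444-555555555555" // hypothetical
+//     lb, err := lbSvc.GetLoadBalancer(sl.String(uuid))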
+type Network_LBaaS_LoadBalancer struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLBaaSLoadBalancerService returns an instance of the Network_LBaaS_LoadBalancer SoftLayer service +func GetNetworkLBaaSLoadBalancerService(sess *session.Session) Network_LBaaS_LoadBalancer { + return Network_LBaaS_LoadBalancer{Session: sess} +} + +func (r Network_LBaaS_LoadBalancer) Id(id int) Network_LBaaS_LoadBalancer { + r.Options.Id = &id + return r +} + +func (r Network_LBaaS_LoadBalancer) Mask(mask string) Network_LBaaS_LoadBalancer { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LBaaS_LoadBalancer) Filter(filter string) Network_LBaaS_LoadBalancer { + r.Options.Filter = filter + return r +} + +func (r Network_LBaaS_LoadBalancer) Limit(limit int) Network_LBaaS_LoadBalancer { + r.Options.Limit = &limit + return r +} + +func (r Network_LBaaS_LoadBalancer) Offset(offset int) Network_LBaaS_LoadBalancer { + r.Options.Offset = &offset + return r +} + +// Cancel a load balancer with the given uuid. The billing system will execute the deletion of the load balancer and all objects associated with it such as load balancer appliances, listeners, pools and members in the background. +func (r Network_LBaaS_LoadBalancer) CancelLoadBalancer(uuid *string) (resp bool, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "cancelLoadBalancer", params, &r.Options, &resp) + return +} + +// Return all existing load balancers +func (r Network_LBaaS_LoadBalancer) GetAllObjects() (resp []datatypes.Network_LBaaS_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenter where the load balancer is located. +func (r Network_LBaaS_LoadBalancer) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_LBaaS_LoadBalancer) GetIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve Listeners assigned to the load balancer. +func (r Network_LBaaS_LoadBalancer) GetListeners() (resp []datatypes.Network_LBaaS_Listener, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getListeners", nil, &r.Options, &resp) + return +} + +// Get the load balancer object with the given uuid. +func (r Network_LBaaS_LoadBalancer) GetLoadBalancer(uuid *string) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getLoadBalancer", params, &r.Options, &resp) + return +} + +// Return load balancer members' health +func (r Network_LBaaS_LoadBalancer) GetLoadBalancerMemberHealth(uuid *string) (resp []datatypes.Network_LBaaS_PoolMembersHealth, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getLoadBalancerMemberHealth", params, &r.Options, &resp) + return +} + +// Return a load balancer's statistics, such as the total number of current sessions and the total number of accumulated connections.
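+//
+// For example, with lbSvc as a Network_LBaaS_LoadBalancer service instance
+// and a hypothetical uuid:
+//
+//     stats, err := lbSvc.GetLoadBalancerStatistics(sl.String(uuid))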
+func (r Network_LBaaS_LoadBalancer) GetLoadBalancerStatistics(uuid *string) (resp datatypes.Network_LBaaS_LoadBalancerStatistics, err error) { + params := []interface{}{ + uuid, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getLoadBalancerStatistics", params, &r.Options, &resp) + return +} + +// Retrieve Members assigned to the load balancer. +func (r Network_LBaaS_LoadBalancer) GetMembers() (resp []datatypes.Network_LBaaS_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getMembers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_LBaaS_LoadBalancer) GetObject() (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "getObject", nil, &r.Options, &resp) + return +} + +// Update a load balancer's description, and return the load balancer object containing all listeners, pools, members and datacenter. +func (r Network_LBaaS_LoadBalancer) UpdateLoadBalancer(uuid *string, newDescription *string) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + uuid, + newDescription, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_LoadBalancer", "updateLoadBalancer", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LBaaS_Member represents the backend member for a load balancer. It can be either a virtual server or a bare metal machine. +type Network_LBaaS_Member struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLBaaSMemberService returns an instance of the Network_LBaaS_Member SoftLayer service +func GetNetworkLBaaSMemberService(sess *session.Session) Network_LBaaS_Member { + return Network_LBaaS_Member{Session: sess} +} + +func (r Network_LBaaS_Member) Id(id int) Network_LBaaS_Member { + r.Options.Id = &id + return r +} + +func (r Network_LBaaS_Member) Mask(mask string) Network_LBaaS_Member { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LBaaS_Member) Filter(filter string) Network_LBaaS_Member { + r.Options.Filter = filter + return r +} + +func (r Network_LBaaS_Member) Limit(limit int) Network_LBaaS_Member { + r.Options.Limit = &limit + return r +} + +func (r Network_LBaaS_Member) Offset(offset int) Network_LBaaS_Member { + r.Options.Offset = &offset + return r +} + +// Add server instances as members to a load balancer and return it with listeners, pools and members populated +func (r Network_LBaaS_Member) AddLoadBalancerMembers(loadBalancerUuid *string, serverInstances []datatypes.Network_LBaaS_LoadBalancerServerInstanceInfo) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + serverInstances, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Member", "addLoadBalancerMembers", params, &r.Options, &resp) + return +} + +// Delete the given members from a load balancer and return the load balancer object with listeners, pools and members populated +func (r Network_LBaaS_Member) DeleteLoadBalancerMembers(loadBalancerUuid *string, memberUuids []string) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + memberUuids, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Member", "deleteLoadBalancerMembers", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_LBaaS_Member)
GetLoadBalancer() (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Member", "getLoadBalancer", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_LBaaS_Member) GetObject() (resp datatypes.Network_LBaaS_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Member", "getObject", nil, &r.Options, &resp) + return +} + +// Update members' weight and return the load balancer object with listeners, pools and members populated +func (r Network_LBaaS_Member) UpdateLoadBalancerMembers(loadBalancerUuid *string, members []datatypes.Network_LBaaS_Member) (resp datatypes.Network_LBaaS_LoadBalancer, err error) { + params := []interface{}{ + loadBalancerUuid, + members, + } + err = r.Session.DoRequest("SoftLayer_Network_LBaaS_Member", "updateLoadBalancerMembers", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LoadBalancer_Global_Account data type contains the properties for a single global load balancer account. The properties you are able to edit are fallbackIp, loadBalanceTypeId, and notes. The hosts relational property can be used for creating and editing hosts that belong to the global load balancer account. The [[SoftLayer_Network_LoadBalancer_Global_Account::editObject|editObject]] method contains details on creating and editing hosts through the hosts relational property. +type Network_LoadBalancer_Global_Account struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLoadBalancerGlobalAccountService returns an instance of the Network_LoadBalancer_Global_Account SoftLayer service +func GetNetworkLoadBalancerGlobalAccountService(sess *session.Session) Network_LoadBalancer_Global_Account { + return Network_LoadBalancer_Global_Account{Session: sess} +} + +func (r Network_LoadBalancer_Global_Account) Id(id int) Network_LoadBalancer_Global_Account { + r.Options.Id = &id + return r +} + +func (r Network_LoadBalancer_Global_Account) Mask(mask string) Network_LoadBalancer_Global_Account { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LoadBalancer_Global_Account) Filter(filter string) Network_LoadBalancer_Global_Account { + r.Options.Filter = filter + return r +} + +func (r Network_LoadBalancer_Global_Account) Limit(limit int) Network_LoadBalancer_Global_Account { + r.Options.Limit = &limit + return r +} + +func (r Network_LoadBalancer_Global_Account) Offset(offset int) Network_LoadBalancer_Global_Account { + r.Options.Offset = &offset + return r +} + +// If your globally load balanced domain is hosted on the SoftLayer nameservers this method will add the required NS resource record to your DNS zone file and remove any A records that match the host portion of a global load balancer account hostname. An NS resource record is required to be able to use your SoftLayer global load balancer account. Please make sure the zone file for the hostname listed on your SoftLayer global load balancer account is set up prior to using this method. If your globally load balanced domain is hosted on any other nameservers this method will not be able to add the required NS record.
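+//
+// A minimal sketch; the global load balancer account id is hypothetical:
+//
+//     glbSvc := services.GetNetworkLoadBalancerGlobalAccountService(sess)
+//     ok, err := glbSvc.Id(123).AddNsRecord()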
+func (r Network_LoadBalancer_Global_Account) AddNsRecord() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "addNsRecord", nil, &r.Options, &resp) + return +} + +// Edit the properties of a global load balancer account by passing in a modified instance of the object. The global load balancer account properties you are able to edit are: fallback ip, load balance type id, and notes. Hosts that belong to your SoftLayer global load balancer account are created and modified through this method. An example templateObject that updates global load balancer account properties, updates the properties of a host, and adds a new host is shown below: +// +// +// * id: 2 +// * loadBalanceTypeId: 2 +// * notes: Notes updated +// * fallbackIp: 1.1.1.1 +// * hosts: +// ** id: 19 +// ** destinationIp: 2.2.2.2 +// ** weight: 25 +// ** healthCheck: http +// ** destinationPort: 80 +// ** enabled: 1
+// ** destinationIp: 3.3.3.3 +// ** weight: 25 +// ** healthCheck: http +// ** destinationPort: 80 +// ** enabled: 1 +// +// +// +// +// The first section contains the properties of the global load balancer account that will be updated, while the second section contains the elements of the 'hosts' property of the global load balancer account. The first host listed will have its properties updated because the 'id' property of the host is set, meaning the global load balancer host with an id of 19 will be updated. The second host listed will be created because it lacks the 'id' property. +// +// There is a limit to the maximum number of hosts that you are allowed to add, which is defined by the allowedNumberOfHosts property on the global load balancer account. The destination IP address of a host must be an IP address that belongs to your SoftLayer Account, or a local load balancer virtual IP address that belongs to your account. The destination IP address and destination port are required and must be provided when creating a host. +func (r Network_LoadBalancer_Global_Account) EditObject(templateObject *datatypes.Network_LoadBalancer_Global_Account) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve Your SoftLayer customer account. +func (r Network_LoadBalancer_Global_Account) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for a Global Load Balancer account. +func (r Network_LoadBalancer_Global_Account) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The hosts in the load balancing pool for a global load balancer account. +func (r Network_LoadBalancer_Global_Account) GetHosts() (resp []datatypes.Network_LoadBalancer_Global_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getHosts", nil, &r.Options, &resp) + return +} + +// Retrieve The load balance method of a global load balancer account +func (r Network_LoadBalancer_Global_Account) GetLoadBalanceType() (resp datatypes.Network_LoadBalancer_Global_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getLoadBalanceType", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the global load balancer is a managed resource. +func (r Network_LoadBalancer_Global_Account) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_LoadBalancer_Global_Account object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_LoadBalancer_Global_Account service. You can only retrieve a global load balancer account that is assigned to your SoftLayer customer account.
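+//
+// A minimal retrieval sketch; the account id is hypothetical:
+//
+//     acct, err := services.GetNetworkLoadBalancerGlobalAccountService(sess).
+//         Id(123).
+//         Mask("hosts").
+//         GetObject()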
+func (r Network_LoadBalancer_Global_Account) GetObject() (resp datatypes.Network_LoadBalancer_Global_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "getObject", nil, &r.Options, &resp) + return +} + +// If your globally load balanced domain is hosted on the SoftLayer nameservers this method will remove the NS resource record from your DNS zone file. Removing the NS resource record will basically disable your global load balancer account since no DNS requests will be forwarded to the global load balancers. Any A records that were removed when the NS resource record was added will not be created for you. If your globally load balanced domain is hosted on any other nameservers this method will not be able to remove the required NS record. +func (r Network_LoadBalancer_Global_Account) RemoveNsRecord() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Account", "removeNsRecord", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LoadBalancer_Global_Host data type represents a single host that belongs to a global load balancer account's load balancing pool. +// +// The destination IP address of a host must be one that belongs to your SoftLayer customer account, or to a datacenter load balancer virtual ip that belongs to your SoftLayer customer account. The destination IP address and port of a global load balancer host are required fields that must exist during creation and cannot be removed. The acceptable values for the health check type are 'none', 'http', and 'tcp'. The status property is updated in 5 minute intervals and the hits property is updated in 10 minute intervals. +// +// The order of the host is only important if you are using the 'failover' load balance method, and the weight is only important if you are using the 'weighted round robin' load balance method. +type Network_LoadBalancer_Global_Host struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLoadBalancerGlobalHostService returns an instance of the Network_LoadBalancer_Global_Host SoftLayer service +func GetNetworkLoadBalancerGlobalHostService(sess *session.Session) Network_LoadBalancer_Global_Host { + return Network_LoadBalancer_Global_Host{Session: sess} +} + +func (r Network_LoadBalancer_Global_Host) Id(id int) Network_LoadBalancer_Global_Host { + r.Options.Id = &id + return r +} + +func (r Network_LoadBalancer_Global_Host) Mask(mask string) Network_LoadBalancer_Global_Host { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LoadBalancer_Global_Host) Filter(filter string) Network_LoadBalancer_Global_Host { + r.Options.Filter = filter + return r +} + +func (r Network_LoadBalancer_Global_Host) Limit(limit int) Network_LoadBalancer_Global_Host { + r.Options.Limit = &limit + return r +} + +func (r Network_LoadBalancer_Global_Host) Offset(offset int) Network_LoadBalancer_Global_Host { + r.Options.Offset = &offset + return r +} + +// Remove a host from the load balancing pool of a global load balancer account. +func (r Network_LoadBalancer_Global_Host) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Host", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The global load balancer account a host belongs to.
+func (r Network_LoadBalancer_Global_Host) GetLoadBalancerAccount() (resp datatypes.Network_LoadBalancer_Global_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Host", "getLoadBalancerAccount", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_LoadBalancer_Global_Host object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_LoadBalancer_Global_Host service. You can only retrieve a global load balancer host that is assigned to your SoftLayer global load balancer account. +func (r Network_LoadBalancer_Global_Host) GetObject() (resp datatypes.Network_LoadBalancer_Global_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Global_Host", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LoadBalancer_Service data type contains all the information relating to a specific service (destination) on a particular load balancer. +// +// Information retained on the object itself is the source and destination of the service, routing type, weight, and whether or not the service is currently enabled. +type Network_LoadBalancer_Service struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLoadBalancerServiceService returns an instance of the Network_LoadBalancer_Service SoftLayer service +func GetNetworkLoadBalancerServiceService(sess *session.Session) Network_LoadBalancer_Service { + return Network_LoadBalancer_Service{Session: sess} +} + +func (r Network_LoadBalancer_Service) Id(id int) Network_LoadBalancer_Service { + r.Options.Id = &id + return r +} + +func (r Network_LoadBalancer_Service) Mask(mask string) Network_LoadBalancer_Service { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LoadBalancer_Service) Filter(filter string) Network_LoadBalancer_Service { + r.Options.Filter = filter + return r +} + +func (r Network_LoadBalancer_Service) Limit(limit int) Network_LoadBalancer_Service { + r.Options.Limit = &limit + return r +} + +func (r Network_LoadBalancer_Service) Offset(offset int) Network_LoadBalancer_Service { + r.Options.Offset = &offset + return r +} + +// Calling deleteObject on a particular service will remove it from the load balancer. This is the only way to remove a service from your load balancer. If you wish to remove a service, first call this function, then reload the virtualIpAddress object and edit the remaining services to reflect the other changes that you wish to make. +func (r Network_LoadBalancer_Service) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "deleteObject", nil, &r.Options, &resp) + return +} + +// Get the graph image for a load balancer service based on the supplied graph type and metric. The available graph types are: 'connections' and 'status', and the available metrics are: 'day', 'week' and 'month'. +// +// This method returns the raw binary image data.
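+//
+// For example, fetching a one-day connections graph for a hypothetical
+// service id and writing the raw bytes to disk (the image format is assumed
+// here, not guaranteed by the API):
+//
+//     lbsSvc := services.GetNetworkLoadBalancerServiceService(sess)
+//     img, err := lbsSvc.Id(123).GetGraphImage(sl.String("connections"), sl.String("day"))
+//     if err == nil {
+//         err = ioutil.WriteFile("service-graph.png", img, 0644)
+//     }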
+func (r Network_LoadBalancer_Service) GetGraphImage(graphType *string, metric *string) (resp []byte, err error) { + params := []interface{}{ + graphType, + metric, + } + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "getGraphImage", params, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_LoadBalancer_Service object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_LoadBalancer_Service service. You can only retrieve services on load balancers assigned to your account, and it is recommended that you simply retrieve the entire load balancer, as an individual service has no explicit purpose without its "siblings". +func (r Network_LoadBalancer_Service) GetObject() (resp datatypes.Network_LoadBalancer_Service, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "getObject", nil, &r.Options, &resp) + return +} + +// Returns an array of SoftLayer_Container_Network_LoadBalancer_StatusEntry objects. A SoftLayer_Container_Network_LoadBalancer_StatusEntry object has two variables, "Label" and "Value". +// +// Calling this function executes a command on the physical load balancer itself, and therefore should be called infrequently. For a general idea of the load balancer service, use the "peakConnections" variable on the Type. +// +// Possible values for "Label" are: +// +// +// * IP Address +// * Port +// * Server Status +// * Load Status +// * Current Connections +// * Total Hits +// +// +// Not all labels are guaranteed to be returned. +func (r Network_LoadBalancer_Service) GetStatus() (resp []datatypes.Container_Network_LoadBalancer_StatusEntry, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The load balancer that this service belongs to. +func (r Network_LoadBalancer_Service) GetVip() (resp datatypes.Network_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "getVip", nil, &r.Options, &resp) + return +} + +// Calling resetPeakConnections will set the peakConnections variable to zero on this particular object. Peak connections will continue to increase normally after this method call; it only temporarily resets the statistic to zero, until the next time it is polled. +func (r Network_LoadBalancer_Service) ResetPeakConnections() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_Service", "resetPeakConnections", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_LoadBalancer_VirtualIpAddress data type contains all the information relating to a specific load balancer assigned to a customer account. +// +// Information retained on the object itself is the virtual IP address, load balancing method, and any notes that are related to the load balancer. There is also an array of SoftLayer_Network_LoadBalancer_Service objects, which represent the load balancer services, explained more fully in the SoftLayer_Network_LoadBalancer_Service documentation.
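+//
+// A minimal retrieval sketch; the load balancer id is hypothetical:
+//
+//     vipSvc := services.GetNetworkLoadBalancerVirtualIpAddressService(sess)
+//     vip, err := vipSvc.Id(123).Mask("services").GetObject()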
+type Network_LoadBalancer_VirtualIpAddress struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkLoadBalancerVirtualIpAddressService returns an instance of the Network_LoadBalancer_VirtualIpAddress SoftLayer service +func GetNetworkLoadBalancerVirtualIpAddressService(sess *session.Session) Network_LoadBalancer_VirtualIpAddress { + return Network_LoadBalancer_VirtualIpAddress{Session: sess} +} + +func (r Network_LoadBalancer_VirtualIpAddress) Id(id int) Network_LoadBalancer_VirtualIpAddress { + r.Options.Id = &id + return r +} + +func (r Network_LoadBalancer_VirtualIpAddress) Mask(mask string) Network_LoadBalancer_VirtualIpAddress { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_LoadBalancer_VirtualIpAddress) Filter(filter string) Network_LoadBalancer_VirtualIpAddress { + r.Options.Filter = filter + return r +} + +func (r Network_LoadBalancer_VirtualIpAddress) Limit(limit int) Network_LoadBalancer_VirtualIpAddress { + r.Options.Limit = &limit + return r +} + +func (r Network_LoadBalancer_VirtualIpAddress) Offset(offset int) Network_LoadBalancer_VirtualIpAddress { + r.Options.Offset = &offset + return r +} + +// Disable a Virtual IP Address, removing it from load balancer rotation and denying all connections to that IP address. +func (r Network_LoadBalancer_VirtualIpAddress) Disable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "disable", nil, &r.Options, &resp) + return +} + +// Like any other API object, a load balancer can have its exposed properties edited by passing in a modified version of the object. The load balancer object can also modify its services in this way. Simply request the load balancer object you wish to edit, then modify the objects in the services array and pass the modified object to this function. WARNING: Services cannot be deleted in this manner; you must call deleteObject() on each service to physically remove it from the load balancer. +func (r Network_LoadBalancer_VirtualIpAddress) EditObject(templateObject *datatypes.Network_LoadBalancer_VirtualIpAddress) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "editObject", params, &r.Options, &resp) + return +} + +// Enable a disabled Virtual IP Address, allowing connections back to the IP address. +func (r Network_LoadBalancer_VirtualIpAddress) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "enable", nil, &r.Options, &resp) + return +} + +// Retrieve The account that owns this load balancer. +func (r Network_LoadBalancer_VirtualIpAddress) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for the Load Balancer. +func (r Network_LoadBalancer_VirtualIpAddress) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve If false, this VIP and associated services may be edited via the portal or the API. If true, you must configure this VIP manually on the device.
+func (r Network_LoadBalancer_VirtualIpAddress) GetCustomerManagedFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getCustomerManagedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that the load balancer is a managed resource. +func (r Network_LoadBalancer_VirtualIpAddress) GetManagedResourceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getManagedResourceFlag", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_LoadBalancer_VirtualIpAddress object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_LoadBalancer_VirtualIpAddress service. You can only retrieve Load Balancers assigned to your account. +func (r Network_LoadBalancer_VirtualIpAddress) GetObject() (resp datatypes.Network_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve the services on this load balancer. +func (r Network_LoadBalancer_VirtualIpAddress) GetServices() (resp []datatypes.Network_LoadBalancer_Service, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "getServices", nil, &r.Options, &resp) + return +} + +// Quickly remove all active external connections to a Virtual IP Address. +func (r Network_LoadBalancer_VirtualIpAddress) KickAllConnections() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "kickAllConnections", nil, &r.Options, &resp) + return +} + +// Upgrades the connection limit on the VirtualIp and changes the billing item on your account to reflect the change. This function will only upgrade you to the next "level" of service. The next level follows this pattern (Current Level => Next Level): +// +// * 50 => 100 +// * 100 => 200 +// * 200 => 500 +// * 500 => 1000 +// * 1000 => 1200 +// * 1200 => 1500 +// * 1500 => 2000 +// * 2000 => 2500 +// * 2500 => 3000 +func (r Network_LoadBalancer_VirtualIpAddress) UpgradeConnectionLimit() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_LoadBalancer_VirtualIpAddress", "upgradeConnectionLimit", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Media_Transcode_Account contains information regarding a transcode account.
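+//
+// A hedged sketch of the recommended job-creation flow described in the
+// method documentation below, using the documented example values:
+//
+//     jobSvc := services.GetNetworkMediaTranscodeJobService(sess)
+//     job, err := jobSvc.CreateObject(&datatypes.Network_Media_Transcode_Job{
+//         TranscodePresetName: sl.String("F4V 896kbps 640x352 16x9 29.97fps"),
+//         TranscodePresetGuid: sl.String("{87E01268-C3E3-4A85-9701-052C9AC42BD4}"),
+//         InputFile:           sl.String("/in/my_birthday.wmv"),
+//         OutputFile:          sl.String("/out/my_birthday_flash"),
+//     })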
+type Network_Media_Transcode_Account struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMediaTranscodeAccountService returns an instance of the Network_Media_Transcode_Account SoftLayer service +func GetNetworkMediaTranscodeAccountService(sess *session.Session) Network_Media_Transcode_Account { + return Network_Media_Transcode_Account{Session: sess} +} + +func (r Network_Media_Transcode_Account) Id(id int) Network_Media_Transcode_Account { + r.Options.Id = &id + return r +} + +func (r Network_Media_Transcode_Account) Mask(mask string) Network_Media_Transcode_Account { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Media_Transcode_Account) Filter(filter string) Network_Media_Transcode_Account { + r.Options.Filter = filter + return r +} + +func (r Network_Media_Transcode_Account) Limit(limit int) Network_Media_Transcode_Account { + r.Options.Limit = &limit + return r +} + +func (r Network_Media_Transcode_Account) Offset(offset int) Network_Media_Transcode_Account { + r.Options.Offset = &offset + return r +} + +// With this method, you can create a transcode account. An individual SoftLayer account can have a single Transcode account. You have to pass your SoftLayer account id as a parameter. +func (r Network_Media_Transcode_Account) CreateTranscodeAccount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "createTranscodeAccount", nil, &r.Options, &resp) + return +} + +// Note: This method is obsolete. Please use the [[SoftLayer_Network_Media_Transcode_Job::createObject|createObject]] method on the SoftLayer_Network_Media_Transcode_Job object instead. SoftLayer_Network_Media_Transcode_Job::createObject returns an object of a newly created Transcode Job. +// +// With this method, you can create a transcode job. +// +// The very first step of creating a transcode job is to upload your media files to the /in directory on your Transcode FTP space. Then, you have to pass a [[SoftLayer_Network_Media_Transcode_Job|Transcode job]] object as a parameter for this method. +// +// There are 4 required properties of the SoftLayer_Network_Media_Transcode_Job object: transcodePresetName, transcodePresetGuid, inputFile, and outputFile. A transcode preset is a configuration that defines a certain media output. You can retrieve all the supported presets with the [[SoftLayer_Network_Media_Transcode_Account::getPresets|getPresets]] method. You can also use the [[SoftLayer_Network_Media_Transcode_Account::getPresetDetail|getPresetDetail]] method to get more information on a preset. Use these two methods to determine appropriate values for the "transcodePresetName" and "transcodePresetGuid" properties. For an "inputFile", you must specify a file that exists in the /in directory of your Transcode FTP space. An "outputFile" name will be used by the Transcode server for naming a transcoded file. An output file name must be in the /out directory. If your outputFile name already exists in the /out directory, the Transcode server will append a file name with _n (an underscore and the total number of files with the identical name plus 1). +// +// The "name" property is optional and it can help you keep track of transcode jobs easily. "autoDeleteDuration" is another optional property that you can specify. It determines how soon your input file will be deleted.
If autoDeleteDuration is set to zero, your input file will be removed immediately after the last transcode job running on it is completed. The value of the autoDeleteDuration property is in seconds, and the maximum value is 259200, which is 3 days. +// +// An example SoftLayer_Network_Media_Transcode_Job parameter looks like this: +// +// +// * name: My transcoding +// * transcodePresetName: F4V 896kbps 640x352 16x9 29.97fps +// * transcodePresetGuid: {87E01268-C3E3-4A85-9701-052C9AC42BD4} +// * inputFile: /in/my_birthday.wmv +// * outputFile: /out/my_birthday_flash +// +// +// Notice that an output file does not have a file extension. The Transcode server will append a file extension based on an output format. A newly created transcode job will be in "Pending" status and it will be added to the Transcoding queue. You will receive a notification email whenever there is a status change on your transcode job. For example, when the Transcode server starts to process your transcode job, you will be notified via email. +// +// You can add up to 3 pending jobs at a time. Transcode jobs with any other status such as "Complete" or "Error" will not be counted toward your pending jobs. +// +// Once a job is complete, the Transcode server will place the output file into the /out directory along with a notification email. The files in the /out directory will be removed 3 days after they were created. You will need to use an FTP client to download transcoded files. +// +// +func (r Network_Media_Transcode_Account) CreateTranscodeJob(newJob *datatypes.Network_Media_Transcode_Job) (resp bool, err error) { + params := []interface{}{ + newJob, + } + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "createTranscodeJob", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer account information +func (r Network_Media_Transcode_Account) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getAccount", nil, &r.Options, &resp) + return +} + +// This method returns a collection of SoftLayer_Container_Network_Ftp_Directory objects. You can retrieve directory information for the /in and /out directories. A [[SoftLayer_Container_Network_Directory_Listing|Directory Listing]] object contains a type (indicating whether it is a file or a directory), name and file count if it is a directory. +func (r Network_Media_Transcode_Account) GetDirectoryInformation(directoryName *string, extensionFilter *string) (resp []datatypes.Container_Network_Directory_Listing, err error) { + params := []interface{}{ + directoryName, + extensionFilter, + } + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getDirectoryInformation", params, &r.Options, &resp) + return +} + +// This method returns detailed information of a media file that resides on the Transcode FTP server. A [[SoftLayer_Container_Network_Media_Information|media information]] object contains media details such as file size, media format, frame rate, aspect ratio and so on. This information is merely for reference purposes. You should not rely on this data. Our library grabs small pieces of data from a media file to gather media details. This information may not be available for some files.
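+//
+// For example, using the documented sample input file:
+//
+//     acctSvc := services.GetNetworkMediaTranscodeAccountService(sess)
+//     detail, err := acctSvc.GetFileDetail(sl.String("/in/my_birthday.wmv"))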
+func (r Network_Media_Transcode_Account) GetFileDetail(source *string) (resp datatypes.Container_Network_Media_Information, err error) { + params := []interface{}{ + source, + } + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getFileDetail", params, &r.Options, &resp) + return +} + +// This method returns your Transcode FTP login credentials to the transcode.service.softlayer.com server. +// +// The Transcode FTP server is available via the SoftLayer private network. There is no API method to upload a file to the Transcode server, so you need to use an FTP client. You will have /in and /out directories on the Transcode FTP server. You will have read-write privileges for the /in directory and read-only privileges for the /out directory. All the files in both /in and /out directories will be deleted after 72 hours from the creation date. +func (r Network_Media_Transcode_Account) GetFtpAttributes() (resp datatypes.Container_Network_Authentication_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getFtpAttributes", nil, &r.Options, &resp) + return +} + +// The getObject method retrieves the SoftLayer_Network_Media_Transcode_Account object whose ID number corresponds to the ID number of the initial parameter passed to the SoftLayer_Network_Media_Transcode_Account service. You can only retrieve a Transcode account assigned to your SoftLayer customer account. +func (r Network_Media_Transcode_Account) GetObject() (resp datatypes.Network_Media_Transcode_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getObject", nil, &r.Options, &resp) + return +} + +// This method returns an array of [[SoftLayer_Container_Network_Media_Transcode_Preset_Element|preset element]] objects. Each preset has its own collection of preset elements such as encoder, frame rate, aspect ratio and so on. Each element object has a default value for itself and an array of [[SoftLayer_Container_Network_Media_Transcode_Preset_Element_Option|element option]] objects. For example, the "Frame Rate" element for the "Windows Media 9 - Download - 1 Mbps - NTSC - Constrained VBR" preset has 19 element options. The 15.0 frame rate is selected by default. Currently, you are not able to change the default value. Customizing these values may be possible in the future. +func (r Network_Media_Transcode_Account) GetPresetDetail(guid *string) (resp []datatypes.Container_Network_Media_Transcode_Preset_Element, err error) { + params := []interface{}{ + guid, + } + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getPresetDetail", params, &r.Options, &resp) + return +} + +// A transcode preset is a configuration that defines a certain media output. This method returns an array of transcoding preset objects supported by SoftLayer's Transcode server. Each [[SoftLayer_Container_Network_Media_Transcode_Preset|preset object]] contains a GUID property. You will need a GUID string when you create a new transcode job.
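+//
+// A minimal sketch, with acctSvc as a Network_Media_Transcode_Account
+// service instance; each returned preset carries the name and GUID needed
+// for a new transcode job:
+//
+//     presets, err := acctSvc.GetPresets()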
+func (r Network_Media_Transcode_Account) GetPresets() (resp []datatypes.Container_Network_Media_Transcode_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getPresets", nil, &r.Options, &resp) + return +} + +// Retrieve Transcode jobs +func (r Network_Media_Transcode_Account) GetTranscodeJobs() (resp []datatypes.Network_Media_Transcode_Job, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Account", "getTranscodeJobs", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Media_Transcode_Job contains information regarding a transcode job such as input file, output format, user id and so on. +type Network_Media_Transcode_Job struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMediaTranscodeJobService returns an instance of the Network_Media_Transcode_Job SoftLayer service +func GetNetworkMediaTranscodeJobService(sess *session.Session) Network_Media_Transcode_Job { + return Network_Media_Transcode_Job{Session: sess} +} + +func (r Network_Media_Transcode_Job) Id(id int) Network_Media_Transcode_Job { + r.Options.Id = &id + return r +} + +func (r Network_Media_Transcode_Job) Mask(mask string) Network_Media_Transcode_Job { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Media_Transcode_Job) Filter(filter string) Network_Media_Transcode_Job { + r.Options.Filter = filter + return r +} + +func (r Network_Media_Transcode_Job) Limit(limit int) Network_Media_Transcode_Job { + r.Options.Limit = &limit + return r +} + +func (r Network_Media_Transcode_Job) Offset(offset int) Network_Media_Transcode_Job { + r.Options.Offset = &offset + return r +} + +// With this method, you can create a transcode job. +// +// The very first step of creating a transcode job is to upload your media files to the /in directory on your Transcode FTP space. Then, you have to pass a [[SoftLayer_Network_Media_Transcode_Job|Transcode job]] object as a parameter for this method. +// +// There are 4 required properties of the SoftLayer_Network_Media_Transcode_Job object: transcodePresetName, transcodePresetGuid, inputFile, and outputFile. A transcode preset is a configuration that defines a certain media output. You can retrieve all the supported presets with the [[SoftLayer_Network_Media_Transcode_Account::getPresets|getPresets]] method. You can also use the [[SoftLayer_Network_Media_Transcode_Account::getPresetDetail|getPresetDetail]] method to get more information on a preset. Use these two methods to determine appropriate values for the "transcodePresetName" and "transcodePresetGuid" properties. For an "inputFile", you must specify a file that exists in the /in directory of your Transcode FTP space. An "outputFile" name will be used by the Transcode server for naming a transcoded file. An output file name must be in the /out directory. If your outputFile name already exists in the /out directory, the Transcode server will append a file name with _n (an underscore and the total number of files with the identical name plus 1). +// +// The "name" property is optional and it can help you keep track of transcode jobs easily. "autoDeleteDuration" is another optional property that you can specify. It determines how soon your input file will be deleted. If autoDeleteDuration is set to zero, your input file will be removed immediately after the last transcode job running on it is completed.
The value of the autoDeleteDuration property is in seconds, and the maximum value is 259200, which is 3 days. +// +// An example SoftLayer_Network_Media_Transcode_Job parameter looks like this: +// +// +// * name: My transcoding +// * transcodePresetName: F4V 896kbps 640x352 16x9 29.97fps +// * transcodePresetGuid: {87E01268-C3E3-4A85-9701-052C9AC42BD4} +// * inputFile: /in/my_birthday.wmv +// * outputFile: /out/my_birthday_flash +// +// +// Notice that an output file does not have a file extension. The Transcode server will append a file extension based on an output format. A newly created transcode job will be in "Pending" status and it will be added to the Transcoding queue. You will receive a notification email whenever there is a status change on your transcode job. For example, when the Transcode server starts to process your transcode job, you will be notified via email. +// +// You can add up to 3 pending jobs at a time. Transcode jobs with any other status such as "Complete" or "Error" will not be counted toward your pending jobs. +// +// Once a job is complete, the Transcode server will place the output file into the /out directory along with a notification email. The files in the /out directory will be removed 3 days after they were created. You will need to use an FTP client to download transcoded files. +// +// +func (r Network_Media_Transcode_Job) CreateObject(templateObject *datatypes.Network_Media_Transcode_Job) (resp datatypes.Network_Media_Transcode_Job, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "createObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Media_Transcode_Job) GetHistory() (resp []datatypes.Network_Media_Transcode_Job_History, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getHistory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Media_Transcode_Job) GetObject() (resp datatypes.Network_Media_Transcode_Job, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The transcode service account +func (r Network_Media_Transcode_Job) GetTranscodeAccount() (resp datatypes.Network_Media_Transcode_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getTranscodeAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The status information of a transcode job +func (r Network_Media_Transcode_Job) GetTranscodeStatus() (resp datatypes.Network_Media_Transcode_Job_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getTranscodeStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The status of a transcode job +func (r Network_Media_Transcode_Job) GetTranscodeStatusName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getTranscodeStatusName", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer user that created the transcode job +func (r Network_Media_Transcode_Job) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job", "getUser", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Media_Transcode_Job_Status contains information on a transcode job status.
+type Network_Media_Transcode_Job_Status struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMediaTranscodeJobStatusService returns an instance of the Network_Media_Transcode_Job_Status SoftLayer service +func GetNetworkMediaTranscodeJobStatusService(sess *session.Session) Network_Media_Transcode_Job_Status { + return Network_Media_Transcode_Job_Status{Session: sess} +} + +func (r Network_Media_Transcode_Job_Status) Id(id int) Network_Media_Transcode_Job_Status { + r.Options.Id = &id + return r +} + +func (r Network_Media_Transcode_Job_Status) Mask(mask string) Network_Media_Transcode_Job_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Media_Transcode_Job_Status) Filter(filter string) Network_Media_Transcode_Job_Status { + r.Options.Filter = filter + return r +} + +func (r Network_Media_Transcode_Job_Status) Limit(limit int) Network_Media_Transcode_Job_Status { + r.Options.Limit = &limit + return r +} + +func (r Network_Media_Transcode_Job_Status) Offset(offset int) Network_Media_Transcode_Job_Status { + r.Options.Offset = &offset + return r +} + +// This method returns all transcode job statuses. +func (r Network_Media_Transcode_Job_Status) GetAllStatuses() (resp []datatypes.Network_Media_Transcode_Job_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job_Status", "getAllStatuses", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Media_Transcode_Job_Status) GetObject() (resp datatypes.Network_Media_Transcode_Job_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Media_Transcode_Job_Status", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Message_Delivery struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMessageDeliveryService returns an instance of the Network_Message_Delivery SoftLayer service +func GetNetworkMessageDeliveryService(sess *session.Session) Network_Message_Delivery { + return Network_Message_Delivery{Session: sess} +} + +func (r Network_Message_Delivery) Id(id int) Network_Message_Delivery { + r.Options.Id = &id + return r +} + +func (r Network_Message_Delivery) Mask(mask string) Network_Message_Delivery { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Message_Delivery) Filter(filter string) Network_Message_Delivery { + r.Options.Filter = filter + return r +} + +func (r Network_Message_Delivery) Limit(limit int) Network_Message_Delivery { + r.Options.Limit = &limit + return r +} + +func (r Network_Message_Delivery) Offset(offset int) Network_Message_Delivery { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Message_Delivery) EditObject(templateObject *datatypes.Network_Message_Delivery) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer customer account that a network message delivery account belongs to. 
+func (r Network_Message_Delivery) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a network message delivery account. +func (r Network_Message_Delivery) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "getBillingItem", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery) GetObject() (resp datatypes.Network_Message_Delivery, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The message delivery type of a network message delivery account. +func (r Network_Message_Delivery) GetType() (resp datatypes.Network_Message_Delivery_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor for a network message delivery account. +func (r Network_Message_Delivery) GetVendor() (resp datatypes.Network_Message_Delivery_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery", "getVendor", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Message_Delivery_Email_Sendgrid struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMessageDeliveryEmailSendgridService returns an instance of the Network_Message_Delivery_Email_Sendgrid SoftLayer service +func GetNetworkMessageDeliveryEmailSendgridService(sess *session.Session) Network_Message_Delivery_Email_Sendgrid { + return Network_Message_Delivery_Email_Sendgrid{Session: sess} +} + +func (r Network_Message_Delivery_Email_Sendgrid) Id(id int) Network_Message_Delivery_Email_Sendgrid { + r.Options.Id = &id + return r +} + +func (r Network_Message_Delivery_Email_Sendgrid) Mask(mask string) Network_Message_Delivery_Email_Sendgrid { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Message_Delivery_Email_Sendgrid) Filter(filter string) Network_Message_Delivery_Email_Sendgrid { + r.Options.Filter = filter + return r +} + +func (r Network_Message_Delivery_Email_Sendgrid) Limit(limit int) Network_Message_Delivery_Email_Sendgrid { + r.Options.Limit = &limit + return r +} + +func (r Network_Message_Delivery_Email_Sendgrid) Offset(offset int) Network_Message_Delivery_Email_Sendgrid { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) AddUnsubscribeEmailAddress(emailAddress *string) (resp bool, err error) { + params := []interface{}{ + emailAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "addUnsubscribeEmailAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) DeleteEmailListEntries(list *string, entries []string) (resp bool, err error) { + params := []interface{}{ + list, + entries, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "deleteEmailListEntries", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) DisableSmtpAccess() (resp bool, err error) { + err = 
r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "disableSmtpAccess", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) EditObject(templateObject *datatypes.Network_Message_Delivery) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) EnableSmtpAccess() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "enableSmtpAccess", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer customer account that a network message delivery account belongs to. +func (r Network_Message_Delivery_Email_Sendgrid) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetAccountOverview() (resp datatypes.Container_Network_Message_Delivery_Email_Sendgrid_Account_Overview, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getAccountOverview", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a network message delivery account. +func (r Network_Message_Delivery_Email_Sendgrid) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getBillingItem", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetCategoryList() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getCategoryList", nil, &r.Options, &resp) + return +} + +// Retrieve The contact e-mail address used by SendGrid. +func (r Network_Message_Delivery_Email_Sendgrid) GetEmailAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getEmailAddress", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetEmailList(list *string) (resp []datatypes.Container_Network_Message_Delivery_Email_Sendgrid_List_Entry, err error) { + params := []interface{}{ + list, + } + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getEmailList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Message_Delivery_Email_Sendgrid) GetObject() (resp datatypes.Network_Message_Delivery_Email_Sendgrid, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A flag that determines if a SendGrid e-mail delivery account has access to send mail through the SendGrid SMTP server. 
+func (r Network_Message_Delivery_Email_Sendgrid) GetSmtpAccess() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getSmtpAccess", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Message_Delivery_Email_Sendgrid) GetStatistics(options *datatypes.Container_Network_Message_Delivery_Email_Sendgrid_Statistics_Options) (resp []datatypes.Container_Network_Message_Delivery_Email_Sendgrid_Statistics, err error) {
+	params := []interface{}{
+		options,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getStatistics", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Message_Delivery_Email_Sendgrid) GetStatisticsGraph(options *datatypes.Container_Network_Message_Delivery_Email_Sendgrid_Statistics_Options) (resp datatypes.Container_Network_Message_Delivery_Email_Sendgrid_Statistics_Graph, err error) {
+	params := []interface{}{
+		options,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getStatisticsGraph", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The message delivery type of a network message delivery account.
+func (r Network_Message_Delivery_Email_Sendgrid) GetType() (resp datatypes.Network_Message_Delivery_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The vendor for a network message delivery account.
+func (r Network_Message_Delivery_Email_Sendgrid) GetVendor() (resp datatypes.Network_Message_Delivery_Vendor, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getVendor", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Message_Delivery_Email_Sendgrid) GetVendorPortalUrl() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "getVendorPortalUrl", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Message_Delivery_Email_Sendgrid) SendEmail(emailContainer *datatypes.Container_Network_Message_Delivery_Email) (resp bool, err error) {
+	params := []interface{}{
+		emailContainer,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "sendEmail", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Message_Delivery_Email_Sendgrid) UpdateEmailAddress(emailAddress *string) (resp bool, err error) {
+	params := []interface{}{
+		emailAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Delivery_Email_Sendgrid", "updateEmailAddress", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_Message_Queue data type contains general information relating to a Message Queue account.
+type Network_Message_Queue struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkMessageQueueService returns an instance of the Network_Message_Queue SoftLayer service
+func GetNetworkMessageQueueService(sess *session.Session) Network_Message_Queue {
+	return Network_Message_Queue{Session: sess}
+}
+
+func (r Network_Message_Queue) Id(id int) Network_Message_Queue {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Message_Queue) Mask(mask string) Network_Message_Queue {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Message_Queue) Filter(filter string) Network_Message_Queue {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Message_Queue) Limit(limit int) Network_Message_Queue {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Message_Queue) Offset(offset int) Network_Message_Queue {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve The account that a message queue belongs to.
+func (r Network_Message_Queue) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current billing item for this message queue account.
+func (r Network_Message_Queue) GetBillingItem() (resp datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue", "getBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All available message queue nodes
+func (r Network_Message_Queue) GetNodes() (resp []datatypes.Network_Message_Queue_Node, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue", "getNodes", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Message_Queue) GetObject() (resp datatypes.Network_Message_Queue, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A message queue account status.
+func (r Network_Message_Queue) GetStatus() (resp datatypes.Network_Message_Queue_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue", "getStatus", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_Message_Queue_Node data type contains general information relating to a Message Queue node.
+type Network_Message_Queue_Node struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkMessageQueueNodeService returns an instance of the Network_Message_Queue_Node SoftLayer service
+func GetNetworkMessageQueueNodeService(sess *session.Session) Network_Message_Queue_Node {
+	return Network_Message_Queue_Node{Session: sess}
+}
+
+func (r Network_Message_Queue_Node) Id(id int) Network_Message_Queue_Node {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Message_Queue_Node) Mask(mask string) Network_Message_Queue_Node {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Message_Queue_Node) Filter(filter string) Network_Message_Queue_Node {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Message_Queue_Node) Limit(limit int) Network_Message_Queue_Node {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Message_Queue_Node) Offset(offset int) Network_Message_Queue_Node {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Network_Message_Queue_Node) AddUser(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue_Node", "addUser", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Message_Queue_Node) DeleteUser(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue_Node", "deleteUser", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Message_Queue_Node) GetAllUsers() (resp []string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue_Node", "getAllUsers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The message queue account this node belongs to.
+func (r Network_Message_Queue_Node) GetMessageQueue() (resp datatypes.Network_Message_Queue, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue_Node", "getMessageQueue", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A message queue node's metric tracking object. This object records all request and notification count data for this message queue node.
+func (r Network_Message_Queue_Node) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue_Node", "getMetricTrackingObject", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Message_Queue_Node) GetObject() (resp datatypes.Network_Message_Queue_Node, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue_Node", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Message_Queue_Node) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue_Node", "getServiceResource", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve usage data by date.
+func (r Network_Message_Queue_Node) GetUsage(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue_Node", "getUsage", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a usage graph by date.
+func (r Network_Message_Queue_Node) GetUsageGraph(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) {
+	params := []interface{}{
+		graphData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue_Node", "getUsageGraph", params, &r.Options, &resp)
+	return
+}
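+
+// Example (illustrative sketch, not part of the generated bindings): pulling a
+// month of usage data for a message queue node. The credentials, node ID, and
+// dates below are placeholders, and datatypes.Time is assumed to embed
+// time.Time.
+//
+//	sess := session.New(apiUsername, apiKey)
+//	nodeSvc := services.GetNetworkMessageQueueNodeService(sess)
+//	start := datatypes.Time{Time: time.Date(2017, 5, 1, 0, 0, 0, 0, time.UTC)}
+//	end := datatypes.Time{Time: time.Date(2017, 5, 31, 0, 0, 0, 0, time.UTC)}
+//	data, err := nodeSvc.Id(123).GetUsage(&start, &end)
+
+// The SoftLayer_Network_Message_Queue_Status data type contains general information relating to a Message Queue account's status.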
+type Network_Message_Queue_Status struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkMessageQueueStatusService returns an instance of the Network_Message_Queue_Status SoftLayer service
+func GetNetworkMessageQueueStatusService(sess *session.Session) Network_Message_Queue_Status {
+	return Network_Message_Queue_Status{Session: sess}
+}
+
+func (r Network_Message_Queue_Status) Id(id int) Network_Message_Queue_Status {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Message_Queue_Status) Mask(mask string) Network_Message_Queue_Status {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Message_Queue_Status) Filter(filter string) Network_Message_Queue_Status {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Message_Queue_Status) Limit(limit int) Network_Message_Queue_Status {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Message_Queue_Status) Offset(offset int) Network_Message_Queue_Status {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Network_Message_Queue_Status) GetObject() (resp datatypes.Network_Message_Queue_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Message_Queue_Status", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Network_Monitor struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkMonitorService returns an instance of the Network_Monitor SoftLayer service
+func GetNetworkMonitorService(sess *session.Session) Network_Monitor {
+	return Network_Monitor{Session: sess}
+}
+
+func (r Network_Monitor) Id(id int) Network_Monitor {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Monitor) Mask(mask string) Network_Monitor {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Monitor) Filter(filter string) Network_Monitor {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Monitor) Limit(limit int) Network_Monitor {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Monitor) Offset(offset int) Network_Monitor {
+	r.Options.Offset = &offset
+	return r
+}
+
+// This will return an array of objects containing the IP addresses monitored on the given hardware. A partial IP address may be passed as the string parameter to search within the available IP addresses. The maximum number of results can also be capped by setting the resultLimit.
+func (r Network_Monitor) GetIpAddressesByHardware(hardware *datatypes.Hardware, partialIpAddress *string) (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	params := []interface{}{
+		hardware,
+		partialIpAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Monitor", "getIpAddressesByHardware", params, &r.Options, &resp)
+	return
+}
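+
+// Example (illustrative sketch, not part of the generated bindings): searching
+// the monitored IP addresses of a hardware object for a partial address. The
+// hardware ID and search string are placeholders; sl.Int and sl.String are the
+// pointer helpers from this library, and only the ID is assumed to be needed
+// on the hardware template.
+//
+//	monitorSvc := services.GetNetworkMonitorService(sess)
+//	hw := datatypes.Hardware{Id: sl.Int(1234)}
+//	ips, err := monitorSvc.Limit(10).GetIpAddressesByHardware(&hw, sl.String("10.0."))
+
+// This will return an array of objects containing the IP addresses monitored on the given virtual guest. A partial IP address may be passed as the string parameter to search within the available IP addresses. The maximum number of results can also be capped by setting the resultLimit.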
+func (r Network_Monitor) GetIpAddressesByVirtualGuest(guest *datatypes.Virtual_Guest, partialIpAddress *string) (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	params := []interface{}{
+		guest,
+		partialIpAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Monitor", "getIpAddressesByVirtualGuest", params, &r.Options, &resp)
+	return
+}
+
+// The Monitoring_Query_Host type represents a monitoring instance. It consists of a hardware ID to monitor, an IP address attached to that hardware ID, a method of monitoring, and what to do should the monitor ever fail.
+type Network_Monitor_Version1_Query_Host struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkMonitorVersion1QueryHostService returns an instance of the Network_Monitor_Version1_Query_Host SoftLayer service
+func GetNetworkMonitorVersion1QueryHostService(sess *session.Session) Network_Monitor_Version1_Query_Host {
+	return Network_Monitor_Version1_Query_Host{Session: sess}
+}
+
+func (r Network_Monitor_Version1_Query_Host) Id(id int) Network_Monitor_Version1_Query_Host {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Monitor_Version1_Query_Host) Mask(mask string) Network_Monitor_Version1_Query_Host {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Monitor_Version1_Query_Host) Filter(filter string) Network_Monitor_Version1_Query_Host {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Monitor_Version1_Query_Host) Limit(limit int) Network_Monitor_Version1_Query_Host {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Monitor_Version1_Query_Host) Offset(offset int) Network_Monitor_Version1_Query_Host {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Passing an unsaved instance of a Query_Host object into this function will create the object and return the results to the user.
+func (r Network_Monitor_Version1_Query_Host) CreateObject(templateObject *datatypes.Network_Monitor_Version1_Query_Host) (resp datatypes.Network_Monitor_Version1_Query_Host, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Passing a collection of unsaved instances of Query_Host objects into this function will create all objects and return the results to the user.
+func (r Network_Monitor_Version1_Query_Host) CreateObjects(templateObjects []datatypes.Network_Monitor_Version1_Query_Host) (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "createObjects", params, &r.Options, &resp)
+	return
+}
+
+// Like any other API object, the monitoring objects can be deleted by passing an instance of them into this function. The ID on the object must be set.
+func (r Network_Monitor_Version1_Query_Host) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "deleteObject", nil, &r.Options, &resp)
+	return
+}
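+
+// Example (illustrative sketch, not part of the generated bindings): creating a
+// monitoring instance for a piece of hardware. The field names on the template
+// (HardwareId, IpAddress, QueryTypeId, ResponseActionId) and the ID values are
+// assumptions for the sketch; valid query and response type IDs come from the
+// Query_Host_Stratum service further down in this file.
+//
+//	qhSvc := services.GetNetworkMonitorVersion1QueryHostService(sess)
+//	monitor, err := qhSvc.CreateObject(&datatypes.Network_Monitor_Version1_Query_Host{
+//		HardwareId:       sl.Int(1234),
+//		IpAddress:        sl.String("10.0.0.5"),
+//		QueryTypeId:      sl.Int(1),
+//		ResponseActionId: sl.Int(1),
+//	})
+
+// Like any other API object, the monitoring objects can be deleted by passing an instance of them into this function. The ID on the object must be set.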
+func (r Network_Monitor_Version1_Query_Host) DeleteObjects(templateObjects []datatypes.Network_Monitor_Version1_Query_Host) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "deleteObjects", params, &r.Options, &resp) + return +} + +// Like any other API object, the monitoring objects can have their exposed properties edited by passing in a modified version of the object. +func (r Network_Monitor_Version1_Query_Host) EditObject(templateObject *datatypes.Network_Monitor_Version1_Query_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "editObject", params, &r.Options, &resp) + return +} + +// Like any other API object, the monitoring objects can have their exposed properties edited by passing in a modified version of the object. +func (r Network_Monitor_Version1_Query_Host) EditObjects(templateObjects []datatypes.Network_Monitor_Version1_Query_Host) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "editObjects", params, &r.Options, &resp) + return +} + +// This method returns all Query_Host objects associated with the passed in hardware ID as long as that hardware ID is owned by the current user's account. +// +// This behavior can also be accomplished by simply tapping networkMonitors on the Hardware_Server object. +func (r Network_Monitor_Version1_Query_Host) FindByHardwareId(hardwareId *int) (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "findByHardwareId", params, &r.Options, &resp) + return +} + +// Retrieve The hardware that is being monitored by this monitoring instance +func (r Network_Monitor_Version1_Query_Host) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The most recent result for this particular monitoring instance. +func (r Network_Monitor_Version1_Query_Host) GetLastResult() (resp datatypes.Network_Monitor_Version1_Query_Result, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "getLastResult", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Monitor_Version1_Query_Host object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Monitor_Version1_Query_Host service. You can only retrieve query hosts attached to hardware that belong to your account. +func (r Network_Monitor_Version1_Query_Host) GetObject() (resp datatypes.Network_Monitor_Version1_Query_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of monitoring query that is executed when this hardware is monitored. +func (r Network_Monitor_Version1_Query_Host) GetQueryType() (resp datatypes.Network_Monitor_Version1_Query_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "getQueryType", nil, &r.Options, &resp) + return +} + +// Retrieve The action taken when a monitor fails. 
+func (r Network_Monitor_Version1_Query_Host) GetResponseAction() (resp datatypes.Network_Monitor_Version1_Query_ResponseType, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host", "getResponseAction", nil, &r.Options, &resp) + return +} + +// The monitoring stratum type stores the maximum level of the various components of the monitoring system that a particular hardware object has access to. This object cannot be accessed by ID, and cannot be modified. The user can access this object through Hardware_Server->availableMonitoring. +// +// There are two values on this object that are important: +// # monitorLevel determines the highest level of SoftLayer_Network_Monitor_Version1_Query_Type object that can be placed in a monitoring instance on this server +// # responseLevel determines the highest level of SoftLayer_Network_Monitor_Version1_Query_ResponseType object that can be placed in a monitoring instance on this server +// +// +// Also note that the query type and response types are available through getAllQueryTypes and getAllResponseTypes, respectively. +type Network_Monitor_Version1_Query_Host_Stratum struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkMonitorVersion1QueryHostStratumService returns an instance of the Network_Monitor_Version1_Query_Host_Stratum SoftLayer service +func GetNetworkMonitorVersion1QueryHostStratumService(sess *session.Session) Network_Monitor_Version1_Query_Host_Stratum { + return Network_Monitor_Version1_Query_Host_Stratum{Session: sess} +} + +func (r Network_Monitor_Version1_Query_Host_Stratum) Id(id int) Network_Monitor_Version1_Query_Host_Stratum { + r.Options.Id = &id + return r +} + +func (r Network_Monitor_Version1_Query_Host_Stratum) Mask(mask string) Network_Monitor_Version1_Query_Host_Stratum { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Monitor_Version1_Query_Host_Stratum) Filter(filter string) Network_Monitor_Version1_Query_Host_Stratum { + r.Options.Filter = filter + return r +} + +func (r Network_Monitor_Version1_Query_Host_Stratum) Limit(limit int) Network_Monitor_Version1_Query_Host_Stratum { + r.Options.Limit = &limit + return r +} + +func (r Network_Monitor_Version1_Query_Host_Stratum) Offset(offset int) Network_Monitor_Version1_Query_Host_Stratum { + r.Options.Offset = &offset + return r +} + +// Calling this function returns all possible query type objects. These objects are to be used to set the values on the SoftLayer_Network_Monitor_Version1_Query_Host when creating new monitoring instances. +func (r Network_Monitor_Version1_Query_Host_Stratum) GetAllQueryTypes() (resp []datatypes.Network_Monitor_Version1_Query_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host_Stratum", "getAllQueryTypes", nil, &r.Options, &resp) + return +} + +// Calling this function returns all possible response type objects. These objects are to be used to set the values on the SoftLayer_Network_Monitor_Version1_Query_Host when creating new monitoring instances. 
+func (r Network_Monitor_Version1_Query_Host_Stratum) GetAllResponseTypes() (resp []datatypes.Network_Monitor_Version1_Query_ResponseType, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host_Stratum", "getAllResponseTypes", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hardware object that these monitoring permissions apply to.
+func (r Network_Monitor_Version1_Query_Host_Stratum) GetHardware() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host_Stratum", "getHardware", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Network_Monitor_Version1_Query_Host_Stratum object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Monitor_Version1_Query_Host_Stratum service. You can only retrieve strata attached to hardware that belong to your account.
+func (r Network_Monitor_Version1_Query_Host_Stratum) GetObject() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Monitor_Version1_Query_Host_Stratum", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// SoftLayer_Network_Pod refers to a portion of a data center that shares a Backend Customer Router (BCR) and usually a front-end counterpart known as a Frontend Customer Router (FCR). A Pod primarily denotes a logical location within the network and the physical aspects that support networks. This is in contrast to representing a specific physical location.
+//
+// A ``Pod`` is identified by a ``name``, which is unique. A Pod name follows the format 'dddnn.podii', where 'ddd' is a data center code, 'nn' is the data center number, 'pod' is a literal string and 'ii' is a two-digit, left-zero-padded number which corresponds to a Backend Customer Router (BCR) of the desired data center. Examples:
+// * dal09.pod01 = Dallas 9, Pod 1 (i.e. bcr01)
+// * sjc01.pod04 = San Jose 1, Pod 4 (i.e. bcr04)
+// * ams01.pod01 = Amsterdam 1, Pod 1 (i.e. bcr01)
+type Network_Pod struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkPodService returns an instance of the Network_Pod SoftLayer service
+func GetNetworkPodService(sess *session.Session) Network_Pod {
+	return Network_Pod{Session: sess}
+}
+
+func (r Network_Pod) Id(id int) Network_Pod {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Pod) Mask(mask string) Network_Pod {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Pod) Filter(filter string) Network_Pod {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Pod) Limit(limit int) Network_Pod {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Pod) Offset(offset int) Network_Pod {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Network_Pod) GetAllObjects() (resp []datatypes.Network_Pod, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Pod", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
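+
+// Example (illustrative sketch, not part of the generated bindings): listing
+// all Pods and narrowing to one by name with an object filter. The filter
+// package is assumed to be github.com/softlayer/softlayer-go/filter, and the
+// pod name follows the 'dddnn.podii' format described above.
+//
+//	podSvc := services.GetNetworkPodService(sess)
+//	pods, err := podSvc.
+//		Filter(filter.Path("name").Eq("dal09.pod01").Build()).
+//		GetAllObjects()
+
+// Provides the list of capabilities a Pod fulfills. See [[SoftLayer_Network_Pod/listCapabilities]] for more information on capabilities.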
+func (r Network_Pod) GetCapabilities() (resp []string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Pod", "getCapabilities", nil, &r.Options, &resp)
+	return
+}
+
+// Set the initialization parameter to the ``name`` of the Pod to retrieve.
+func (r Network_Pod) GetObject() (resp datatypes.Network_Pod, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Pod", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// A capability is simply a string literal that denotes the availability of a feature. Capabilities are generally self-describing, but any additional details concerning the implications of a capability will be documented elsewhere; usually by the Service or Operation related to it.
+func (r Network_Pod) ListCapabilities() (resp []string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Pod", "listCapabilities", nil, &r.Options, &resp)
+	return
+}
+
+//
+// This is a Beta release of the Security Group feature. The use of this feature is restricted to select
+// users. When the Beta period is over, security groups will be available for all users. Contact sgbeta@us.ibm.com
+// using 'Security Groups' in the subject line with any questions.
+//
+//
+// The SoftLayer_Network_SecurityGroup data type contains general information for a single security group.
+// Security groups contain a set of [[SoftLayer_Network_SecurityGroup_Rule (type)|rules]] that handle traffic
+// to virtual guest instances and a set of
+// [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)|bindings]] to associate virtual guest
+// network components with the security group.
+type Network_SecurityGroup struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkSecurityGroupService returns an instance of the Network_SecurityGroup SoftLayer service
+func GetNetworkSecurityGroupService(sess *session.Session) Network_SecurityGroup {
+	return Network_SecurityGroup{Session: sess}
+}
+
+func (r Network_SecurityGroup) Id(id int) Network_SecurityGroup {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_SecurityGroup) Mask(mask string) Network_SecurityGroup {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_SecurityGroup) Filter(filter string) Network_SecurityGroup {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_SecurityGroup) Limit(limit int) Network_SecurityGroup {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_SecurityGroup) Offset(offset int) Network_SecurityGroup {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Add new rules to a security group by sending in an array of template [[SoftLayer_Network_SecurityGroup_Rule (type)]] objects to be created.
+func (r Network_SecurityGroup) AddRules(ruleTemplates []datatypes.Network_SecurityGroup_Rule) (resp bool, err error) {
+	params := []interface{}{
+		ruleTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "addRules", params, &r.Options, &resp)
+	return
+}
+
+// Attach virtual guest network components to a security group by creating [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)]] objects.
+func (r Network_SecurityGroup) AttachNetworkComponents(networkComponentIds []int) (resp bool, err error) {
+	params := []interface{}{
+		networkComponentIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "attachNetworkComponents", params, &r.Options, &resp)
+	return
+}
+
+// Create new security groups
+func (r Network_SecurityGroup) CreateObjects(templateObjects []datatypes.Network_SecurityGroup) (resp []datatypes.Network_SecurityGroup, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "createObjects", params, &r.Options, &resp)
+	return
+}
+
+// Delete security groups for an account. A security group cannot be deleted if any network components are attached or if the security group is a remote security group for a [[SoftLayer_Network_SecurityGroup_Rule (type)|rule]].
+func (r Network_SecurityGroup) DeleteObjects(templateObjects []datatypes.Network_SecurityGroup) (resp bool, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "deleteObjects", params, &r.Options, &resp)
+	return
+}
+
+// Detach virtual guest network components from a security group by deleting their [[SoftLayer_Virtual_Network_SecurityGroup_NetworkComponentBinding (type)]] objects.
+func (r Network_SecurityGroup) DetachNetworkComponents(networkComponentIds []int) (resp bool, err error) {
+	params := []interface{}{
+		networkComponentIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "detachNetworkComponents", params, &r.Options, &resp)
+	return
+}
+
+// Edit security groups
+func (r Network_SecurityGroup) EditObjects(templateObjects []datatypes.Network_SecurityGroup) (resp bool, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "editObjects", params, &r.Options, &resp)
+	return
+}
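+
+// Example (illustrative sketch, not part of the generated bindings): creating a
+// security group and opening TCP port 22 for ingress. The group name and the
+// rule field names (Direction, Protocol, PortRangeMin, PortRangeMax) are
+// assumptions for the sketch; see the rule datatype for the full property set.
+//
+//	sgSvc := services.GetNetworkSecurityGroupService(sess)
+//	groups, err := sgSvc.CreateObjects([]datatypes.Network_SecurityGroup{
+//		{Name: sl.String("allow-ssh"), Description: sl.String("inbound ssh")},
+//	})
+//	if err == nil && len(groups) > 0 {
+//		_, err = sgSvc.Id(*groups[0].Id).AddRules([]datatypes.Network_SecurityGroup_Rule{
+//			{Direction: sl.String("ingress"), Protocol: sl.String("tcp"),
+//				PortRangeMin: sl.Int(22), PortRangeMax: sl.Int(22)},
+//		})
+//	}
+
+// Edit rules that belong to the security group. An array of skeleton [[SoftLayer_Network_SecurityGroup_Rule]] objects must be sent in with only the properties defined that you want to change. Unchanged properties are left alone.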
+func (r Network_SecurityGroup) EditRules(rules []datatypes.Network_SecurityGroup_Rule) (resp bool, err error) { + params := []interface{}{ + rules, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "editRules", params, &r.Options, &resp) + return +} + +// Retrieve The account for this security group +func (r Network_SecurityGroup) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_SecurityGroup) GetAllObjects() (resp []datatypes.Network_SecurityGroup, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The network component bindings for this security group +func (r Network_SecurityGroup) GetNetworkComponentBindings() (resp []datatypes.Virtual_Network_SecurityGroup_NetworkComponentBinding, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getNetworkComponentBindings", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_SecurityGroup) GetObject() (resp datatypes.Network_SecurityGroup, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The rules for this security group +func (r Network_SecurityGroup) GetRules() (resp []datatypes.Network_SecurityGroup_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "getRules", nil, &r.Options, &resp) + return +} + +// Remove rules from a security group +func (r Network_SecurityGroup) RemoveRules(ruleIds []int) (resp bool, err error) { + params := []interface{}{ + ruleIds, + } + err = r.Session.DoRequest("SoftLayer_Network_SecurityGroup", "removeRules", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Security_Scanner_Request data type represents a single vulnerability scan request. It provides information on when the scan was created, last updated, and the current status. The status messages are as follows: +// *Scan Pending +// *Scan Processing +// *Scan Complete +// *Scan Cancelled +// *Generating Report. +type Network_Security_Scanner_Request struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSecurityScannerRequestService returns an instance of the Network_Security_Scanner_Request SoftLayer service +func GetNetworkSecurityScannerRequestService(sess *session.Session) Network_Security_Scanner_Request { + return Network_Security_Scanner_Request{Session: sess} +} + +func (r Network_Security_Scanner_Request) Id(id int) Network_Security_Scanner_Request { + r.Options.Id = &id + return r +} + +func (r Network_Security_Scanner_Request) Mask(mask string) Network_Security_Scanner_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Security_Scanner_Request) Filter(filter string) Network_Security_Scanner_Request { + r.Options.Filter = filter + return r +} + +func (r Network_Security_Scanner_Request) Limit(limit int) Network_Security_Scanner_Request { + r.Options.Limit = &limit + return r +} + +func (r Network_Security_Scanner_Request) Offset(offset int) Network_Security_Scanner_Request { + r.Options.Offset = &offset + return r +} + +// Create a new vulnerability scan request. 
New scan requests are picked up every five minutes, and the time to complete an actual scan may vary. Once the scan is finished, it can take up to another five minutes for the report to be generated and accessible.
+func (r Network_Security_Scanner_Request) CreateObject(templateObject *datatypes.Network_Security_Scanner_Request) (resp datatypes.Network_Security_Scanner_Request, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account associated with a security scan request.
+func (r Network_Security_Scanner_Request) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The virtual guest a security scan is run against.
+func (r Network_Security_Scanner_Request) GetGuest() (resp datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getGuest", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hardware a security scan is run against.
+func (r Network_Security_Scanner_Request) GetHardware() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getHardware", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Network_Security_Scanner_Request object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Security_Scanner_Request service. You can only retrieve requests and reports that are assigned to your SoftLayer account.
+func (r Network_Security_Scanner_Request) GetObject() (resp datatypes.Network_Security_Scanner_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Get the vulnerability report for a scan request, formatted as an HTML string. Previous scan reports are held indefinitely.
+func (r Network_Security_Scanner_Request) GetReport() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getReport", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Flag whether the requestor owns the hardware the scan was run on. This flag will return for hardware servers only; virtual servers will result in a null return even if you have a request out for them.
+func (r Network_Security_Scanner_Request) GetRequestorOwnedFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getRequestorOwnedFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A security scan request's status.
+func (r Network_Security_Scanner_Request) GetStatus() (resp datatypes.Network_Security_Scanner_Request_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Security_Scanner_Request", "getStatus", nil, &r.Options, &resp)
+	return
+}
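+
+// Example (illustrative sketch, not part of the generated bindings): requesting
+// a scan against one IP on a hardware server, then fetching the report after
+// the status reaches "Scan Complete". The HardwareId and IpAddress field names
+// and the ID values are assumptions for the sketch; poll GetStatus before
+// calling GetReport.
+//
+//	scanSvc := services.GetNetworkSecurityScannerRequestService(sess)
+//	req, err := scanSvc.CreateObject(&datatypes.Network_Security_Scanner_Request{
+//		HardwareId: sl.Int(1234),
+//		IpAddress:  sl.String("203.0.113.10"),
+//	})
+//	// ... later, once scanSvc.Id(*req.Id).GetStatus() reports completion ...
+//	report, err := scanSvc.Id(*req.Id).GetReport()
+
+// The SoftLayer_Network_Service_Vpn_Overrides data type contains information relating user IDs to subnet IDs when VPN access is manually configured. It is essentially an entry in a 'white list' of subnets a SoftLayer portal VPN user may access.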
+type Network_Service_Vpn_Overrides struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkServiceVpnOverridesService returns an instance of the Network_Service_Vpn_Overrides SoftLayer service
+func GetNetworkServiceVpnOverridesService(sess *session.Session) Network_Service_Vpn_Overrides {
+	return Network_Service_Vpn_Overrides{Session: sess}
+}
+
+func (r Network_Service_Vpn_Overrides) Id(id int) Network_Service_Vpn_Overrides {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Service_Vpn_Overrides) Mask(mask string) Network_Service_Vpn_Overrides {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Service_Vpn_Overrides) Filter(filter string) Network_Service_Vpn_Overrides {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Service_Vpn_Overrides) Limit(limit int) Network_Service_Vpn_Overrides {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Service_Vpn_Overrides) Offset(offset int) Network_Service_Vpn_Overrides {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Create SoftLayer portal user VPN overrides.
+func (r Network_Service_Vpn_Overrides) CreateObjects(templateObjects []datatypes.Network_Service_Vpn_Overrides) (resp bool, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "createObjects", params, &r.Options, &resp)
+	return
+}
+
+// Use this method to delete a single SoftLayer portal VPN user subnet override.
+func (r Network_Service_Vpn_Overrides) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// Use this method to delete a collection of SoftLayer portal VPN user subnet overrides.
+func (r Network_Service_Vpn_Overrides) DeleteObjects(templateObjects []datatypes.Network_Service_Vpn_Overrides) (resp bool, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "deleteObjects", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Service_Vpn_Overrides) GetObject() (resp datatypes.Network_Service_Vpn_Overrides, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Subnet components accessible by a SoftLayer VPN portal user.
+func (r Network_Service_Vpn_Overrides) GetSubnet() (resp datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "getSubnet", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve SoftLayer VPN portal user.
+func (r Network_Service_Vpn_Overrides) GetUser() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Service_Vpn_Overrides", "getUser", nil, &r.Options, &resp)
+	return
+}
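+
+// Example (illustrative sketch, not part of the generated bindings): granting a
+// portal VPN user access to one subnet. The UserId and SubnetId field names and
+// the ID values are assumptions for the sketch.
+//
+//	vpnSvc := services.GetNetworkServiceVpnOverridesService(sess)
+//	ok, err := vpnSvc.CreateObjects([]datatypes.Network_Service_Vpn_Overrides{
+//		{UserId: sl.Int(9876), SubnetId: sl.Int(54321)},
+//	})
+
+// The SoftLayer_Network_Storage data type contains general information regarding a Storage product such as account id, access username and password, the Storage product type, and the server the Storage service is associated with. Currently, only EVault backup storage has an associated server.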
+type Network_Storage struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageService returns an instance of the Network_Storage SoftLayer service +func GetNetworkStorageService(sess *session.Session) Network_Storage { + return Network_Storage{Session: sess} +} + +func (r Network_Storage) Id(id int) Network_Storage { + r.Options.Id = &id + return r +} + +func (r Network_Storage) Mask(mask string) Network_Storage { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage) Filter(filter string) Network_Storage { + r.Options.Filter = filter + return r +} + +func (r Network_Storage) Limit(limit int) Network_Storage { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage) Offset(offset int) Network_Storage { + r.Options.Offset = &offset + return r +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage) AllowAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromHardware", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) AllowAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage) AllowAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage volume will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage) AllowAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromHostList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage volume. 
+func (r Network_Storage) AllowAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) {
+	params := []interface{}{
+		ipAddressObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) AllowAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) {
+	params := []interface{}{
+		ipAddressObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromIpAddressList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedSubnets property of this storage volume.
+func (r Network_Storage) AllowAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) {
+	params := []interface{}{
+		subnetObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromSubnet", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) AllowAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) {
+	params := []interface{}{
+		subnetObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromSubnetList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume.
+func (r Network_Storage) AllowAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromVirtualGuest", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume.
+func (r Network_Storage) AllowAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessFromVirtualGuestList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replicant volume.
+func (r Network_Storage) AllowAccessToReplicantFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) {
+	params := []interface{}{
+		hardwareObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromHardware", params, &r.Options, &resp)
+	return
+}
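+
+// Example (illustrative sketch, not part of the generated bindings): allowing a
+// virtual guest to access a storage volume. The volume and guest IDs are
+// placeholders; only the ID is assumed to be needed on the guest template.
+//
+//	storageSvc := services.GetNetworkStorageService(sess)
+//	guest := datatypes.Virtual_Guest{Id: sl.Int(5678)}
+//	ok, err := storageSvc.Id(112233).AllowAccessFromVirtualGuest(&guest)
+
+// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Hardware objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationHardware property of this storage volume.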
+func (r Network_Storage) AllowAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) {
+	params := []interface{}{
+		hardwareObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromHardwareList", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) AllowAccessToReplicantFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) {
+	params := []interface{}{
+		ipAddressObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationIpAddresses property of this storage volume.
+func (r Network_Storage) AllowAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) {
+	params := []interface{}{
+		ipAddressObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromIpAddressList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedSubnets property of this storage replicant volume.
+func (r Network_Storage) AllowAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) {
+	params := []interface{}{
+		subnetObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromSubnet", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume.
+func (r Network_Storage) AllowAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) {
+	params := []interface{}{
+		subnetObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromSubnetList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage replicant volume.
+func (r Network_Storage) AllowAccessToReplicantFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromVirtualGuest", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationVirtualGuests property of this storage volume.
+func (r Network_Storage) AllowAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "allowAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp)
+	return
+}
+
+// This method will assign an existing credential to the current volume. The credential must have been created using the 'addNewCredential' method. The volume type must support an additional credential.
+func (r Network_Storage) AssignCredential(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "assignCredential", params, &r.Options, &resp)
+	return
+}
+
+// This method will set up a new credential for the remote storage volume. The storage volume must support an additional credential. Once created, the credential will be automatically assigned to the current volume. If there are no volumes assigned to the credential, it will be automatically deleted.
+func (r Network_Storage) AssignNewCredential(typ *string) (resp datatypes.Network_Storage_Credential, err error) {
+	params := []interface{}{
+		typ,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "assignNewCredential", params, &r.Options, &resp)
+	return
+}
+
+// This method will change the password for the given Storage/Virtual Server Storage account.
+func (r Network_Storage) ChangePassword(username *string, currentPassword *string, newPassword *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+		currentPassword,
+		newPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "changePassword", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}}
+//
+// collectBandwidth() retrieves the bandwidth usage for the current billing cycle.
+func (r Network_Storage) CollectBandwidth(typ *string, startDate *datatypes.Time, endDate *datatypes.Time) (resp uint, err error) {
+	params := []interface{}{
+		typ,
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "collectBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}}
+//
+// collectBytesUsed() retrieves the number of bytes of capacity currently in use on a Storage account.
+func (r Network_Storage) CollectBytesUsed() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "collectBytesUsed", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) CreateFolder(folder *string) (resp bool, err error) {
+	params := []interface{}{
+		folder,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "createFolder", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) CreateSnapshot(notes *string) (resp datatypes.Network_Storage, err error) {
+	params := []interface{}{
+		notes,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "createSnapshot", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Delete all files within a Storage account. Depending on the type of Storage account, deleting either deletes files permanently or sends files to your account's recycle bin.
+//
+// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted.
+//
+// Please note, files cannot be restored once they are permanently deleted.
+func (r Network_Storage) DeleteAllFiles() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "deleteAllFiles", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Delete an individual file within a Storage account. Depending on the type of Storage account, deleting a file either deletes the file permanently or sends the file to your account's recycle bin.
+//
+// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the file is in the account's recycle bin. If the file exists in the recycle bin, then it is permanently deleted.
+//
+// Please note, a file cannot be restored once it is permanently deleted.
+func (r Network_Storage) DeleteFile(fileId *string) (resp bool, err error) {
+	params := []interface{}{
+		fileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "deleteFile", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Delete multiple files within a Storage account. Depending on the type of Storage account, deleting either deletes files permanently or sends files to your account's recycle bin.
+//
+// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted.
+//
+// Please note, files cannot be restored once they are permanently deleted.
+func (r Network_Storage) DeleteFiles(fileIds []string) (resp bool, err error) {
+	params := []interface{}{
+		fileIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "deleteFiles", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) DeleteFolder(folder *string) (resp bool, err error) {
+	params := []interface{}{
+		folder,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "deleteFolder", params, &r.Options, &resp)
+	return
+}
+
+// Delete a network storage volume. '''This cannot be undone.''' At this time only network storage snapshots may be deleted with this method.
+//
+// ''deleteObject'' returns Boolean ''true'' on successful deletion or ''false'' if it was unable to remove a volume.
+func (r Network_Storage) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// This method is not valid for Legacy iSCSI Storage Volumes.
+//
+// Disable scheduled snapshots of this storage volume. Scheduling options include HOURLY, DAILY and WEEKLY schedules.
+func (r Network_Storage) DisableSnapshots(scheduleType *string) (resp bool, err error) {
+	params := []interface{}{
+		scheduleType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "disableSnapshots", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Download a file from a Storage account. This method returns a file's details including the file's raw content.
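+//
+// A minimal sketch (the file identifier is an illustrative assumption; sess is
+// an authenticated *session.Session):
+//
+//	file, err := services.GetNetworkStorageService(sess).
+//		Id(12345).
+//		DownloadFile(sl.String("abc123"))
+//	// on success, file.Content is expected to hold the raw file bytes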
+func (r Network_Storage) DownloadFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		fileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "downloadFile", params, &r.Options, &resp)
+	return
+}
+
+// This method will change the password of a credential created using the 'addNewCredential' method. If the credential exists on multiple storage volumes, it will change for those volumes as well.
+func (r Network_Storage) EditCredential(username *string, newPassword *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+		newPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "editCredential", params, &r.Options, &resp)
+	return
+}
+
+// The password and/or notes may be modified for the Storage service, except EVault passwords and notes.
+func (r Network_Storage) EditObject(templateObject *datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// This method is not valid for Legacy iSCSI Storage Volumes.
+//
+// Enable scheduled snapshots of this storage volume. Scheduling options include HOURLY, DAILY and WEEKLY schedules. For HOURLY schedules, provide relevant data for $scheduleType, $retentionCount and $minute. For DAILY schedules, provide relevant data for $scheduleType, $retentionCount, $minute, and $hour. For WEEKLY schedules, provide relevant data for all parameters of this method.
+func (r Network_Storage) EnableSnapshots(scheduleType *string, retentionCount *int, minute *int, hour *int, dayOfWeek *string) (resp bool, err error) {
+	params := []interface{}{
+		scheduleType,
+		retentionCount,
+		minute,
+		hour,
+		dayOfWeek,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "enableSnapshots", params, &r.Options, &resp)
+	return
+}
+
+// Failback from a volume replicant. In order to fail back, the volume must have already been failed over to a replicant.
+func (r Network_Storage) FailbackFromReplicant() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "failbackFromReplicant", nil, &r.Options, &resp)
+	return
+}
+
+// Failover to a volume replicant. During the time in which the replicant is in use, the local NAS volume will not be available.
+func (r Network_Storage) FailoverToReplicant(replicantId *int) (resp bool, err error) {
+	params := []interface{}{
+		replicantId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "failoverToReplicant", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account that a Storage service belongs to.
+func (r Network_Storage) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Other usernames and passwords associated with a Storage volume.
+func (r Network_Storage) GetAccountPassword() (resp datatypes.Account_Password, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAccountPassword", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The currently active transactions on a network storage volume.
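+//
+// A hedged sketch for checking whether the volume is busy (the ID is an
+// illustrative assumption):
+//
+//	txns, err := services.GetNetworkStorageService(sess).Id(12345).GetActiveTransactions()
+//	if err == nil && len(txns) > 0 {
+//		// a provisioning transaction is still running; retry later
+//	}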
+func (r Network_Storage) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getActiveTransactions", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files in a Storage account's root directory. This does not download file content.
+func (r Network_Storage) GetAllFiles() (resp []datatypes.Container_Utility_File_Entity, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllFiles", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files matching the filter's criteria in a Storage account's root directory. This does not download file content.
+func (r Network_Storage) GetAllFilesByFilter(filter *datatypes.Container_Utility_File_Entity) (resp []datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		filter,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllFilesByFilter", params, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Hardware that can be authorized to this SoftLayer_Network_Storage.
+func (r Network_Storage) GetAllowableHardware(filterHostname *string) (resp []datatypes.Hardware, err error) {
+	params := []interface{}{
+		filterHostname,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowableHardware", params, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Subnet_IpAddress that can be authorized to this SoftLayer_Network_Storage.
+func (r Network_Storage) GetAllowableIpAddresses(subnetId *int, filterIpAddress *string) (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	params := []interface{}{
+		subnetId,
+		filterIpAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowableIpAddresses", params, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Subnet that can be authorized to this SoftLayer_Network_Storage.
+func (r Network_Storage) GetAllowableSubnets(filterNetworkIdentifier *string) (resp []datatypes.Network_Subnet, err error) {
+	params := []interface{}{
+		filterNetworkIdentifier,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowableSubnets", params, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Virtual_Guest that can be authorized to this SoftLayer_Network_Storage.
+func (r Network_Storage) GetAllowableVirtualGuests(filterHostname *string) (resp []datatypes.Virtual_Guest, err error) {
+	params := []interface{}{
+		filterHostname,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowableVirtualGuests", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume.
+func (r Network_Storage) GetAllowedHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the total allowed hosts limit per volume.
+func (r Network_Storage) GetAllowedHostsLimit() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedHostsLimit", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume.
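+//
+// An illustrative sketch; the object mask shown is an assumption chosen to
+// trim the response:
+//
+//	ips, err := services.GetNetworkStorageService(sess).
+//		Id(12345).
+//		Mask("id;ipAddress").
+//		GetAllowedIpAddresses()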
+func (r Network_Storage) GetAllowedIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant.
+func (r Network_Storage) GetAllowedReplicationHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedReplicationHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume's Replicant.
+func (r Network_Storage) GetAllowedReplicationIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedReplicationIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume's Replicant.
+func (r Network_Storage) GetAllowedReplicationSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedReplicationSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume's Replicant.
+func (r Network_Storage) GetAllowedReplicationVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedReplicationVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume.
+func (r Network_Storage) GetAllowedSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume.
+func (r Network_Storage) GetAllowedVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getAllowedVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current billing item for a Storage volume.
+func (r Network_Storage) GetBillingItem() (resp datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Storage) GetBillingItemCategory() (resp datatypes.Product_Item_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getBillingItemCategory", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve network storage accounts by username and storage account type. Use this method if you wish to retrieve a storage record by username rather than by id. The ''type'' parameter must correspond to one of the available ''nasType'' values in the SoftLayer_Network_Storage data type.
+func (r Network_Storage) GetByUsername(username *string, typ *string) (resp []datatypes.Network_Storage, err error) {
+	params := []interface{}{
+		username,
+		typ,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getByUsername", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The amount of space used by the volume, in bytes.
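+//
+// A short sketch (ID illustrative); the API returns the byte count as a
+// string, so callers may need to parse it, e.g. with strconv.ParseUint:
+//
+//	used, err := services.GetNetworkStorageService(sess).Id(12345).GetBytesUsed()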
+func (r Network_Storage) GetBytesUsed() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getBytesUsed", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) GetCdnUrls() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_ContentDeliveryUrl, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getCdnUrls", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) GetClusterResource() (resp datatypes.Network_Service_Resource, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getClusterResource", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The id of the schedule which was executed to create a snapshot.
+func (r Network_Storage) GetCreationScheduleId() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getCreationScheduleId", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Storage) GetCredentials() (resp []datatypes.Network_Storage_Credential, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getCredentials", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Daily Schedule which is associated with this network storage volume.
+func (r Network_Storage) GetDailySchedule() (resp datatypes.Network_Storage_Schedule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getDailySchedule", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The events which have taken place on a network storage volume.
+func (r Network_Storage) GetEvents() (resp []datatypes.Network_Storage_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getEvents", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) GetFileBlockEncryptedLocations() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFileBlockEncryptedLocations", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date of a file within a Storage account. This does not download file content.
+func (r Network_Storage) GetFileByIdentifier(identifier *string) (resp datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		identifier,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFileByIdentifier", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve the number of files in a Virtual Server Storage account's root directory. This does not include the files stored in the recycle bin.
+func (r Network_Storage) GetFileCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFileCount", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) GetFileList(folder *string, path *string) (resp []datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		folder,
+		path,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFileList", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Retrieves the NFS Network Mount Address Name for a given File Storage Volume.
+func (r Network_Storage) GetFileNetworkMountAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFileNetworkMountAddress", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve the number of files pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted.
+func (r Network_Storage) GetFilePendingDeleteCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFilePendingDeleteCount", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve a list of files that are pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted. This method does not download file content.
+func (r Network_Storage) GetFilesPendingDelete() (resp []datatypes.Container_Utility_File_Entity, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFilesPendingDelete", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) GetFolderList() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Folder, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getFolderList", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}}
+//
+// getGraph() retrieves a Storage account's usage and returns a PNG graph image, title, and the minimum and maximum dates included in the graphed date range. Virtual Server storage accounts can also graph upload and download bandwidth usage.
+func (r Network_Storage) GetGraph(startDate *datatypes.Time, endDate *datatypes.Time, typ *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+		typ,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getGraph", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve When applicable, the hardware associated with a Storage service.
+func (r Network_Storage) GetHardware() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Storage) GetHasEncryptionAtRest() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getHasEncryptionAtRest", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Hourly Schedule which is associated with this network storage volume.
+func (r Network_Storage) GetHourlySchedule() (resp datatypes.Network_Storage_Schedule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getHourlySchedule", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The maximum number of IOPs selected for this volume.
+func (r Network_Storage) GetIops() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getIops", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determines whether a volume is ready to order snapshot space, or, if snapshot space is already available, to assign a snapshot schedule, or to take a manual snapshot.
+func (r Network_Storage) GetIsReadyForSnapshot() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getIsReadyForSnapshot", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Determines whether a volume is ready to have Hosts authorized to access it. This does not indicate whether another operation may be blocking; please refer to this volume's volumeStatus property for details.
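+//
+// A hedged polling sketch (ID and wait strategy are illustrative assumptions):
+//
+//	svc := services.GetNetworkStorageService(sess).Id(12345)
+//	for i := 0; i < 10; i++ {
+//		ready, err := svc.GetIsReadyToMount()
+//		if err != nil || ready {
+//			break
+//		}
+//		time.Sleep(30 * time.Second)
+//	}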
+func (r Network_Storage) GetIsReadyToMount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getIsReadyToMount", nil, &r.Options, &resp) + return +} + +// Retrieve Relationship between a container volume and iSCSI LUNs. +func (r Network_Storage) GetIscsiLuns() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getIscsiLuns", nil, &r.Options, &resp) + return +} + +// Retrieve The ID of the LUN volume. +func (r Network_Storage) GetLunId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getLunId", nil, &r.Options, &resp) + return +} + +// Retrieve The manually-created snapshots associated with this SoftLayer_Network_Storage volume. Does not support pagination by result limit and offset. +func (r Network_Storage) GetManualSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getManualSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieve A network storage volume's metric tracking object. This object records all periodic polled data available to this volume. +func (r Network_Storage) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a network storage volume may be mounted. +func (r Network_Storage) GetMountableFlag() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getMountableFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of split or move operation as a part of volume duplication. +func (r Network_Storage) GetMoveAndSplitStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getMoveAndSplitStatus", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetNetworkMountAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getNetworkMountAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The subscribers that will be notified for usage amount warnings and overages. +func (r Network_Storage) GetNotificationSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Storage object whose ID corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Storage service. +// +// Please use the associated methods in the [[SoftLayer_Network_Storage]] service to retrieve a Storage account's id. 
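+//
+// A minimal retrieval sketch (the ID and object mask are illustrative
+// assumptions):
+//
+//	volume, err := services.GetNetworkStorageService(sess).
+//		Id(12345).
+//		Mask("id;username;capacityGb").
+//		GetObject()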
+func (r Network_Storage) GetObject() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetObjectStorageConnectionInformation() (resp []datatypes.Container_Network_Service_Resource_ObjectStorage_ConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getObjectStorageConnectionInformation", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by SoftLayer_Network_Storage_Credential object. Use this method if you wish to retrieve a storage record by a credential rather than by id. +func (r Network_Storage) GetObjectsByCredential(credentialObject *datatypes.Network_Storage_Credential) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + credentialObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getObjectsByCredential", params, &r.Options, &resp) + return +} + +// Retrieve The name of the snapshot that this volume was duplicated from. +func (r Network_Storage) GetOriginalSnapshotName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getOriginalSnapshotName", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the volume that this volume was duplicated from. +func (r Network_Storage) GetOriginalVolumeName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getOriginalVolumeName", nil, &r.Options, &resp) + return +} + +// Retrieve The size (in GB) of the volume that this volume was duplicated from, or in the case of iSCSI LUNs, the size of the base originally-provisioned LUN. +func (r Network_Storage) GetOriginalVolumeSize() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getOriginalVolumeSize", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type. +func (r Network_Storage) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type ID. +func (r Network_Storage) GetOsTypeId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getOsTypeId", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume in a parental role. +func (r Network_Storage) GetParentPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getParentPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve The parent volume of a volume in a complex storage relationship. +func (r Network_Storage) GetParentVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getParentVolume", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume. +func (r Network_Storage) GetPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve All permissions group(s) this volume is in. 
+func (r Network_Storage) GetPermissionsGroups() (resp []datatypes.Network_Storage_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getPermissionsGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The properties used to provide additional details about a network storage volume.
+func (r Network_Storage) GetProperties() (resp []datatypes.Network_Storage_Property, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getProperties", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The number of IOPs provisioned for this volume.
+func (r Network_Storage) GetProvisionedIops() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getProvisionedIops", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve the details of a file that is pending deletion in a Storage account's recycle bin.
+func (r Network_Storage) GetRecycleBinFileByIdentifier(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		fileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getRecycleBinFileByIdentifier", params, &r.Options, &resp)
+	return
+}
+
+// Retrieves the remaining number of allowed hosts per volume.
+func (r Network_Storage) GetRemainingAllowedHosts() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getRemainingAllowedHosts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The iSCSI LUN volumes being replicated by this network storage volume.
+func (r Network_Storage) GetReplicatingLuns() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicatingLuns", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network storage volume being replicated by a volume.
+func (r Network_Storage) GetReplicatingVolume() (resp datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicatingVolume", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The volume replication events.
+func (r Network_Storage) GetReplicationEvents() (resp []datatypes.Network_Storage_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicationEvents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network storage volumes configured to be replicants of a volume.
+func (r Network_Storage) GetReplicationPartners() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicationPartners", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Replication Schedule associated with a network storage volume.
+func (r Network_Storage) GetReplicationSchedule() (resp datatypes.Network_Storage_Schedule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicationSchedule", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current replication status of a network storage volume. Indicates Failover or Failback status.
+func (r Network_Storage) GetReplicationStatus() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getReplicationStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The schedules which are associated with a network storage volume.
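+//
+// An illustrative sketch (ID assumed); the fields available on each schedule
+// follow the datatypes.Network_Storage_Schedule definition:
+//
+//	schedules, err := services.GetNetworkStorageService(sess).Id(12345).GetSchedules()
+//	for _, s := range schedules {
+//		// inspect, e.g., s.Type and s.RetentionCount
+//	}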
+func (r Network_Storage) GetSchedules() (resp []datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSchedules", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource a Storage service is connected to. +func (r Network_Storage) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Retrieve The IP address of a Storage resource. +func (r Network_Storage) GetServiceResourceBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getServiceResourceBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The name of a Storage's network resource. +func (r Network_Storage) GetServiceResourceName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getServiceResourceName", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured snapshot space size. +func (r Network_Storage) GetSnapshotCapacityGb() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotCapacityGb", nil, &r.Options, &resp) + return +} + +// Retrieve The creation timestamp of the snapshot on the storage platform. +func (r Network_Storage) GetSnapshotCreationTimestamp() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotCreationTimestamp", nil, &r.Options, &resp) + return +} + +// Retrieve The percentage of used snapshot space after which to delete automated snapshots. +func (r Network_Storage) GetSnapshotDeletionThresholdPercentage() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotDeletionThresholdPercentage", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshot size in bytes. +func (r Network_Storage) GetSnapshotSizeBytes() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotSizeBytes", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's available snapshot reservation space. +func (r Network_Storage) GetSnapshotSpaceAvailable() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotSpaceAvailable", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshots associated with this SoftLayer_Network_Storage volume. +func (r Network_Storage) GetSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieves a list of snapshots for this SoftLayer_Network_Storage volume. This method works with the result limits and offset to support pagination. +func (r Network_Storage) GetSnapshotsForVolume() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getSnapshotsForVolume", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage) GetStaasVersion() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getStaasVersion", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage groups this volume is attached to. 
+func (r Network_Storage) GetStorageGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getStorageGroups", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetStorageGroupsNetworkConnectionDetails() (resp []datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getStorageGroupsNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage) GetStorageTierLevel() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getStorageTierLevel", nil, &r.Options, &resp) + return +} + +// Retrieve A description of the Storage object. +func (r Network_Storage) GetStorageType() (resp datatypes.Network_Storage_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getStorageType", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume. +func (r Network_Storage) GetTotalBytesUsed() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getTotalBytesUsed", nil, &r.Options, &resp) + return +} + +// Retrieve The total snapshot retention count of all schedules on this network storage volume. +func (r Network_Storage) GetTotalScheduleSnapshotRetentionCount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getTotalScheduleSnapshotRetentionCount", nil, &r.Options, &resp) + return +} + +// Retrieve The usage notification for SL Storage services. +func (r Network_Storage) GetUsageNotification() (resp datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getUsageNotification", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) GetValidReplicationTargetDatacenterLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getValidReplicationTargetDatacenterLocations", nil, &r.Options, &resp) + return +} + +// Retrieve The type of network storage service. +func (r Network_Storage) GetVendorName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getVendorName", nil, &r.Options, &resp) + return +} + +// Retrieve When applicable, the virtual guest associated with a Storage service. +func (r Network_Storage) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// This method returns the parameters for cloning a volume +func (r Network_Storage) GetVolumeDuplicateParameters() (resp datatypes.Container_Network_Storage_VolumeDuplicateParameters, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getVolumeDuplicateParameters", nil, &r.Options, &resp) + return +} + +// Retrieve The username and password history for a Storage service. +func (r Network_Storage) GetVolumeHistory() (resp []datatypes.Network_Storage_History, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage", "getVolumeHistory", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of a network storage volume. 
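+//
+// A hedged sketch (ID illustrative); useful before mounting or duplicating,
+// since other operations block on the volume status:
+//
+//	status, err := services.GetNetworkStorageService(sess).Id(12345).GetVolumeStatus()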
+func (r Network_Storage) GetVolumeStatus() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getVolumeStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account username and password for the EVault webCC interface.
+func (r Network_Storage) GetWebccAccount() (resp datatypes.Account_Password, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getWebccAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Weekly Schedule which is associated with this network storage volume.
+func (r Network_Storage) GetWeeklySchedule() (resp datatypes.Network_Storage_Schedule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "getWeeklySchedule", nil, &r.Options, &resp)
+	return
+}
+
+// Immediate failover to a volume replicant. During the time in which the replicant is in use, the local NAS volume will not be available.
+func (r Network_Storage) ImmediateFailoverToReplicant(replicantId *int) (resp bool, err error) {
+	params := []interface{}{
+		replicantId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "immediateFailoverToReplicant", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) IsBlockingOperationInProgress(exemptStatusKeyNames []string) (resp bool, err error) {
+	params := []interface{}{
+		exemptStatusKeyNames,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "isBlockingOperationInProgress", params, &r.Options, &resp)
+	return
+}
+
+// This method returns a boolean indicating whether the clone volume is ready for snapshot.
+func (r Network_Storage) IsDuplicateReadyForSnapshot() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "isDuplicateReadyForSnapshot", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns a boolean indicating whether the clone volume is ready to mount.
+func (r Network_Storage) IsDuplicateReadyToMount() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "isDuplicateReadyToMount", nil, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume.
+func (r Network_Storage) RemoveAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) {
+	params := []interface{}{
+		hardwareObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromHardware", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume.
+func (r Network_Storage) RemoveAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) {
+	params := []interface{}{
+		hardwareObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromHardwareList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume.
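+//
+// A hedged sketch of revoking a single virtual guest (the type class name and
+// IDs are illustrative assumptions):
+//
+//	host, err := services.GetNetworkStorageService(sess).
+//		Id(12345).
+//		RemoveAccessFromHost(sl.String("SoftLayer_Virtual_Guest"), sl.Int(98765))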
+func (r Network_Storage) RemoveAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage) RemoveAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromHostList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage volume. +func (r Network_Storage) RemoveAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromIpAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) RemoveAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) RemoveAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromSubnet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) RemoveAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage) RemoveAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. 
+func (r Network_Storage) RemoveAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Hardware objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationHardware property of this storage volume. +func (r Network_Storage) RemoveAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessToReplicantFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationIpAddresses property of this storage volume. +func (r Network_Storage) RemoveAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessToReplicantFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage) RemoveAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessToReplicantFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume. +func (r Network_Storage) RemoveAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessToReplicantFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationVirtualGuests property of this storage volume. +func (r Network_Storage) RemoveAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method will remove a credential from the current volume. The credential must have been created using the 'addNewCredential' method. 
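+//
+// An illustrative sketch (the username is an assumption):
+//
+//	ok, err := services.GetNetworkStorageService(sess).
+//		Id(12345).
+//		RemoveCredential(sl.String("backupuser"))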
+func (r Network_Storage) RemoveCredential(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "removeCredential", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Restore an individual file so that it may be used as it was before it was deleted.
+//
+// If a file is deleted from a Virtual Server Storage account, the file is placed into the account's recycle bin and not permanently deleted. Therefore, restoreFile can be used to place the file back into your Virtual Server account's root directory.
+func (r Network_Storage) RestoreFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		fileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "restoreFile", params, &r.Options, &resp)
+	return
+}
+
+// Restore the volume from a snapshot that was previously taken.
+func (r Network_Storage) RestoreFromSnapshot(snapshotId *int) (resp bool, err error) {
+	params := []interface{}{
+		snapshotId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "restoreFromSnapshot", params, &r.Options, &resp)
+	return
+}
+
+// This method will retrieve the password for the StorageLayer or Virtual Server Storage Account and email the password. The Storage Account passwords will be emailed to the master user. For Virtual Server Storage, the password will be sent to the email address used as the username.
+func (r Network_Storage) SendPasswordReminderEmail(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "sendPasswordReminderEmail", params, &r.Options, &resp)
+	return
+}
+
+// Enable or disable the mounting of a Storage volume. When mounting is enabled, the Storage volume will be mountable or available for use.
+//
+// For Virtual Server volumes, disabling mounting will deny access to the Virtual Server Account, remove published material and deny all file interaction including uploads and downloads.
+//
+// Enabling or disabling mounting for Storage volumes is not possible if mounting has been disabled by SoftLayer or a parent account.
+func (r Network_Storage) SetMountable(mountable *bool) (resp bool, err error) {
+	params := []interface{}{
+		mountable,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "setMountable", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage) SetSnapshotAllocation(capacityGb *int) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		capacityGb,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "setSnapshotAllocation", params, &r.Options, &resp)
+	return
+}
+
+// Upgrade the Storage volume to one of the upgradable packages (for example from 10 Gigs of EVault storage to 100 Gigs of EVault storage).
+func (r Network_Storage) UpgradeVolumeCapacity(itemId *int) (resp bool, err error) {
+	params := []interface{}{
+		itemId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage", "upgradeVolumeCapacity", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Upload a file to a Storage account's root directory. Once uploaded, this method returns a new file entity identifier for the uploaded file.
+//
+// The following properties are required in the ''file'' parameter.
+// *'''name''': The name of the file you wish to upload
+// *'''content''': The raw contents of the file you wish to upload.
+// *'''contentType''': The MIME-type of content that you wish to upload. +func (r Network_Storage) UploadFile(file *datatypes.Container_Utility_File_Entity) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + file, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage", "uploadFile", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Allowed_Host struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageAllowedHostService returns an instance of the Network_Storage_Allowed_Host SoftLayer service +func GetNetworkStorageAllowedHostService(sess *session.Session) Network_Storage_Allowed_Host { + return Network_Storage_Allowed_Host{Session: sess} +} + +func (r Network_Storage_Allowed_Host) Id(id int) Network_Storage_Allowed_Host { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Allowed_Host) Mask(mask string) Network_Storage_Allowed_Host { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Allowed_Host) Filter(filter string) Network_Storage_Allowed_Host { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Allowed_Host) Limit(limit int) Network_Storage_Allowed_Host { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Allowed_Host) Offset(offset int) Network_Storage_Allowed_Host { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Storage_Allowed_Host) CreateObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host) EditObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. +func (r Network_Storage_Allowed_Host) GetAssignedGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getAssignedGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. +func (r Network_Storage_Allowed_Host) GetAssignedReplicationVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getAssignedReplicationVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. 
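+//
+// A hedged sketch using the allowed-host service defined above (the host ID is
+// an illustrative assumption):
+//
+//	volumes, err := services.GetNetworkStorageAllowedHostService(sess).
+//		Id(4321).
+//		GetAssignedVolumes()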
+func (r Network_Storage_Allowed_Host) GetAssignedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getAssignedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Credential this allowed host uses. +func (r Network_Storage_Allowed_Host) GetCredential() (resp datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getCredential", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host) GetObject() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "getObject", nil, &r.Options, &resp) + return +} + +// Use this method to modify the credential password for a SoftLayer_Network_Storage_Allowed_Host object. +func (r Network_Storage_Allowed_Host) SetCredentialPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host", "setCredentialPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Allowed_Host_Hardware struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageAllowedHostHardwareService returns an instance of the Network_Storage_Allowed_Host_Hardware SoftLayer service +func GetNetworkStorageAllowedHostHardwareService(sess *session.Session) Network_Storage_Allowed_Host_Hardware { + return Network_Storage_Allowed_Host_Hardware{Session: sess} +} + +func (r Network_Storage_Allowed_Host_Hardware) Id(id int) Network_Storage_Allowed_Host_Hardware { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Allowed_Host_Hardware) Mask(mask string) Network_Storage_Allowed_Host_Hardware { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Allowed_Host_Hardware) Filter(filter string) Network_Storage_Allowed_Host_Hardware { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Allowed_Host_Hardware) Limit(limit int) Network_Storage_Allowed_Host_Hardware { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Allowed_Host_Hardware) Offset(offset int) Network_Storage_Allowed_Host_Hardware { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Hardware) CreateObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Hardware) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Hardware) EditObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Group objects this 
SoftLayer_Network_Storage_Allowed_Host is present in. +func (r Network_Storage_Allowed_Host_Hardware) GetAssignedGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getAssignedGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. +func (r Network_Storage_Allowed_Host_Hardware) GetAssignedReplicationVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getAssignedReplicationVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_Hardware) GetAssignedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getAssignedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Credential this allowed host uses. +func (r Network_Storage_Allowed_Host_Hardware) GetCredential() (resp datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getCredential", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Hardware) GetObject() (resp datatypes.Network_Storage_Allowed_Host_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware object which this SoftLayer_Network_Storage_Allowed_Host is referencing. +func (r Network_Storage_Allowed_Host_Hardware) GetResource() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "getResource", nil, &r.Options, &resp) + return +} + +// Use this method to modify the credential password for a SoftLayer_Network_Storage_Allowed_Host object. 
+func (r Network_Storage_Allowed_Host_Hardware) SetCredentialPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Hardware", "setCredentialPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Allowed_Host_IpAddress struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageAllowedHostIpAddressService returns an instance of the Network_Storage_Allowed_Host_IpAddress SoftLayer service +func GetNetworkStorageAllowedHostIpAddressService(sess *session.Session) Network_Storage_Allowed_Host_IpAddress { + return Network_Storage_Allowed_Host_IpAddress{Session: sess} +} + +func (r Network_Storage_Allowed_Host_IpAddress) Id(id int) Network_Storage_Allowed_Host_IpAddress { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Allowed_Host_IpAddress) Mask(mask string) Network_Storage_Allowed_Host_IpAddress { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Allowed_Host_IpAddress) Filter(filter string) Network_Storage_Allowed_Host_IpAddress { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Allowed_Host_IpAddress) Limit(limit int) Network_Storage_Allowed_Host_IpAddress { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Allowed_Host_IpAddress) Offset(offset int) Network_Storage_Allowed_Host_IpAddress { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_IpAddress) CreateObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_IpAddress) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_IpAddress) EditObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. +func (r Network_Storage_Allowed_Host_IpAddress) GetAssignedGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getAssignedGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. +func (r Network_Storage_Allowed_Host_IpAddress) GetAssignedReplicationVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getAssignedReplicationVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. 
+func (r Network_Storage_Allowed_Host_IpAddress) GetAssignedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getAssignedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Credential this allowed host uses. +func (r Network_Storage_Allowed_Host_IpAddress) GetCredential() (resp datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getCredential", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_IpAddress) GetObject() (resp datatypes.Network_Storage_Allowed_Host_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet_IpAddress object which this SoftLayer_Network_Storage_Allowed_Host is referencing. +func (r Network_Storage_Allowed_Host_IpAddress) GetResource() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "getResource", nil, &r.Options, &resp) + return +} + +// Use this method to modify the credential password for a SoftLayer_Network_Storage_Allowed_Host object. +func (r Network_Storage_Allowed_Host_IpAddress) SetCredentialPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_IpAddress", "setCredentialPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Allowed_Host_Subnet struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageAllowedHostSubnetService returns an instance of the Network_Storage_Allowed_Host_Subnet SoftLayer service +func GetNetworkStorageAllowedHostSubnetService(sess *session.Session) Network_Storage_Allowed_Host_Subnet { + return Network_Storage_Allowed_Host_Subnet{Session: sess} +} + +func (r Network_Storage_Allowed_Host_Subnet) Id(id int) Network_Storage_Allowed_Host_Subnet { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Allowed_Host_Subnet) Mask(mask string) Network_Storage_Allowed_Host_Subnet { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Allowed_Host_Subnet) Filter(filter string) Network_Storage_Allowed_Host_Subnet { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Allowed_Host_Subnet) Limit(limit int) Network_Storage_Allowed_Host_Subnet { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Allowed_Host_Subnet) Offset(offset int) Network_Storage_Allowed_Host_Subnet { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Subnet) CreateObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Subnet) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "deleteObject", nil, &r.Options, &resp) + return +} + +// 
no documentation yet +func (r Network_Storage_Allowed_Host_Subnet) EditObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. +func (r Network_Storage_Allowed_Host_Subnet) GetAssignedGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getAssignedGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. +func (r Network_Storage_Allowed_Host_Subnet) GetAssignedReplicationVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getAssignedReplicationVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. +func (r Network_Storage_Allowed_Host_Subnet) GetAssignedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getAssignedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Credential this allowed host uses. +func (r Network_Storage_Allowed_Host_Subnet) GetCredential() (resp datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getCredential", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_Subnet) GetObject() (resp datatypes.Network_Storage_Allowed_Host_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet object which this SoftLayer_Network_Storage_Allowed_Host is referencing. +func (r Network_Storage_Allowed_Host_Subnet) GetResource() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "getResource", nil, &r.Options, &resp) + return +} + +// Use this method to modify the credential password for a SoftLayer_Network_Storage_Allowed_Host object. 
+func (r Network_Storage_Allowed_Host_Subnet) SetCredentialPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_Subnet", "setCredentialPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Allowed_Host_VirtualGuest struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageAllowedHostVirtualGuestService returns an instance of the Network_Storage_Allowed_Host_VirtualGuest SoftLayer service +func GetNetworkStorageAllowedHostVirtualGuestService(sess *session.Session) Network_Storage_Allowed_Host_VirtualGuest { + return Network_Storage_Allowed_Host_VirtualGuest{Session: sess} +} + +func (r Network_Storage_Allowed_Host_VirtualGuest) Id(id int) Network_Storage_Allowed_Host_VirtualGuest { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Allowed_Host_VirtualGuest) Mask(mask string) Network_Storage_Allowed_Host_VirtualGuest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Allowed_Host_VirtualGuest) Filter(filter string) Network_Storage_Allowed_Host_VirtualGuest { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Allowed_Host_VirtualGuest) Limit(limit int) Network_Storage_Allowed_Host_VirtualGuest { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Allowed_Host_VirtualGuest) Offset(offset int) Network_Storage_Allowed_Host_VirtualGuest { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_VirtualGuest) CreateObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_VirtualGuest) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_VirtualGuest) EditObject(templateObject *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Group objects this SoftLayer_Network_Storage_Allowed_Host is present in. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetAssignedGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getAssignedGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage primary volumes whose replicas are allowed access. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetAssignedReplicationVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getAssignedReplicationVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage volumes to which this SoftLayer_Network_Storage_Allowed_Host is allowed access. 
+func (r Network_Storage_Allowed_Host_VirtualGuest) GetAssignedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getAssignedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Credential this allowed host uses. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetCredential() (resp datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getCredential", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Allowed_Host_VirtualGuest) GetObject() (resp datatypes.Network_Storage_Allowed_Host_VirtualGuest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Virtual_Guest object which this SoftLayer_Network_Storage_Allowed_Host is referencing. +func (r Network_Storage_Allowed_Host_VirtualGuest) GetResource() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "getResource", nil, &r.Options, &resp) + return +} + +// Use this method to modify the credential password for a SoftLayer_Network_Storage_Allowed_Host object. +func (r Network_Storage_Allowed_Host_VirtualGuest) SetCredentialPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Allowed_Host_VirtualGuest", "setCredentialPassword", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Storage_Backup_Evault contains general information regarding an EVault Storage service such as account id, username, maximum capacity, password, Storage's product type and the server id. +type Network_Storage_Backup_Evault struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageBackupEvaultService returns an instance of the Network_Storage_Backup_Evault SoftLayer service +func GetNetworkStorageBackupEvaultService(sess *session.Session) Network_Storage_Backup_Evault { + return Network_Storage_Backup_Evault{Session: sess} +} + +func (r Network_Storage_Backup_Evault) Id(id int) Network_Storage_Backup_Evault { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Backup_Evault) Mask(mask string) Network_Storage_Backup_Evault { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Backup_Evault) Filter(filter string) Network_Storage_Backup_Evault { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Backup_Evault) Limit(limit int) Network_Storage_Backup_Evault { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Backup_Evault) Offset(offset int) Network_Storage_Backup_Evault { + r.Options.Offset = &offset + return r +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. 
+func (r Network_Storage_Backup_Evault) AllowAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromHardware", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) AllowAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage volume will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromHostList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromIpAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) AllowAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromIpAddressList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedSubnets property of this storage volume.
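The allowAccessFromHost variant above takes the host's class name and id instead of a template object, and the class names it accepts are exactly those listed in its doc comment. A sketch of granting a virtual guest access, assuming `sess` is an authenticated `*session.Session` and the `fmt`, `services`, and `sl` packages from the upstream softlayer-go library are imported; the ids are placeholders:

```go
// Grant one virtual guest access to an EVault volume via allowAccessFromHost.
func allowGuest(sess *session.Session, volumeID, guestID int) error {
	svc := services.GetNetworkStorageBackupEvaultService(sess).Id(volumeID)
	allowed, err := svc.AllowAccessFromHost(
		sl.String("SoftLayer_Virtual_Guest"), // typeClassName, from the list in the doc comment
		sl.Int(guestID),                      // id of the host object being authorized
	)
	if err != nil {
		return err
	}
	if allowed.Id != nil {
		fmt.Printf("created allowed-host record %d\n", *allowed.Id)
	}
	return nil
}
```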
+func (r Network_Storage_Backup_Evault) AllowAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromSubnet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) AllowAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replicant volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Hardware objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationHardware property of this storage volume. 
+func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromHardwareList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromIpAddress", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationIpAddresses property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromIpAddressList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedSubnets property of this storage replicant volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage replicant volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica.
The SoftLayer_Virtual_Guest objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) AllowAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "allowAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method will assign an existing credential to the current volume. The credential must have been created using the 'addNewCredential' method. The volume type must support an additional credential. +func (r Network_Storage_Backup_Evault) AssignCredential(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "assignCredential", params, &r.Options, &resp) + return +} + +// This method will set up a new credential for the remote storage volume. The storage volume must support an additional credential. Once created, the credential will be automatically assigned to the current volume. If there are no volumes assigned to the credential, it will be automatically deleted. +func (r Network_Storage_Backup_Evault) AssignNewCredential(typ *string) (resp datatypes.Network_Storage_Credential, err error) { + params := []interface{}{ + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "assignNewCredential", params, &r.Options, &resp) + return +} + +// This method will change the password for the given Storage/Virtual Server Storage account. +func (r Network_Storage_Backup_Evault) ChangePassword(username *string, currentPassword *string, newPassword *string) (resp bool, err error) { + params := []interface{}{ + username, + currentPassword, + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "changePassword", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// collectBandwidth() retrieves the bandwidth usage for the current billing cycle. +func (r Network_Storage_Backup_Evault) CollectBandwidth(typ *string, startDate *datatypes.Time, endDate *datatypes.Time) (resp uint, err error) { + params := []interface{}{ + typ, + startDate, + endDate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "collectBandwidth", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// collectBytesUsed() retrieves the number of bytes capacity currently in use on a Storage account.
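Putting the three credential methods above together: assignNewCredential creates and attaches a credential, assignCredential reuses it on another volume, and changePassword rotates it. A sketch under the assumption that both volumes support an additional credential; the "ISCSI" type value is a guess, and the usual softlayer-go imports are assumed:

```go
// Create a credential on one volume, share it with a second, then rotate it.
func shareAndRotate(sess *session.Session, volA, volB int, newPw string) error {
	a := services.GetNetworkStorageBackupEvaultService(sess).Id(volA)
	cred, err := a.AssignNewCredential(sl.String("ISCSI")) // type value is an assumption
	if err != nil {
		return err
	}
	b := services.GetNetworkStorageBackupEvaultService(sess).Id(volB)
	if _, err := b.AssignCredential(cred.Username); err != nil { // reuse the credential created above
		return err
	}
	// Rotate the password everywhere the credential is assigned.
	_, err = a.ChangePassword(cred.Username, cred.Password, sl.String(newPw))
	return err
}
```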
+func (r Network_Storage_Backup_Evault) CollectBytesUsed() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "collectBytesUsed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) CreateFolder(folder *string) (resp bool, err error) { + params := []interface{}{ + folder, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "createFolder", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) CreateSnapshot(notes *string) (resp datatypes.Network_Storage, err error) { + params := []interface{}{ + notes, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "createSnapshot", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Delete all files within a Storage account. Depending on the type of Storage account, deleting either deletes files permanently or sends files to your account's recycle bin. +// +// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted. +// +// Please note, files cannot be restored once they are permanently deleted. +func (r Network_Storage_Backup_Evault) DeleteAllFiles() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteAllFiles", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Delete an individual file within a Storage account. Depending on the type of Storage account, deleting a file either deletes the file permanently or sends the file to your account's recycle bin. +// +// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the file is in the account's recycle bin. If the file exists in the recycle bin, then it is permanently deleted. +// +// Please note, a file cannot be restored once it is permanently deleted. +func (r Network_Storage_Backup_Evault) DeleteFile(fileId *string) (resp bool, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteFile", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Delete multiple files within a Storage account. Depending on the type of Storage account, deleting either deletes files permanently or sends files to your account's recycle bin. +// +// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted. +// +// Please note, files cannot be restored once they are permanently deleted.
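Following the recycle-bin semantics described above, a first delete on Virtual Server storage only moves a file into the bin; the pending list can then be inspected with getFilesPendingDelete, defined further down in this service. A sketch, with `sess`, the imports, and the ids assumed as before:

```go
// Delete a file, then report how many files now await permanent deletion.
func deleteAndInspect(sess *session.Session, volumeID int, fileID string) error {
	svc := services.GetNetworkStorageBackupEvaultService(sess).Id(volumeID)
	if _, err := svc.DeleteFile(sl.String(fileID)); err != nil {
		return err
	}
	// Empty unless this is Virtual Server storage; elsewhere the delete was already permanent.
	pending, err := svc.GetFilesPendingDelete()
	if err != nil {
		return err
	}
	fmt.Printf("%d file(s) pending permanent deletion\n", len(pending))
	return nil
}
```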
+func (r Network_Storage_Backup_Evault) DeleteFiles(fileIds []string) (resp bool, err error) { + params := []interface{}{ + fileIds, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteFiles", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) DeleteFolder(folder *string) (resp bool, err error) { + params := []interface{}{ + folder, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteFolder", params, &r.Options, &resp) + return +} + +// Delete a network storage volume. '''This cannot be undone.''' At this time only network storage snapshots may be deleted with this method. +// +// ''deleteObject'' returns Boolean ''true'' on successful deletion or ''false'' if it was unable to remove a volume; +func (r Network_Storage_Backup_Evault) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteObject", nil, &r.Options, &resp) + return +} + +// This method can be used to help maintain the storage space on a vault. When a job is removed from the Webcc, the task and stored usage still exist on the vault. This method can be used to delete the associated task and its usage. +// +// All that is required for the use of the method is to pass in an integer array of task(s). +// +// +func (r Network_Storage_Backup_Evault) DeleteTasks(tasks []int) (resp bool, err error) { + params := []interface{}{ + tasks, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "deleteTasks", params, &r.Options, &resp) + return +} + +// This method is not valid for Legacy iSCSI Storage Volumes. +// +// Disable scheduled snapshots of this storage volume. Scheduling options include HOURLY, DAILY and WEEKLY schedules. +func (r Network_Storage_Backup_Evault) DisableSnapshots(scheduleType *string) (resp bool, err error) { + params := []interface{}{ + scheduleType, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "disableSnapshots", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Download a file from a Storage account. This method returns a file's details including the file's raw content. +func (r Network_Storage_Backup_Evault) DownloadFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "downloadFile", params, &r.Options, &resp) + return +} + +// This method will change the password of a credential created using the 'addNewCredential' method. If the credential exists on multiple storage volumes, it will change for those volumes as well. +func (r Network_Storage_Backup_Evault) EditCredential(username *string, newPassword *string) (resp bool, err error) { + params := []interface{}{ + username, + newPassword, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "editCredential", params, &r.Options, &resp) + return +} + +// The password and/or notes may be modified for the Storage service except EVault passwords and notes. +func (r Network_Storage_Backup_Evault) EditObject(templateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "editObject", params, &r.Options, &resp) + return +} + +// This method is not valid for Legacy iSCSI Storage Volumes. +// +// Enable scheduled snapshots of this storage volume.
Scheduling options include HOURLY, DAILY and WEEKLY schedules. For HOURLY schedules, provide relevant data for $scheduleType, $retentionCount and $minute. For DAILY schedules, provide relevant data for $scheduleType, $retentionCount, $minute, and $hour. For WEEKLY schedules, provide relevant data for all parameters of this method. +func (r Network_Storage_Backup_Evault) EnableSnapshots(scheduleType *string, retentionCount *int, minute *int, hour *int, dayOfWeek *string) (resp bool, err error) { + params := []interface{}{ + scheduleType, + retentionCount, + minute, + hour, + dayOfWeek, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "enableSnapshots", params, &r.Options, &resp) + return +} + +// Fail back from a volume replicant. In order to fail back, the volume must have already been failed over to a replicant. +func (r Network_Storage_Backup_Evault) FailbackFromReplicant() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "failbackFromReplicant", nil, &r.Options, &resp) + return +} + +// Fail over to a volume replicant. While the replicant is in use, the local NAS volume will not be available. +func (r Network_Storage_Backup_Evault) FailoverToReplicant(replicantId *int) (resp bool, err error) { + params := []interface{}{ + replicantId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "failoverToReplicant", params, &r.Options, &resp) + return +} + +// Retrieve The account that a Storage service belongs to. +func (r Network_Storage_Backup_Evault) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve Other usernames and passwords associated with a Storage volume. +func (r Network_Storage_Backup_Evault) GetAccountPassword() (resp datatypes.Account_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAccountPassword", nil, &r.Options, &resp) + return +} + +// Retrieve The currently active transactions on a network storage volume. +func (r Network_Storage_Backup_Evault) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getActiveTransactions", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files in a Storage account's root directory. This does not download file content. +func (r Network_Storage_Backup_Evault) GetAllFiles() (resp []datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllFiles", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files matching the filter's criteria in a Storage account's root directory. This does not download file content. +func (r Network_Storage_Backup_Evault) GetAllFilesByFilter(filter *datatypes.Container_Utility_File_Entity) (resp []datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + filter, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllFilesByFilter", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Hardware that can be authorized to this SoftLayer_Network_Storage.
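Mapping the parameter rules of enableSnapshots above onto a call: a WEEKLY schedule needs every argument populated, while an HOURLY one would only need scheduleType, retentionCount, and minute. A sketch with the usual imports assumed; the dayOfWeek spelling and the 24-hour clock are assumptions:

```go
// Enable a weekly snapshot schedule that keeps the four most recent snapshots.
func weeklySnapshots(sess *session.Session, volumeID int) error {
	svc := services.GetNetworkStorageBackupEvaultService(sess).Id(volumeID)
	_, err := svc.EnableSnapshots(
		sl.String("WEEKLY"), // scheduleType: HOURLY, DAILY or WEEKLY
		sl.Int(4),           // retentionCount: snapshots to keep
		sl.Int(30),          // minute
		sl.Int(2),           // hour (assumed 24-hour clock)
		sl.String("SUNDAY"), // dayOfWeek (value format assumed)
	)
	return err
}
```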
+func (r Network_Storage_Backup_Evault) GetAllowableHardware(filterHostname *string) (resp []datatypes.Hardware, err error) { + params := []interface{}{ + filterHostname, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowableHardware", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Subnet_IpAddress that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage_Backup_Evault) GetAllowableIpAddresses(subnetId *int, filterIpAddress *string) (resp []datatypes.Network_Subnet_IpAddress, err error) { + params := []interface{}{ + subnetId, + filterIpAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowableIpAddresses", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Subnet that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage_Backup_Evault) GetAllowableSubnets(filterNetworkIdentifier *string) (resp []datatypes.Network_Subnet, err error) { + params := []interface{}{ + filterNetworkIdentifier, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowableSubnets", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Virtual_Guest that can be authorized to this SoftLayer_Network_Storage. +func (r Network_Storage_Backup_Evault) GetAllowableVirtualGuests(filterHostname *string) (resp []datatypes.Virtual_Guest, err error) { + params := []interface{}{ + filterHostname, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowableVirtualGuests", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume. +func (r Network_Storage_Backup_Evault) GetAllowedHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedHardware", nil, &r.Options, &resp) + return +} + +// Retrieves the maximum number of allowed hosts per volume. +func (r Network_Storage_Backup_Evault) GetAllowedHostsLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedHostsLimit", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume. +func (r Network_Storage_Backup_Evault) GetAllowedIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage_Backup_Evault) GetAllowedReplicationHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedReplicationHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage_Backup_Evault) GetAllowedReplicationIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedReplicationIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume's Replicant.
+func (r Network_Storage_Backup_Evault) GetAllowedReplicationSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedReplicationSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume's Replicant. +func (r Network_Storage_Backup_Evault) GetAllowedReplicationVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedReplicationVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume. +func (r Network_Storage_Backup_Evault) GetAllowedSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume. +func (r Network_Storage_Backup_Evault) GetAllowedVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getAllowedVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for a Storage volume. +func (r Network_Storage_Backup_Evault) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Backup_Evault) GetBillingItemCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getBillingItemCategory", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by username and storage account type. Use this method if you wish to retrieve a storage record by username rather than by id. The ''type'' parameter must correspond to one of the available ''nasType'' values in the SoftLayer_Network_Storage data type. +func (r Network_Storage_Backup_Evault) GetByUsername(username *string, typ *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + username, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getByUsername", params, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume, in bytes. +func (r Network_Storage_Backup_Evault) GetBytesUsed() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getBytesUsed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetCdnUrls() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_ContentDeliveryUrl, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getCdnUrls", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetClusterResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getClusterResource", nil, &r.Options, &resp) + return +} + +// Retrieve The schedule id which was executed to create a snapshot.
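Because getByUsername above is keyed on username plus the volume's nasType rather than an id, no Id option is needed on the service. A sketch; the "EVAULT" value is an assumption, as the valid values are the nasType enumeration the doc comment mentions:

```go
// Look up EVault storage records for a username instead of an id.
func findByUsername(sess *session.Session, user string) ([]datatypes.Network_Storage, error) {
	svc := services.GetNetworkStorageBackupEvaultService(sess)
	return svc.GetByUsername(sl.String(user), sl.String("EVAULT")) // nasType value assumed
}
```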
+func (r Network_Storage_Backup_Evault) GetCreationScheduleId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getCreationScheduleId", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Backup_Evault) GetCredentials() (resp []datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getCredentials", nil, &r.Options, &resp) + return +} + +// Retrieve The Daily Schedule which is associated with this network storage volume. +func (r Network_Storage_Backup_Evault) GetDailySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getDailySchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The events which have taken place on a network storage volume. +func (r Network_Storage_Backup_Evault) GetEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getEvents", nil, &r.Options, &resp) + return +} + +// +// +// +func (r Network_Storage_Backup_Evault) GetFileBlockEncryptedLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFileBlockEncryptedLocations", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date of a file within a Storage account. This does not download file content. +func (r Network_Storage_Backup_Evault) GetFileByIdentifier(identifier *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + identifier, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFileByIdentifier", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the number of files in a Virtual Server Storage account's root directory. This does not include the files stored in the recycle bin. +func (r Network_Storage_Backup_Evault) GetFileCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFileCount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetFileList(folder *string, path *string) (resp []datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + folder, + path, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFileList", params, &r.Options, &resp) + return +} + +// Retrieve The NFS Network Mount Address Name for a given File Storage Volume. +func (r Network_Storage_Backup_Evault) GetFileNetworkMountAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFileNetworkMountAddress", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the number of files pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted. +func (r Network_Storage_Backup_Evault) GetFilePendingDeleteCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFilePendingDeleteCount", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve a list of files that are pending deletion in a Storage account's recycle bin.
Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted. This method does not download file content. +func (r Network_Storage_Backup_Evault) GetFilesPendingDelete() (resp []datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFilesPendingDelete", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetFolderList() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Folder, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getFolderList", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// getGraph() retrieves a Storage account's usage and returns a PNG graph image, title, and the minimum and maximum dates included in the graphed date range. Virtual Server storage accounts can also graph upload and download bandwidth usage. +func (r Network_Storage_Backup_Evault) GetGraph(startDate *datatypes.Time, endDate *datatypes.Time, typ *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDate, + endDate, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getGraph", params, &r.Options, &resp) + return +} + +// Retrieve When applicable, the hardware associated with a Storage service. +func (r Network_Storage_Backup_Evault) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve a list of hardware associated with a SoftLayer customer account, placing all hardware with associated EVault storage accounts at the beginning of the list. The return type, SoftLayer_Hardware_Server[], contains the results; the number of items returned in the result will be returned in the SOAP header (totalItems). ''getHardwareWithEvaultFirst'' is useful in situations where you wish to search for hardware and provide paginated output. +// +// +// +// +// +// Results are only returned for hardware belonging to the account of the user making the API call. +// +// This method drives the backup page of the SoftLayer customer portal. It serves a very specific function, but we have exposed it as it may prove useful for API developers too. +func (r Network_Storage_Backup_Evault) GetHardwareWithEvaultFirst(option *string, exactMatch *bool, criteria *string, mode *string) (resp []datatypes.Hardware, err error) { + params := []interface{}{ + option, + exactMatch, + criteria, + mode, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getHardwareWithEvaultFirst", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Backup_Evault) GetHasEncryptionAtRest() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getHasEncryptionAtRest", nil, &r.Options, &resp) + return +} + +// Retrieve The Hourly Schedule which is associated with this network storage volume. +func (r Network_Storage_Backup_Evault) GetHourlySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getHourlySchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The maximum number of IOPs selected for this volume.
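The getGraph call above takes its date range as `*datatypes.Time` values, which (in the upstream library) embed `time.Time` with SoftLayer's JSON encoding. A sketch graphing the last thirty days; passing nil for the type parameter to take the API default is an assumption, and `time` plus the usual softlayer-go imports are assumed:

```go
// Fetch a 30-day usage graph for a volume.
func usageGraph(sess *session.Session, volumeID int) (datatypes.Container_Bandwidth_GraphOutputs, error) {
	end := datatypes.Time{Time: time.Now()}
	start := datatypes.Time{Time: end.AddDate(0, 0, -30)}
	svc := services.GetNetworkStorageBackupEvaultService(sess).Id(volumeID)
	return svc.GetGraph(&start, &end, nil) // nil type: let the API choose (assumption)
}
```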
+func (r Network_Storage_Backup_Evault) GetIops() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getIops", nil, &r.Options, &resp) + return +} + +// Retrieve Whether a volume is ready to order snapshot space, or, if snapshot space is already available, to assign a snapshot schedule, or to take a manual snapshot. +func (r Network_Storage_Backup_Evault) GetIsReadyForSnapshot() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getIsReadyForSnapshot", nil, &r.Options, &resp) + return +} + +// Retrieve Whether a volume is ready to have Hosts authorized to access it. This does not indicate whether another operation may be blocking; please refer to this volume's volumeStatus property for details. +func (r Network_Storage_Backup_Evault) GetIsReadyToMount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getIsReadyToMount", nil, &r.Options, &resp) + return +} + +// Retrieve Relationship between a container volume and iSCSI LUNs. +func (r Network_Storage_Backup_Evault) GetIscsiLuns() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getIscsiLuns", nil, &r.Options, &resp) + return +} + +// Retrieve The ID of the LUN volume. +func (r Network_Storage_Backup_Evault) GetLunId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getLunId", nil, &r.Options, &resp) + return +} + +// Retrieve The manually-created snapshots associated with this SoftLayer_Network_Storage volume. Does not support pagination by result limit and offset. +func (r Network_Storage_Backup_Evault) GetManualSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getManualSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieve A network storage volume's metric tracking object. This object records all periodic polled data available to this volume. +func (r Network_Storage_Backup_Evault) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a network storage volume may be mounted. +func (r Network_Storage_Backup_Evault) GetMountableFlag() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getMountableFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of a split or move operation as a part of volume duplication.
+func (r Network_Storage_Backup_Evault) GetMoveAndSplitStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getMoveAndSplitStatus", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetNetworkMountAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getNetworkMountAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The subscribers that will be notified for usage amount warnings and overages. +func (r Network_Storage_Backup_Evault) GetNotificationSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Storage_Backup_Evault object whose ID corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Storage_Backup_Evault service. +func (r Network_Storage_Backup_Evault) GetObject() (resp datatypes.Network_Storage_Backup_Evault, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetObjectStorageConnectionInformation() (resp []datatypes.Container_Network_Service_Resource_ObjectStorage_ConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getObjectStorageConnectionInformation", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by SoftLayer_Network_Storage_Credential object. Use this method if you wish to retrieve a storage record by a credential rather than by id. +func (r Network_Storage_Backup_Evault) GetObjectsByCredential(credentialObject *datatypes.Network_Storage_Credential) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + credentialObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getObjectsByCredential", params, &r.Options, &resp) + return +} + +// Retrieve The name of the snapshot that this volume was duplicated from. +func (r Network_Storage_Backup_Evault) GetOriginalSnapshotName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getOriginalSnapshotName", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the volume that this volume was duplicated from. +func (r Network_Storage_Backup_Evault) GetOriginalVolumeName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getOriginalVolumeName", nil, &r.Options, &resp) + return +} + +// Retrieve The size (in GB) of the volume that this volume was duplicated from, or in the case of iSCSI LUNs, the size of the base originally-provisioned LUN. +func (r Network_Storage_Backup_Evault) GetOriginalVolumeSize() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getOriginalVolumeSize", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type. 
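getObjectsByCredential is the one lookup here that keys off a credential rather than a volume id. A sketch, assuming the credential's Username field is sufficient to identify it (imports as in the first sketch above):

```go
// Look up storage volumes by credential username instead of volume id.
func volumesByCredential(sess *session.Session, username string) ([]datatypes.Network_Storage, error) {
	cred := datatypes.Network_Storage_Credential{
		Username: sl.String(username), // assumed field, per the credential datatype's username
	}
	return services.GetNetworkStorageBackupEvaultService(sess).GetObjectsByCredential(&cred)
}
```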
+func (r Network_Storage_Backup_Evault) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type ID. +func (r Network_Storage_Backup_Evault) GetOsTypeId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getOsTypeId", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume in a parental role. +func (r Network_Storage_Backup_Evault) GetParentPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getParentPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve The parent volume of a volume in a complex storage relationship. +func (r Network_Storage_Backup_Evault) GetParentVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getParentVolume", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume. +func (r Network_Storage_Backup_Evault) GetPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve All permissions group(s) this volume is in. +func (r Network_Storage_Backup_Evault) GetPermissionsGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getPermissionsGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The properties used to provide additional details about a network storage volume. +func (r Network_Storage_Backup_Evault) GetProperties() (resp []datatypes.Network_Storage_Property, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getProperties", nil, &r.Options, &resp) + return +} + +// Retrieve The number of IOPs provisioned for this volume. +func (r Network_Storage_Backup_Evault) GetProvisionedIops() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getProvisionedIops", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Retrieve the details of a file that is pending deletion in a Storage account's recycle bin. +func (r Network_Storage_Backup_Evault) GetRecycleBinFileByIdentifier(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getRecycleBinFileByIdentifier", params, &r.Options, &resp) + return +} + +// Retrieves the remaining number of allowed hosts per volume. +func (r Network_Storage_Backup_Evault) GetRemainingAllowedHosts() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getRemainingAllowedHosts", nil, &r.Options, &resp) + return +} + +// Retrieve The iSCSI LUN volumes being replicated by this network storage volume.
+func (r Network_Storage_Backup_Evault) GetReplicatingLuns() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicatingLuns", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volume being replicated by a volume. +func (r Network_Storage_Backup_Evault) GetReplicatingVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicatingVolume", nil, &r.Options, &resp) + return +} + +// Retrieve The volume replication events. +func (r Network_Storage_Backup_Evault) GetReplicationEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicationEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volumes configured to be replicants of a volume. +func (r Network_Storage_Backup_Evault) GetReplicationPartners() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicationPartners", nil, &r.Options, &resp) + return +} + +// Retrieve The Replication Schedule associated with a network storage volume. +func (r Network_Storage_Backup_Evault) GetReplicationSchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicationSchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The current replication status of a network storage volume. Indicates Failover or Failback status. +func (r Network_Storage_Backup_Evault) GetReplicationStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getReplicationStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The schedules which are associated with a network storage volume. +func (r Network_Storage_Backup_Evault) GetSchedules() (resp []datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSchedules", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource a Storage service is connected to. +func (r Network_Storage_Backup_Evault) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Retrieve The IP address of a Storage resource. +func (r Network_Storage_Backup_Evault) GetServiceResourceBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getServiceResourceBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The name of a Storage's network resource. +func (r Network_Storage_Backup_Evault) GetServiceResourceName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getServiceResourceName", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured snapshot space size. +func (r Network_Storage_Backup_Evault) GetSnapshotCapacityGb() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotCapacityGb", nil, &r.Options, &resp) + return +} + +// Retrieve The creation timestamp of the snapshot on the storage platform. 
+func (r Network_Storage_Backup_Evault) GetSnapshotCreationTimestamp() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotCreationTimestamp", nil, &r.Options, &resp) + return +} + +// Retrieve The percentage of used snapshot space after which to delete automated snapshots. +func (r Network_Storage_Backup_Evault) GetSnapshotDeletionThresholdPercentage() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotDeletionThresholdPercentage", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshot size in bytes. +func (r Network_Storage_Backup_Evault) GetSnapshotSizeBytes() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotSizeBytes", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's available snapshot reservation space. +func (r Network_Storage_Backup_Evault) GetSnapshotSpaceAvailable() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotSpaceAvailable", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshots associated with this SoftLayer_Network_Storage volume. +func (r Network_Storage_Backup_Evault) GetSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieves a list of snapshots for this SoftLayer_Network_Storage volume. This method works with the result limits and offset to support pagination. +func (r Network_Storage_Backup_Evault) GetSnapshotsForVolume() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getSnapshotsForVolume", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Backup_Evault) GetStaasVersion() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getStaasVersion", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage groups this volume is attached to. +func (r Network_Storage_Backup_Evault) GetStorageGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getStorageGroups", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetStorageGroupsNetworkConnectionDetails() (resp []datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getStorageGroupsNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Backup_Evault) GetStorageTierLevel() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getStorageTierLevel", nil, &r.Options, &resp) + return +} + +// Retrieve A description of the Storage object. +func (r Network_Storage_Backup_Evault) GetStorageType() (resp datatypes.Network_Storage_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getStorageType", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume. 
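Since getSnapshotsForVolume honors result limits and offsets, pagination is driven through the chainable Limit/Offset option setters that the generated services expose (they appear verbatim on Network_Storage_Group later in this file, and are assumed to exist on the EVault service as well). A sketch with a hypothetical page size of 50 (imports as in the first sketch):

```go
// Page through all snapshots of a volume, 50 at a time.
func allSnapshots(sess *session.Session, volumeID int) ([]datatypes.Network_Storage, error) {
	svc := services.GetNetworkStorageBackupEvaultService(sess).Id(volumeID)
	var all []datatypes.Network_Storage
	for offset := 0; ; offset += 50 {
		page, err := svc.Limit(50).Offset(offset).GetSnapshotsForVolume()
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
		if len(page) < 50 { // a short page means we have reached the end
			return all, nil
		}
	}
}
```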
+func (r Network_Storage_Backup_Evault) GetTotalBytesUsed() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getTotalBytesUsed", nil, &r.Options, &resp) + return +} + +// Retrieve The total snapshot retention count of all schedules on this network storage volume. +func (r Network_Storage_Backup_Evault) GetTotalScheduleSnapshotRetentionCount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getTotalScheduleSnapshotRetentionCount", nil, &r.Options, &resp) + return +} + +// Retrieve The usage notification for SL Storage services. +func (r Network_Storage_Backup_Evault) GetUsageNotification() (resp datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getUsageNotification", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetValidReplicationTargetDatacenterLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getValidReplicationTargetDatacenterLocations", nil, &r.Options, &resp) + return +} + +// Retrieve The type of network storage service. +func (r Network_Storage_Backup_Evault) GetVendorName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getVendorName", nil, &r.Options, &resp) + return +} + +// Retrieve When applicable, the virtual guest associated with a Storage service. +func (r Network_Storage_Backup_Evault) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// This method returns the parameters for cloning a volume +func (r Network_Storage_Backup_Evault) GetVolumeDuplicateParameters() (resp datatypes.Container_Network_Storage_VolumeDuplicateParameters, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getVolumeDuplicateParameters", nil, &r.Options, &resp) + return +} + +// Retrieve The username and password history for a Storage service. +func (r Network_Storage_Backup_Evault) GetVolumeHistory() (resp []datatypes.Network_Storage_History, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getVolumeHistory", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of a network storage volume. +func (r Network_Storage_Backup_Evault) GetVolumeStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getVolumeStatus", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) GetWebCCAuthenticationDetails() (resp datatypes.Container_Network_Storage_Backup_Evault_WebCc_Authentication_Details, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getWebCCAuthenticationDetails", nil, &r.Options, &resp) + return +} + +// Retrieve The account username and password for the EVault webCC interface. +func (r Network_Storage_Backup_Evault) GetWebccAccount() (resp datatypes.Account_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getWebccAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The Weekly Schedule which is associated with this network storage volume. 
+func (r Network_Storage_Backup_Evault) GetWeeklySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "getWeeklySchedule", nil, &r.Options, &resp) + return +} + +// Immediate failover to a volume replicant. While the replicant is in use, the local NAS volume will not be available. +func (r Network_Storage_Backup_Evault) ImmediateFailoverToReplicant(replicantId *int) (resp bool, err error) { + params := []interface{}{ + replicantId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "immediateFailoverToReplicant", params, &r.Options, &resp) + return +} + +// EVault Bare Metal Restore is a special version of Rescue Kernel designed specifically for performing full system restores from EVault BMR backups. This process works very similarly to Rescue Kernel, except that only the EVault restore program is available. The process takes approximately 10 minutes. Once it completes you will be able to access your server and perform a restore through VNC or your server's KVM-over-IP. IP information and credentials can be found on the hardware page of the customer portal. The EVault application will be running automatically upon startup, and will walk you through the restore process. +func (r Network_Storage_Backup_Evault) InitiateBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "initiateBareMetalRestore", nil, &r.Options, &resp) + return +} + +// This method operates the same as the initiateBareMetalRestore() method. However, using this method, the Bare Metal Restore can be initiated on any Windows server under the account. +func (r Network_Storage_Backup_Evault) InitiateBareMetalRestoreForServer(hardwareId *int) (resp bool, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "initiateBareMetalRestoreForServer", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) IsBlockingOperationInProgress(exemptStatusKeyNames []string) (resp bool, err error) { + params := []interface{}{ + exemptStatusKeyNames, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "isBlockingOperationInProgress", params, &r.Options, &resp) + return +} + +// This method returns a boolean indicating whether the clone volume is ready for snapshot. +func (r Network_Storage_Backup_Evault) IsDuplicateReadyForSnapshot() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "isDuplicateReadyForSnapshot", nil, &r.Options, &resp) + return +} + +// This method returns a boolean indicating whether the clone volume is ready to mount. +func (r Network_Storage_Backup_Evault) IsDuplicateReadyToMount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "isDuplicateReadyToMount", nil, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume.
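A sketch of the failover call described above; replicantID is a placeholder, and the boolean result is read as acceptance of the request (remember that the local volume is unavailable while the replicant is in use). Imports as in the first sketch:

```go
// Fail a volume over to one of its replicants.
func failover(sess *session.Session, volumeID, replicantID int) error {
	svc := services.GetNetworkStorageBackupEvaultService(sess).Id(volumeID)
	ok, err := svc.ImmediateFailoverToReplicant(sl.Int(replicantID))
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("failover to replicant %d was not accepted", replicantID)
	}
	return nil
}
```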
+func (r Network_Storage_Backup_Evault) RemoveAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromHardware", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromHostList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage volume. 
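Unlike the template-object variants, the host-oriented ACL methods take a SoftLayer type class name plus the host's id. A sketch revoking a virtual guest's access ("SoftLayer_Virtual_Guest" is one of the class names the documentation lists; guestID is a placeholder; imports as in the first sketch):

```go
// Revoke a virtual guest's access to a volume by class name and id.
func revokeGuest(sess *session.Session, volumeID, guestID int) (datatypes.Network_Storage_Allowed_Host, error) {
	svc := services.GetNetworkStorageBackupEvaultService(sess).Id(volumeID)
	return svc.RemoveAccessFromHost(sl.String("SoftLayer_Virtual_Guest"), sl.Int(guestID))
}
```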
+func (r Network_Storage_Backup_Evault) RemoveAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromIpAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) RemoveAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) RemoveAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromSubnet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) RemoveAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Hardware objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationHardware property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessToReplicantFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. 
The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationIpAddresses property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessToReplicantFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) RemoveAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessToReplicantFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessToReplicantFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationVirtualGuests property of this storage volume. +func (r Network_Storage_Backup_Evault) RemoveAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method will remove a credential from the current volume. The credential must have been created using the 'addNewCredential' method. +func (r Network_Storage_Backup_Evault) RemoveCredential(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "removeCredential", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Restore an individual file so that it may be used as it was before it was deleted. +// +// If a file is deleted from a Virtual Server Storage account, the file is placed into the account's recycle bin and not permanently deleted. Therefore, restoreFile can be used to place the file back into your Virtual Server account's root directory. +func (r Network_Storage_Backup_Evault) RestoreFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "restoreFile", params, &r.Options, &resp) + return +} + +// Restore the volume from a snapshot that was previously taken. 
+func (r Network_Storage_Backup_Evault) RestoreFromSnapshot(snapshotId *int) (resp bool, err error) { + params := []interface{}{ + snapshotId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "restoreFromSnapshot", params, &r.Options, &resp) + return +} + +// This method will retrieve the password for the StorageLayer or Virtual Server Storage Account and email it. The Storage Account passwords will be emailed to the master user. For Virtual Server Storage, the password will be sent to the email address used as the username. +func (r Network_Storage_Backup_Evault) SendPasswordReminderEmail(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "sendPasswordReminderEmail", params, &r.Options, &resp) + return +} + +// Enable or disable the mounting of a Storage volume. When mounting is enabled, the Storage volume will be mountable and available for use. +// +// For Virtual Server volumes, disabling mounting will deny access to the Virtual Server Account, remove published material and deny all file interaction including uploads and downloads. +// +// Enabling or disabling mounting for Storage volumes is not possible if mounting has been disabled by SoftLayer or a parent account. +func (r Network_Storage_Backup_Evault) SetMountable(mountable *bool) (resp bool, err error) { + params := []interface{}{ + mountable, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "setMountable", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Backup_Evault) SetSnapshotAllocation(capacityGb *int) (err error) { + var resp datatypes.Void + params := []interface{}{ + capacityGb, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "setSnapshotAllocation", params, &r.Options, &resp) + return +} + +// Upgrade the Storage volume to one of the upgradable packages (for example from 10 Gigs of EVault storage to 100 Gigs of EVault storage). +func (r Network_Storage_Backup_Evault) UpgradeVolumeCapacity(itemId *int) (resp bool, err error) { + params := []interface{}{ + itemId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "upgradeVolumeCapacity", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Upload a file to a Storage account's root directory. Once uploaded, this method returns a new file entity identifier for the uploaded file. +// +// The following properties are required in the ''file'' parameter. +// *'''name''': The name of the file you wish to upload +// *'''content''': The raw contents of the file you wish to upload. +// *'''contentType''': The MIME-type of content that you wish to upload.
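A minimal sketch of the mounting toggle described in the setMountable notes above, assuming no SoftLayer-side or parent-account restriction is in effect (volumeID is a placeholder; imports as in the first sketch):

```go
// Temporarily disable mounting for a volume; pass true to re-enable it.
func disableMounting(sess *session.Session, volumeID int) (bool, error) {
	svc := services.GetNetworkStorageBackupEvaultService(sess).Id(volumeID)
	return svc.SetMountable(sl.Bool(false))
}
```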
+func (r Network_Storage_Backup_Evault) UploadFile(file *datatypes.Container_Utility_File_Entity) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + file, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Backup_Evault", "uploadFile", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Group struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageGroupService returns an instance of the Network_Storage_Group SoftLayer service +func GetNetworkStorageGroupService(sess *session.Session) Network_Storage_Group { + return Network_Storage_Group{Session: sess} +} + +func (r Network_Storage_Group) Id(id int) Network_Storage_Group { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Group) Mask(mask string) Network_Storage_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Group) Filter(filter string) Network_Storage_Group { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Group) Limit(limit int) Network_Storage_Group { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Group) Offset(offset int) Network_Storage_Group { + r.Options.Offset = &offset + return r +} + +// Use this method to attach a SoftLayer_Network_Storage_Allowed_Host object to this group. This will automatically enable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group) AddAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "addAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to attach a SoftLayer_Network_Storage volume to this group. This will automatically enable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. +func (r Network_Storage_Group) AttachToVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "attachToVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group) CreateObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group) EditObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Account which owns this group. 
+func (r Network_Storage_Group) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getAccount", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve all network storage groups. +func (r Network_Storage_Group) GetAllObjects() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The allowed hosts list for this group. +func (r Network_Storage_Group) GetAllowedHosts() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getAllowedHosts", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volumes this group is attached to. +func (r Network_Storage_Group) GetAttachedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getAttachedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The type which defines this group. +func (r Network_Storage_Group) GetGroupType() (resp datatypes.Network_Storage_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getGroupType", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve network connection information for SoftLayer_Network_Storage_Allowed_Host objects within this group. +func (r Network_Storage_Group) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group) GetObject() (resp datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The OS Type this group is configured for. +func (r Network_Storage_Group) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource this group is created on. +func (r Network_Storage_Group) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage_Allowed_Host object from this group. This will automatically disable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group) RemoveAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "removeAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage volume from this group. This will automatically disable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. 
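Storage groups bundle volumes with allowed hosts, so access is granted by attaching both to the same group. A sketch using the GetNetworkStorageGroupService constructor named above; the ids are placeholders, and only the Id fields of the template objects are assumed to be required (imports as in the first sketch):

```go
// Attach a volume and an allowed host to a group, which grants the host
// access to every volume in the group.
func wireGroup(sess *session.Session, groupID, volumeID, allowedHostID int) error {
	grp := services.GetNetworkStorageGroupService(sess).Id(groupID)
	if _, err := grp.AttachToVolume(&datatypes.Network_Storage{Id: sl.Int(volumeID)}); err != nil {
		return err
	}
	_, err := grp.AddAllowedHost(&datatypes.Network_Storage_Allowed_Host{Id: sl.Int(allowedHostID)})
	return err
}
```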
+func (r Network_Storage_Group) RemoveFromVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group", "removeFromVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Group_Iscsi struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageGroupIscsiService returns an instance of the Network_Storage_Group_Iscsi SoftLayer service +func GetNetworkStorageGroupIscsiService(sess *session.Session) Network_Storage_Group_Iscsi { + return Network_Storage_Group_Iscsi{Session: sess} +} + +func (r Network_Storage_Group_Iscsi) Id(id int) Network_Storage_Group_Iscsi { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Group_Iscsi) Mask(mask string) Network_Storage_Group_Iscsi { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Group_Iscsi) Filter(filter string) Network_Storage_Group_Iscsi { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Group_Iscsi) Limit(limit int) Network_Storage_Group_Iscsi { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Group_Iscsi) Offset(offset int) Network_Storage_Group_Iscsi { + r.Options.Offset = &offset + return r +} + +// Use this method to attach a SoftLayer_Network_Storage_Allowed_Host object to this group. This will automatically enable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group_Iscsi) AddAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "addAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to attach a SoftLayer_Network_Storage volume to this group. This will automatically enable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. +func (r Network_Storage_Group_Iscsi) AttachToVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "attachToVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Iscsi) CreateObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Iscsi) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Iscsi) EditObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Account which owns this group. 
+func (r Network_Storage_Group_Iscsi) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getAccount", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve all network storage groups. +func (r Network_Storage_Group_Iscsi) GetAllObjects() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The allowed hosts list for this group. +func (r Network_Storage_Group_Iscsi) GetAllowedHosts() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getAllowedHosts", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volumes this group is attached to. +func (r Network_Storage_Group_Iscsi) GetAttachedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getAttachedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The type which defines this group. +func (r Network_Storage_Group_Iscsi) GetGroupType() (resp datatypes.Network_Storage_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getGroupType", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve network connection information for SoftLayer_Network_Storage_Allowed_Host objects within this group. +func (r Network_Storage_Group_Iscsi) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Iscsi) GetObject() (resp datatypes.Network_Storage_Group_Iscsi, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The OS Type this group is configured for. +func (r Network_Storage_Group_Iscsi) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource this group is created on. +func (r Network_Storage_Group_Iscsi) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage_Allowed_Host object from this group. This will automatically disable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group_Iscsi) RemoveAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "removeAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage volume from this group. This will automatically disable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. 
+func (r Network_Storage_Group_Iscsi) RemoveFromVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Iscsi", "removeFromVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Group_Nfs struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageGroupNfsService returns an instance of the Network_Storage_Group_Nfs SoftLayer service +func GetNetworkStorageGroupNfsService(sess *session.Session) Network_Storage_Group_Nfs { + return Network_Storage_Group_Nfs{Session: sess} +} + +func (r Network_Storage_Group_Nfs) Id(id int) Network_Storage_Group_Nfs { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Group_Nfs) Mask(mask string) Network_Storage_Group_Nfs { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Group_Nfs) Filter(filter string) Network_Storage_Group_Nfs { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Group_Nfs) Limit(limit int) Network_Storage_Group_Nfs { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Group_Nfs) Offset(offset int) Network_Storage_Group_Nfs { + r.Options.Offset = &offset + return r +} + +// Use this method to attach a SoftLayer_Network_Storage_Allowed_Host object to this group. This will automatically enable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group_Nfs) AddAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "addAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to attach a SoftLayer_Network_Storage volume to this group. This will automatically enable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. +func (r Network_Storage_Group_Nfs) AttachToVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "attachToVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Nfs) CreateObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Nfs) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Nfs) EditObject(templateObject *datatypes.Network_Storage_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Account which owns this group. 
+func (r Network_Storage_Group_Nfs) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getAccount", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve all network storage groups. +func (r Network_Storage_Group_Nfs) GetAllObjects() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The allowed hosts list for this group. +func (r Network_Storage_Group_Nfs) GetAllowedHosts() (resp []datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getAllowedHosts", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage volumes this group is attached to. +func (r Network_Storage_Group_Nfs) GetAttachedVolumes() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getAttachedVolumes", nil, &r.Options, &resp) + return +} + +// Retrieve The type which defines this group. +func (r Network_Storage_Group_Nfs) GetGroupType() (resp datatypes.Network_Storage_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getGroupType", nil, &r.Options, &resp) + return +} + +// Use this method to retrieve network connection information for SoftLayer_Network_Storage_Allowed_Host objects within this group. +func (r Network_Storage_Group_Nfs) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Nfs) GetObject() (resp datatypes.Network_Storage_Group_Nfs, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The OS Type this group is configured for. +func (r Network_Storage_Group_Nfs) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve The network resource this group is created on. +func (r Network_Storage_Group_Nfs) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "getServiceResource", nil, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage_Allowed_Host object from this group. This will automatically disable access from this host to any SoftLayer_Network_Storage volumes currently attached to this group. +func (r Network_Storage_Group_Nfs) RemoveAllowedHost(allowedHost *datatypes.Network_Storage_Allowed_Host) (resp bool, err error) { + params := []interface{}{ + allowedHost, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "removeAllowedHost", params, &r.Options, &resp) + return +} + +// Use this method to remove a SoftLayer_Network_Storage volume from this group. This will automatically disable access to this volume for any SoftLayer_Network_Storage_Allowed_Host objects currently attached to this group. 
+func (r Network_Storage_Group_Nfs) RemoveFromVolume(volume *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + volume, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Nfs", "removeFromVolume", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Group_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageGroupTypeService returns an instance of the Network_Storage_Group_Type SoftLayer service +func GetNetworkStorageGroupTypeService(sess *session.Session) Network_Storage_Group_Type { + return Network_Storage_Group_Type{Session: sess} +} + +func (r Network_Storage_Group_Type) Id(id int) Network_Storage_Group_Type { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Group_Type) Mask(mask string) Network_Storage_Group_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Group_Type) Filter(filter string) Network_Storage_Group_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Group_Type) Limit(limit int) Network_Storage_Group_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Group_Type) Offset(offset int) Network_Storage_Group_Type { + r.Options.Offset = &offset + return r +} + +// Use this method to retrieve all storage group types available. +func (r Network_Storage_Group_Type) GetAllObjects() (resp []datatypes.Network_Storage_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Group_Type) GetObject() (resp datatypes.Network_Storage_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Group_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Hub_Cleversafe_Account struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageHubCleversafeAccountService returns an instance of the Network_Storage_Hub_Cleversafe_Account SoftLayer service +func GetNetworkStorageHubCleversafeAccountService(sess *session.Session) Network_Storage_Hub_Cleversafe_Account { + return Network_Storage_Hub_Cleversafe_Account{Session: sess} +} + +func (r Network_Storage_Hub_Cleversafe_Account) Id(id int) Network_Storage_Hub_Cleversafe_Account { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Hub_Cleversafe_Account) Mask(mask string) Network_Storage_Hub_Cleversafe_Account { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Hub_Cleversafe_Account) Filter(filter string) Network_Storage_Hub_Cleversafe_Account { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Hub_Cleversafe_Account) Limit(limit int) Network_Storage_Hub_Cleversafe_Account { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Hub_Cleversafe_Account) Offset(offset int) Network_Storage_Hub_Cleversafe_Account { + r.Options.Offset = &offset + return r +} + +// Create credentials for an IBM Cloud Object Storage Account +func (r Network_Storage_Hub_Cleversafe_Account) CredentialCreate() (resp []datatypes.Network_Storage_Credential, err error) { + err = 
r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "credentialCreate", nil, &r.Options, &resp) + return +} + +// Delete a credential +func (r Network_Storage_Hub_Cleversafe_Account) CredentialDelete(credential *datatypes.Network_Storage_Credential) (resp bool, err error) { + params := []interface{}{ + credential, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "credentialDelete", params, &r.Options, &resp) + return +} + +// Retrieve SoftLayer account to which an IBM Cloud Object Storage account belongs to. +func (r Network_Storage_Hub_Cleversafe_Account) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Hub_Cleversafe_Account) GetAllObjects() (resp []datatypes.Network_Storage_Hub_Cleversafe_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve An associated parent billing item which is active. Includes billing items which are scheduled to be cancelled in the future. +func (r Network_Storage_Hub_Cleversafe_Account) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Get buckets +func (r Network_Storage_Hub_Cleversafe_Account) GetBuckets() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Bucket, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getBuckets", nil, &r.Options, &resp) + return +} + +// Retrieve An associated parent billing item which has been cancelled. +func (r Network_Storage_Hub_Cleversafe_Account) GetCancelledBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getCancelledBillingItem", nil, &r.Options, &resp) + return +} + +// Returns the capacity usage for an IBM Cloud Object Storage account. +func (r Network_Storage_Hub_Cleversafe_Account) GetCapacityUsage() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getCapacityUsage", nil, &r.Options, &resp) + return +} + +// Returns a collection of valid storage policies to be used during bucket creation. +func (r Network_Storage_Hub_Cleversafe_Account) GetCloudObjectStoragePolicy() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getCloudObjectStoragePolicy", nil, &r.Options, &resp) + return +} + +// Returns credential limits for this IBM Cloud Object Storage account. +func (r Network_Storage_Hub_Cleversafe_Account) GetCredentialLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getCredentialLimit", nil, &r.Options, &resp) + return +} + +// Retrieve Credentials used for generating an AWS signature. Max of 2. +func (r Network_Storage_Hub_Cleversafe_Account) GetCredentials() (resp []datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getCredentials", nil, &r.Options, &resp) + return +} + +// Returns a collection of endpoint URLs available to this IBM Cloud Object Storage account. 
+func (r Network_Storage_Hub_Cleversafe_Account) GetEndpoints() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Endpoint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getEndpoints", nil, &r.Options, &resp) + return +} + +// Retrieve Provides an interface to various metrics relating to the usage of an IBM Cloud Object Storage account. +func (r Network_Storage_Hub_Cleversafe_Account) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Hub_Cleversafe_Account) GetObject() (resp datatypes.Network_Storage_Hub_Cleversafe_Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve Unique identifier for an IBM Cloud Object Storage account. +func (r Network_Storage_Hub_Cleversafe_Account) GetUuid() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Cleversafe_Account", "getUuid", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Hub_Swift_Share struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageHubSwiftShareService returns an instance of the Network_Storage_Hub_Swift_Share SoftLayer service +func GetNetworkStorageHubSwiftShareService(sess *session.Session) Network_Storage_Hub_Swift_Share { + return Network_Storage_Hub_Swift_Share{Session: sess} +} + +func (r Network_Storage_Hub_Swift_Share) Id(id int) Network_Storage_Hub_Swift_Share { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Hub_Swift_Share) Mask(mask string) Network_Storage_Hub_Swift_Share { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Hub_Swift_Share) Filter(filter string) Network_Storage_Hub_Swift_Share { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Hub_Swift_Share) Limit(limit int) Network_Storage_Hub_Swift_Share { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Hub_Swift_Share) Offset(offset int) Network_Storage_Hub_Swift_Share { + r.Options.Offset = &offset + return r +} + +// This method returns a collection of container objects. +func (r Network_Storage_Hub_Swift_Share) GetContainerList() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Folder, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Swift_Share", "getContainerList", nil, &r.Options, &resp) + return +} + +// This method returns a file object given the file's full name. +func (r Network_Storage_Hub_Swift_Share) GetFile(fileName *string, container *string) (resp datatypes.Container_Network_Storage_Hub_ObjectStorage_File, err error) { + params := []interface{}{ + fileName, + container, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Swift_Share", "getFile", params, &r.Options, &resp) + return +} + +// This method returns a collection of the file objects within a container and the given path. 
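+//
+// A hedged sketch (sess, the share id, the container name, and the path are
+// all placeholder assumptions); pointer arguments are conveniently built with
+// the sl helper package:
+//
+//	share := GetNetworkStorageHubSwiftShareService(sess).Id(123456)
+//	files, err := share.GetFileList(sl.String("my-container"), sl.String("backups/"))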
+func (r Network_Storage_Hub_Swift_Share) GetFileList(container *string, path *string) (resp []datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + container, + path, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Hub_Swift_Share", "getFileList", params, &r.Options, &resp) + return +} + +// The iscsi data type provides access to additional information about an iscsi volume such as the snapshot capacity limit and replication partners. +type Network_Storage_Iscsi struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageIscsiService returns an instance of the Network_Storage_Iscsi SoftLayer service +func GetNetworkStorageIscsiService(sess *session.Session) Network_Storage_Iscsi { + return Network_Storage_Iscsi{Session: sess} +} + +func (r Network_Storage_Iscsi) Id(id int) Network_Storage_Iscsi { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Iscsi) Mask(mask string) Network_Storage_Iscsi { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Iscsi) Filter(filter string) Network_Storage_Iscsi { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Iscsi) Limit(limit int) Network_Storage_Iscsi { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Iscsi) Offset(offset int) Network_Storage_Iscsi { + r.Options.Offset = &offset + return r +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume. +func (r Network_Storage_Iscsi) AllowAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromHardware", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) AllowAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Iscsi) AllowAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage volume will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. 
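+//
+// Usage sketch (the ids are placeholders, and the field names are read from
+// datatypes.Container_Network_Storage_Host): each host template names an
+// object type and an id, for example a virtual guest:
+//
+//	iscsi := GetNetworkStorageIscsiService(sess).Id(123456)
+//	hosts := []datatypes.Container_Network_Storage_Host{
+//		{ObjectType: sl.String("SoftLayer_Virtual_Guest"), Id: sl.Int(7890)},
+//	}
+//	allowed, err := iscsi.AllowAccessFromHostList(hosts)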
+func (r Network_Storage_Iscsi) AllowAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) {
+	params := []interface{}{
+		hostObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromHostList", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage_Iscsi) AllowAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) {
+	params := []interface{}{
+		ipAddressObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage_Iscsi) AllowAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) {
+	params := []interface{}{
+		ipAddressObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromIpAddressList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedSubnets property of this storage volume.
+func (r Network_Storage_Iscsi) AllowAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) {
+	params := []interface{}{
+		subnetObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromSubnet", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage_Iscsi) AllowAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) {
+	params := []interface{}{
+		subnetObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromSubnetList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume.
+func (r Network_Storage_Iscsi) AllowAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromVirtualGuest", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume.
+func (r Network_Storage_Iscsi) AllowAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessFromVirtualGuestList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replicant volume.
+func (r Network_Storage_Iscsi) AllowAccessToReplicantFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) {
+	params := []interface{}{
+		hardwareObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromHardware", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage replica volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replica volume.
+func (r Network_Storage_Iscsi) AllowAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) {
+	params := []interface{}{
+		hardwareObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromHardwareList", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage_Iscsi) AllowAccessToReplicantFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) {
+	params := []interface{}{
+		ipAddressObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage replica volume.
+func (r Network_Storage_Iscsi) AllowAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) {
+	params := []interface{}{
+		ipAddressObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromIpAddressList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Network_Subnet objects which have been allowed access to this storage will be listed in the allowedReplicationSubnets property of this storage replicant volume.
+func (r Network_Storage_Iscsi) AllowAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) {
+	params := []interface{}{
+		subnetObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromSubnet", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume.
+func (r Network_Storage_Iscsi) AllowAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) {
+	params := []interface{}{
+		subnetObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromSubnetList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage replicant volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage replicant volume.
+func (r Network_Storage_Iscsi) AllowAccessToReplicantFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromVirtualGuest", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume.
+func (r Network_Storage_Iscsi) AllowAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "allowAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp)
+	return
+}
+
+// This method will assign an existing credential to the current volume. The credential must have been created using the 'addNewCredential' method. The volume type must support an additional credential.
+func (r Network_Storage_Iscsi) AssignCredential(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "assignCredential", params, &r.Options, &resp)
+	return
+}
+
+// This method will set up a new credential for the remote storage volume. The storage volume must support an additional credential. Once created, the credential will be automatically assigned to the current volume. If there are no volumes assigned to the credential it will be automatically deleted.
+func (r Network_Storage_Iscsi) AssignNewCredential(typ *string) (resp datatypes.Network_Storage_Credential, err error) {
+	params := []interface{}{
+		typ,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "assignNewCredential", params, &r.Options, &resp)
+	return
+}
+
+// This method will change the password for the given Storage/Virtual Server Storage account.
+func (r Network_Storage_Iscsi) ChangePassword(username *string, currentPassword *string, newPassword *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+		currentPassword,
+		newPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "changePassword", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}}
+//
+// collectBandwidth() retrieves the bandwidth usage for the current billing cycle.
+func (r Network_Storage_Iscsi) CollectBandwidth(typ *string, startDate *datatypes.Time, endDate *datatypes.Time) (resp uint, err error) {
+	params := []interface{}{
+		typ,
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "collectBandwidth", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}}
+//
+// collectBytesUsed() retrieves the number of bytes of capacity currently in use on a Storage account.
+func (r Network_Storage_Iscsi) CollectBytesUsed() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "collectBytesUsed", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage_Iscsi) CreateFolder(folder *string) (resp bool, err error) {
+	params := []interface{}{
+		folder,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "createFolder", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage_Iscsi) CreateSnapshot(notes *string) (resp datatypes.Network_Storage, err error) {
+	params := []interface{}{
+		notes,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "createSnapshot", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Delete all files within a Storage account. Depending on the type of Storage account, deleting either deletes files permanently or sends files to your account's recycle bin.
+//
+// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted.
+//
+// Please note, files cannot be restored once they are permanently deleted.
+func (r Network_Storage_Iscsi) DeleteAllFiles() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "deleteAllFiles", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Delete an individual file within a Storage account. Depending on the type of Storage account, deleting a file either deletes the file permanently or sends the file to your account's recycle bin.
+//
+// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the file is in the account's recycle bin. If the file exists in the recycle bin, then it is permanently deleted.
+//
+// Please note, a file cannot be restored once it is permanently deleted.
+func (r Network_Storage_Iscsi) DeleteFile(fileId *string) (resp bool, err error) {
+	params := []interface{}{
+		fileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "deleteFile", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Delete multiple files within a Storage account. Depending on the type of Storage account, deleting either deletes files permanently or sends files to your account's recycle bin.
+//
+// Currently, Virtual Server storage is the only type of Storage account that sends files to a recycle bin when deleted. When called against a Virtual Server storage account, this method also determines if the files are in the account's recycle bin. If the files exist in the recycle bin, then they are permanently deleted.
+//
+// Please note, files cannot be restored once they are permanently deleted.
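+//
+// Sketch (assuming an iscsi service value as in the AllowAccessFromHostList
+// example above; the ids are placeholders). Note the ids are passed as a plain
+// string slice, not as pointers:
+//
+//	ok, err := iscsi.DeleteFiles([]string{"fileId1", "fileId2"})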
+func (r Network_Storage_Iscsi) DeleteFiles(fileIds []string) (resp bool, err error) {
+	params := []interface{}{
+		fileIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "deleteFiles", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage_Iscsi) DeleteFolder(folder *string) (resp bool, err error) {
+	params := []interface{}{
+		folder,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "deleteFolder", params, &r.Options, &resp)
+	return
+}
+
+// Delete a network storage volume. '''This cannot be undone.''' At this time only network storage snapshots may be deleted with this method.
+//
+// ''deleteObject'' returns Boolean ''true'' on successful deletion or ''false'' if it was unable to remove a volume.
+func (r Network_Storage_Iscsi) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// This method is not valid for Legacy iSCSI Storage Volumes.
+//
+// Disable scheduled snapshots of this storage volume. Scheduling options include HOURLY, DAILY and WEEKLY schedules.
+func (r Network_Storage_Iscsi) DisableSnapshots(scheduleType *string) (resp bool, err error) {
+	params := []interface{}{
+		scheduleType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "disableSnapshots", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Download a file from a Storage account. This method returns a file's details including the file's raw content.
+func (r Network_Storage_Iscsi) DownloadFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		fileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "downloadFile", params, &r.Options, &resp)
+	return
+}
+
+// This method will change the password of a credential created using the 'addNewCredential' method. If the credential exists on multiple storage volumes it will change for those volumes as well.
+func (r Network_Storage_Iscsi) EditCredential(username *string, newPassword *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+		newPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "editCredential", params, &r.Options, &resp)
+	return
+}
+
+// The password and/or notes may be modified for the Storage service except EVault passwords and notes.
+func (r Network_Storage_Iscsi) EditObject(templateObject *datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// This method is not valid for Legacy iSCSI Storage Volumes.
+//
+// Enable scheduled snapshots of this storage volume. Scheduling options include HOURLY, DAILY and WEEKLY schedules. For HOURLY schedules, provide relevant data for $scheduleType, $retentionCount and $minute. For DAILY schedules, provide relevant data for $scheduleType, $retentionCount, $minute, and $hour. For WEEKLY schedules, provide relevant data for all parameters of this method.
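+//
+// A hedged example (all values are placeholders): a WEEKLY schedule retaining
+// four snapshots, taken Sundays at 02:30, might be requested as:
+//
+//	ok, err := iscsi.EnableSnapshots(sl.String("WEEKLY"), sl.Int(4), sl.Int(30), sl.Int(2), sl.String("SUNDAY"))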
+func (r Network_Storage_Iscsi) EnableSnapshots(scheduleType *string, retentionCount *int, minute *int, hour *int, dayOfWeek *string) (resp bool, err error) {
+	params := []interface{}{
+		scheduleType,
+		retentionCount,
+		minute,
+		hour,
+		dayOfWeek,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "enableSnapshots", params, &r.Options, &resp)
+	return
+}
+
+// Failback from a volume replicant. In order to fail back, the volume must have already been failed over to a replicant.
+func (r Network_Storage_Iscsi) FailbackFromReplicant() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "failbackFromReplicant", nil, &r.Options, &resp)
+	return
+}
+
+// Failover to a volume replicant. During the time in which the replicant is in use, the local NAS volume will not be available.
+func (r Network_Storage_Iscsi) FailoverToReplicant(replicantId *int) (resp bool, err error) {
+	params := []interface{}{
+		replicantId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "failoverToReplicant", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account that a Storage service belongs to.
+func (r Network_Storage_Iscsi) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Other usernames and passwords associated with a Storage volume.
+func (r Network_Storage_Iscsi) GetAccountPassword() (resp datatypes.Account_Password, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAccountPassword", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The currently active transactions on a network storage volume.
+func (r Network_Storage_Iscsi) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getActiveTransactions", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files in a Storage account's root directory. This does not download file content.
+func (r Network_Storage_Iscsi) GetAllFiles() (resp []datatypes.Container_Utility_File_Entity, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllFiles", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date for all files matching the filter's criteria in a Storage account's root directory. This does not download file content.
+func (r Network_Storage_Iscsi) GetAllFilesByFilter(filter *datatypes.Container_Utility_File_Entity) (resp []datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		filter,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllFilesByFilter", params, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Hardware that can be authorized to this SoftLayer_Network_Storage.
+func (r Network_Storage_Iscsi) GetAllowableHardware(filterHostname *string) (resp []datatypes.Hardware, err error) {
+	params := []interface{}{
+		filterHostname,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowableHardware", params, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Subnet_IpAddress that can be authorized to this SoftLayer_Network_Storage.
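+//
+// Sketch (the subnet id and the address filter are placeholder assumptions):
+//
+//	ips, err := iscsi.GetAllowableIpAddresses(sl.Int(1234), sl.String("10.0.0."))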
+func (r Network_Storage_Iscsi) GetAllowableIpAddresses(subnetId *int, filterIpAddress *string) (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	params := []interface{}{
+		subnetId,
+		filterIpAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowableIpAddresses", params, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Subnet that can be authorized to this SoftLayer_Network_Storage.
+func (r Network_Storage_Iscsi) GetAllowableSubnets(filterNetworkIdentifier *string) (resp []datatypes.Network_Subnet, err error) {
+	params := []interface{}{
+		filterNetworkIdentifier,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowableSubnets", params, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Virtual_Guest that can be authorized to this SoftLayer_Network_Storage.
+func (r Network_Storage_Iscsi) GetAllowableVirtualGuests(filterHostname *string) (resp []datatypes.Virtual_Guest, err error) {
+	params := []interface{}{
+		filterHostname,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowableVirtualGuests", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume.
+func (r Network_Storage_Iscsi) GetAllowedHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieves the limit of allowed hosts per volume.
+func (r Network_Storage_Iscsi) GetAllowedHostsLimit() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedHostsLimit", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume.
+func (r Network_Storage_Iscsi) GetAllowedIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Hardware objects which are allowed access to this storage volume's Replicant.
+func (r Network_Storage_Iscsi) GetAllowedReplicationHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedReplicationHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Subnet_IpAddress objects which are allowed access to this storage volume's Replicant.
+func (r Network_Storage_Iscsi) GetAllowedReplicationIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedReplicationIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume's Replicant.
+func (r Network_Storage_Iscsi) GetAllowedReplicationSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedReplicationSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume's Replicant.
+func (r Network_Storage_Iscsi) GetAllowedReplicationVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedReplicationVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Subnet objects which are allowed access to this storage volume. +func (r Network_Storage_Iscsi) GetAllowedSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Virtual_Guest objects which are allowed access to this storage volume. +func (r Network_Storage_Iscsi) GetAllowedVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getAllowedVirtualGuests", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for a Storage volume. +func (r Network_Storage_Iscsi) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Iscsi) GetBillingItemCategory() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getBillingItemCategory", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by username and storage account type. Use this method if you wish to retrieve a storage record by username rather than by id. The ''type'' parameter must correspond to one of the available ''nasType'' values in the SoftLayer_Network_Storage data type. +func (r Network_Storage_Iscsi) GetByUsername(username *string, typ *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + username, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getByUsername", params, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume, in bytes. +func (r Network_Storage_Iscsi) GetBytesUsed() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getBytesUsed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetCdnUrls() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_ContentDeliveryUrl, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getCdnUrls", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetClusterResource() (resp datatypes.Network_Service_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getClusterResource", nil, &r.Options, &resp) + return +} + +// Retrieve The schedule id which was executed to create a snapshot. +func (r Network_Storage_Iscsi) GetCreationScheduleId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getCreationScheduleId", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Iscsi) GetCredentials() (resp []datatypes.Network_Storage_Credential, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getCredentials", nil, &r.Options, &resp) + return +} + +// Retrieve The Daily Schedule which is associated with this network storage volume. 
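+//
+// As with the other retrieval methods on this service, an object mask can
+// narrow the returned properties; a sketch (the property names are assumptions
+// about datatypes.Network_Storage_Schedule):
+//
+//	sched, err := iscsi.Mask("id,retentionCount").GetDailySchedule()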
+func (r Network_Storage_Iscsi) GetDailySchedule() (resp datatypes.Network_Storage_Schedule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getDailySchedule", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The events which have taken place on a network storage volume.
+func (r Network_Storage_Iscsi) GetEvents() (resp []datatypes.Network_Storage_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getEvents", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage_Iscsi) GetFileBlockEncryptedLocations() (resp []datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFileBlockEncryptedLocations", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve details such as id, name, size, create date of a file within a Storage account. This does not download file content.
+func (r Network_Storage_Iscsi) GetFileByIdentifier(identifier *string) (resp datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		identifier,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFileByIdentifier", params, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve the number of files in a Virtual Server Storage account's root directory. This does not include the files stored in the recycle bin.
+func (r Network_Storage_Iscsi) GetFileCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFileCount", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage_Iscsi) GetFileList(folder *string, path *string) (resp []datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		folder,
+		path,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFileList", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The NFS Network Mount Address Name for a given File Storage Volume.
+func (r Network_Storage_Iscsi) GetFileNetworkMountAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFileNetworkMountAddress", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve the number of files pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted.
+func (r Network_Storage_Iscsi) GetFilePendingDeleteCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFilePendingDeleteCount", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve a list of files that are pending deletion in a Storage account's recycle bin. Files in an account's recycle bin may either be restored to the account's root directory or permanently deleted. This method does not download file content.
+func (r Network_Storage_Iscsi) GetFilesPendingDelete() (resp []datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFilesPendingDelete", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetFolderList() (resp []datatypes.Container_Network_Storage_Hub_ObjectStorage_Folder, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getFolderList", nil, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} +// +// getGraph() retrieves a Storage account's usage and returns a PNG graph image, title, and the minimum and maximum dates included in the graphed date range. Virtual Server storage accounts can also graph upload and download bandwidth usage. +func (r Network_Storage_Iscsi) GetGraph(startDate *datatypes.Time, endDate *datatypes.Time, typ *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) { + params := []interface{}{ + startDate, + endDate, + typ, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getGraph", params, &r.Options, &resp) + return +} + +// Retrieve When applicable, the hardware associated with a Storage service. +func (r Network_Storage_Iscsi) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Iscsi) GetHasEncryptionAtRest() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getHasEncryptionAtRest", nil, &r.Options, &resp) + return +} + +// Retrieve The Hourly Schedule which is associated with this network storage volume. +func (r Network_Storage_Iscsi) GetHourlySchedule() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getHourlySchedule", nil, &r.Options, &resp) + return +} + +// Retrieve The maximum number of IOPs selected for this volume. +func (r Network_Storage_Iscsi) GetIops() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getIops", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether a volume is ready to order snapshot space, or, if snapshot space is already available, to assign a snapshot schedule, or to take a manual snapshot. +func (r Network_Storage_Iscsi) GetIsReadyForSnapshot() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getIsReadyForSnapshot", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether a volume is ready to have Hosts authorized to access it. This does not indicate whether another operation may be blocking, please refer to this volume's volumeStatus property for details. +func (r Network_Storage_Iscsi) GetIsReadyToMount() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getIsReadyToMount", nil, &r.Options, &resp) + return +} + +// Retrieve Relationship between a container volume and iSCSI LUNs. +func (r Network_Storage_Iscsi) GetIscsiLuns() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getIscsiLuns", nil, &r.Options, &resp) + return +} + +// Retrieve The ID of the LUN volume. 
+func (r Network_Storage_Iscsi) GetLunId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getLunId", nil, &r.Options, &resp) + return +} + +// Retrieve The manually-created snapshots associated with this SoftLayer_Network_Storage volume. Does not support pagination by result limit and offset. +func (r Network_Storage_Iscsi) GetManualSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getManualSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieve A network storage volume's metric tracking object. This object records all periodic polled data available to this volume. +func (r Network_Storage_Iscsi) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getMetricTrackingObject", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a network storage volume may be mounted. +func (r Network_Storage_Iscsi) GetMountableFlag() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getMountableFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of split or move operation as a part of volume duplication. +func (r Network_Storage_Iscsi) GetMoveAndSplitStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getMoveAndSplitStatus", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetNetworkConnectionDetails() (resp datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetNetworkMountAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getNetworkMountAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The subscribers that will be notified for usage amount warnings and overages. +func (r Network_Storage_Iscsi) GetNotificationSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetObject() (resp datatypes.Network_Storage_Iscsi, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetObjectStorageConnectionInformation() (resp []datatypes.Container_Network_Service_Resource_ObjectStorage_ConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getObjectStorageConnectionInformation", nil, &r.Options, &resp) + return +} + +// Retrieve network storage accounts by SoftLayer_Network_Storage_Credential object. Use this method if you wish to retrieve a storage record by a credential rather than by id. 
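+//
+// Sketch (the username is a placeholder; Username is taken to be a field of
+// datatypes.Network_Storage_Credential):
+//
+//	cred := &datatypes.Network_Storage_Credential{Username: sl.String("SL01SU123-1")}
+//	vols, err := iscsi.GetObjectsByCredential(cred)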
+func (r Network_Storage_Iscsi) GetObjectsByCredential(credentialObject *datatypes.Network_Storage_Credential) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + credentialObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getObjectsByCredential", params, &r.Options, &resp) + return +} + +// Retrieve The name of the snapshot that this volume was duplicated from. +func (r Network_Storage_Iscsi) GetOriginalSnapshotName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getOriginalSnapshotName", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the volume that this volume was duplicated from. +func (r Network_Storage_Iscsi) GetOriginalVolumeName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getOriginalVolumeName", nil, &r.Options, &resp) + return +} + +// Retrieve The size (in GB) of the volume that this volume was duplicated from, or in the case of iSCSI LUNs, the size of the base originally-provisioned LUN. +func (r Network_Storage_Iscsi) GetOriginalVolumeSize() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getOriginalVolumeSize", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type. +func (r Network_Storage_Iscsi) GetOsType() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getOsType", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured SoftLayer_Network_Storage_Iscsi_OS_Type ID. +func (r Network_Storage_Iscsi) GetOsTypeId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getOsTypeId", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume in a parental role. +func (r Network_Storage_Iscsi) GetParentPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getParentPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve The parent volume of a volume in a complex storage relationship. +func (r Network_Storage_Iscsi) GetParentVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getParentVolume", nil, &r.Options, &resp) + return +} + +// Retrieve The volumes or snapshots partnered with a network storage volume. +func (r Network_Storage_Iscsi) GetPartnerships() (resp []datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getPartnerships", nil, &r.Options, &resp) + return +} + +// Retrieve All permissions group(s) this volume is in. +func (r Network_Storage_Iscsi) GetPermissionsGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getPermissionsGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The properties used to provide additional details about a network storage volume. +func (r Network_Storage_Iscsi) GetProperties() (resp []datatypes.Network_Storage_Property, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getProperties", nil, &r.Options, &resp) + return +} + +// Retrieve The number of IOPs provisioned for this volume. 
+func (r Network_Storage_Iscsi) GetProvisionedIops() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getProvisionedIops", nil, &r.Options, &resp)
+	return
+}
+
+// {{CloudLayerOnlyMethod}} Retrieve the details of a file that is pending deletion in a Storage account's recycle bin.
+func (r Network_Storage_Iscsi) GetRecycleBinFileByIdentifier(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) {
+	params := []interface{}{
+		fileId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getRecycleBinFileByIdentifier", params, &r.Options, &resp)
+	return
+}
+
+// Retrieves the remaining number of allowed hosts per volume.
+func (r Network_Storage_Iscsi) GetRemainingAllowedHosts() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getRemainingAllowedHosts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The iSCSI LUN volumes being replicated by this network storage volume.
+func (r Network_Storage_Iscsi) GetReplicatingLuns() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicatingLuns", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network storage volume being replicated by a volume.
+func (r Network_Storage_Iscsi) GetReplicatingVolume() (resp datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicatingVolume", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The volume replication events.
+func (r Network_Storage_Iscsi) GetReplicationEvents() (resp []datatypes.Network_Storage_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicationEvents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network storage volumes configured to be replicants of a volume.
+func (r Network_Storage_Iscsi) GetReplicationPartners() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicationPartners", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Replication Schedule associated with a network storage volume.
+func (r Network_Storage_Iscsi) GetReplicationSchedule() (resp datatypes.Network_Storage_Schedule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicationSchedule", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current replication status of a network storage volume. Indicates Failover or Failback status.
+func (r Network_Storage_Iscsi) GetReplicationStatus() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getReplicationStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The schedules which are associated with a network storage volume.
+func (r Network_Storage_Iscsi) GetSchedules() (resp []datatypes.Network_Storage_Schedule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSchedules", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network resource a Storage service is connected to.
+func (r Network_Storage_Iscsi) GetServiceResource() (resp datatypes.Network_Service_Resource, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getServiceResource", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The IP address of a Storage resource.
+func (r Network_Storage_Iscsi) GetServiceResourceBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getServiceResourceBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The name of a Storage's network resource. +func (r Network_Storage_Iscsi) GetServiceResourceName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getServiceResourceName", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's configured snapshot space size. +func (r Network_Storage_Iscsi) GetSnapshotCapacityGb() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotCapacityGb", nil, &r.Options, &resp) + return +} + +// Retrieve The creation timestamp of the snapshot on the storage platform. +func (r Network_Storage_Iscsi) GetSnapshotCreationTimestamp() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotCreationTimestamp", nil, &r.Options, &resp) + return +} + +// Retrieve The percentage of used snapshot space after which to delete automated snapshots. +func (r Network_Storage_Iscsi) GetSnapshotDeletionThresholdPercentage() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotDeletionThresholdPercentage", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshot size in bytes. +func (r Network_Storage_Iscsi) GetSnapshotSizeBytes() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotSizeBytes", nil, &r.Options, &resp) + return +} + +// Retrieve A volume's available snapshot reservation space. +func (r Network_Storage_Iscsi) GetSnapshotSpaceAvailable() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotSpaceAvailable", nil, &r.Options, &resp) + return +} + +// Retrieve The snapshots associated with this SoftLayer_Network_Storage volume. +func (r Network_Storage_Iscsi) GetSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieves a list of snapshots for this SoftLayer_Network_Storage volume. This method works with the result limits and offset to support pagination. +func (r Network_Storage_Iscsi) GetSnapshotsForVolume() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getSnapshotsForVolume", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Iscsi) GetStaasVersion() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getStaasVersion", nil, &r.Options, &resp) + return +} + +// Retrieve The network storage groups this volume is attached to. 
+func (r Network_Storage_Iscsi) GetStorageGroups() (resp []datatypes.Network_Storage_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getStorageGroups", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetStorageGroupsNetworkConnectionDetails() (resp []datatypes.Container_Network_Storage_NetworkConnectionInformation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getStorageGroupsNetworkConnectionDetails", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Storage_Iscsi) GetStorageTierLevel() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getStorageTierLevel", nil, &r.Options, &resp) + return +} + +// Retrieve A description of the Storage object. +func (r Network_Storage_Iscsi) GetStorageType() (resp datatypes.Network_Storage_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getStorageType", nil, &r.Options, &resp) + return +} + +// Retrieve The amount of space used by the volume. +func (r Network_Storage_Iscsi) GetTotalBytesUsed() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getTotalBytesUsed", nil, &r.Options, &resp) + return +} + +// Retrieve The total snapshot retention count of all schedules on this network storage volume. +func (r Network_Storage_Iscsi) GetTotalScheduleSnapshotRetentionCount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getTotalScheduleSnapshotRetentionCount", nil, &r.Options, &resp) + return +} + +// Retrieve The usage notification for SL Storage services. +func (r Network_Storage_Iscsi) GetUsageNotification() (resp datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getUsageNotification", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) GetValidReplicationTargetDatacenterLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getValidReplicationTargetDatacenterLocations", nil, &r.Options, &resp) + return +} + +// Retrieve The type of network storage service. +func (r Network_Storage_Iscsi) GetVendorName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getVendorName", nil, &r.Options, &resp) + return +} + +// Retrieve When applicable, the virtual guest associated with a Storage service. +func (r Network_Storage_Iscsi) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// This method returns the parameters for cloning a volume +func (r Network_Storage_Iscsi) GetVolumeDuplicateParameters() (resp datatypes.Container_Network_Storage_VolumeDuplicateParameters, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getVolumeDuplicateParameters", nil, &r.Options, &resp) + return +} + +// Retrieve The username and password history for a Storage service. +func (r Network_Storage_Iscsi) GetVolumeHistory() (resp []datatypes.Network_Storage_History, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getVolumeHistory", nil, &r.Options, &resp) + return +} + +// Retrieve The current status of a network storage volume. 
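+//
+// Sketch: when waiting on a provisioning or duplication operation, callers
+// typically poll this value (often together with GetIsReadyToMount); the set
+// of status strings is not documented here, so compare against what the API
+// actually returns for your volumes:
+//
+//	status, err := iscsi.GetVolumeStatus()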
+func (r Network_Storage_Iscsi) GetVolumeStatus() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getVolumeStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account username and password for the EVault webCC interface.
+func (r Network_Storage_Iscsi) GetWebccAccount() (resp datatypes.Account_Password, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getWebccAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Weekly Schedule which is associated with this network storage volume.
+func (r Network_Storage_Iscsi) GetWeeklySchedule() (resp datatypes.Network_Storage_Schedule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "getWeeklySchedule", nil, &r.Options, &resp)
+	return
+}
+
+// Immediate Failover to a volume replicant. During the time in which the replicant is in use, the local NAS volume will not be available.
+func (r Network_Storage_Iscsi) ImmediateFailoverToReplicant(replicantId *int) (resp bool, err error) {
+	params := []interface{}{
+		replicantId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "immediateFailoverToReplicant", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Network_Storage_Iscsi) IsBlockingOperationInProgress(exemptStatusKeyNames []string) (resp bool, err error) {
+	params := []interface{}{
+		exemptStatusKeyNames,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "isBlockingOperationInProgress", params, &r.Options, &resp)
+	return
+}
+
+// This method returns a boolean indicating whether the clone volume is ready for snapshot.
+func (r Network_Storage_Iscsi) IsDuplicateReadyForSnapshot() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "isDuplicateReadyForSnapshot", nil, &r.Options, &resp)
+	return
+}
+
+// This method returns a boolean indicating whether the clone volume is ready to mount.
+func (r Network_Storage_Iscsi) IsDuplicateReadyToMount() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "isDuplicateReadyToMount", nil, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume.
+func (r Network_Storage_Iscsi) RemoveAccessFromHardware(hardwareObjectTemplate *datatypes.Hardware) (resp bool, err error) {
+	params := []interface{}{
+		hardwareObjectTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromHardware", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage volume.
+func (r Network_Storage_Iscsi) RemoveAccessFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) {
+	params := []interface{}{
+		hardwareObjectTemplates,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromHardwareList", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to modify the access control list for this Storage volume.
The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessFromHost(typeClassName *string, hostId *int) (resp datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + typeClassName, + hostId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromHost", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The [[SoftLayer_Hardware|SoftLayer_Virtual_Guest|SoftLayer_Network_Subnet|SoftLayer_Network_Subnet_IpAddress]] objects which have been allowed access to this storage will be listed in the [[allowedHardware|allowedVirtualGuests|allowedSubnets|allowedIpAddresses]] property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessFromHostList(hostObjectTemplates []datatypes.Container_Network_Storage_Host) (resp []datatypes.Network_Storage_Allowed_Host, err error) { + params := []interface{}{ + hostObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromHostList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) RemoveAccessFromIpAddress(ipAddressObjectTemplate *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromIpAddress", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) RemoveAccessFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) RemoveAccessFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromSubnet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) RemoveAccessFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessFromVirtualGuest(virtualGuestObjectTemplate *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromVirtualGuest", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume. 
The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replica volume. The SoftLayer_Hardware objects which have been allowed access to this storage will be listed in the allowedHardware property of this storage replica volume. +func (r Network_Storage_Iscsi) RemoveAccessToReplicantFromHardwareList(hardwareObjectTemplates []datatypes.Hardware) (resp bool, err error) { + params := []interface{}{ + hardwareObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessToReplicantFromHardwareList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replica volume. The SoftLayer_Network_Subnet_IpAddress objects which have been allowed access to this storage will be listed in the allowedIpAddresses property of this storage replica volume. +func (r Network_Storage_Iscsi) RemoveAccessToReplicantFromIpAddressList(ipAddressObjectTemplates []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + ipAddressObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessToReplicantFromIpAddressList", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) RemoveAccessToReplicantFromSubnet(subnetObjectTemplate *datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplate, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessToReplicantFromSubnet", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage volume's replica. The SoftLayer_Network_Subnet objects which have been allowed access to this storage volume's replica will be listed in the allowedReplicationSubnets property of this storage volume. +func (r Network_Storage_Iscsi) RemoveAccessToReplicantFromSubnetList(subnetObjectTemplates []datatypes.Network_Subnet) (resp bool, err error) { + params := []interface{}{ + subnetObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessToReplicantFromSubnetList", params, &r.Options, &resp) + return +} + +// This method is used to modify the access control list for this Storage replica volume. The SoftLayer_Virtual_Guest objects which have been allowed access to this storage will be listed in the allowedVirtualGuests property of this storage replica volume. +func (r Network_Storage_Iscsi) RemoveAccessToReplicantFromVirtualGuestList(virtualGuestObjectTemplates []datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + virtualGuestObjectTemplates, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeAccessToReplicantFromVirtualGuestList", params, &r.Options, &resp) + return +} + +// This method will remove a credential from the current volume. The credential must have been created using the 'addNewCredential' method. 
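Since every binding in this file follows the same `DoRequest` template, a short usage sketch may help; this one exercises the access-control helpers above together with the removeCredential binding that follows. The session credentials, object IDs, and username are placeholders, and the import paths are the upstream softlayer-go ones, which may differ in this vendored copy:

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	// Placeholder credentials; session.New also reads SL_USERNAME/SL_API_KEY
	// from the environment when called with no arguments.
	sess := session.New("user", "api-key")

	// Target a specific iSCSI volume by its SoftLayer id (placeholder).
	volume := services.GetNetworkStorageIscsiService(sess).Id(1234567)

	// Revoke a virtual guest's access to the volume (placeholder guest id);
	// sl.Int is the upstream pointer helper.
	guest := datatypes.Virtual_Guest{Id: sl.Int(7654321)}
	if _, err := volume.RemoveAccessFromVirtualGuest(&guest); err != nil {
		log.Fatal(err)
	}

	// Drop a credential previously created with addNewCredential.
	ok, err := volume.RemoveCredential(sl.String("backup-user"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("credential removed:", ok)
}
```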
+func (r Network_Storage_Iscsi) RemoveCredential(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "removeCredential", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Restore an individual file so that it may be used as it was before it was deleted. +// +// If a file is deleted from a Virtual Server Storage account, the file is placed into the account's recycle bin and not permanently deleted. Therefore, restoreFile can be used to place the file back into your Virtual Server account's root directory. +func (r Network_Storage_Iscsi) RestoreFile(fileId *string) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + fileId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "restoreFile", params, &r.Options, &resp) + return +} + +// Restore the volume from a snapshot that was previously taken. +func (r Network_Storage_Iscsi) RestoreFromSnapshot(snapshotId *int) (resp bool, err error) { + params := []interface{}{ + snapshotId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "restoreFromSnapshot", params, &r.Options, &resp) + return +} + +// This method will retrieve the password for the StorageLayer or Virtual Server Storage Account and email the password. The Storage Account passwords will be emailed to the master user. For Virtual Server Storage, the password will be sent to the email address used as the username. +func (r Network_Storage_Iscsi) SendPasswordReminderEmail(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "sendPasswordReminderEmail", params, &r.Options, &resp) + return +} + +// Enable or disable the mounting of a Storage volume. When mounting is enabled the Storage volume will be mountable or available for use. +// +// For Virtual Server volumes, disabling mounting will deny access to the Virtual Server Account, remove published material and deny all file interaction including uploads and downloads. +// +// Enabling or disabling mounting for Storage volumes is not possible if mounting has been disabled by SoftLayer or a parent account. +func (r Network_Storage_Iscsi) SetMountable(mountable *bool) (resp bool, err error) { + params := []interface{}{ + mountable, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "setMountable", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi) SetSnapshotAllocation(capacityGb *int) (err error) { + var resp datatypes.Void + params := []interface{}{ + capacityGb, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "setSnapshotAllocation", params, &r.Options, &resp) + return +} + +// Upgrade the Storage volume to one of the upgradable packages (for example from 10 Gigs of EVault storage to 100 Gigs of EVault storage). +func (r Network_Storage_Iscsi) UpgradeVolumeCapacity(itemId *int) (resp bool, err error) { + params := []interface{}{ + itemId, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "upgradeVolumeCapacity", params, &r.Options, &resp) + return +} + +// {{CloudLayerOnlyMethod}} Upload a file to a Storage account's root directory. Once uploaded, this method returns a new file entity identifier for the uploaded file. +// +// The following properties are required in the ''file'' parameter. 
+// *'''name''': The name of the file you wish to upload +// *'''content''': The raw contents of the file you wish to upload. +// *'''contentType''': The MIME-type of content that you wish to upload. +func (r Network_Storage_Iscsi) UploadFile(file *datatypes.Container_Utility_File_Entity) (resp datatypes.Container_Utility_File_Entity, err error) { + params := []interface{}{ + file, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi", "uploadFile", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Storage_Iscsi_OS_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageIscsiOSTypeService returns an instance of the Network_Storage_Iscsi_OS_Type SoftLayer service +func GetNetworkStorageIscsiOSTypeService(sess *session.Session) Network_Storage_Iscsi_OS_Type { + return Network_Storage_Iscsi_OS_Type{Session: sess} +} + +func (r Network_Storage_Iscsi_OS_Type) Id(id int) Network_Storage_Iscsi_OS_Type { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Iscsi_OS_Type) Mask(mask string) Network_Storage_Iscsi_OS_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Iscsi_OS_Type) Filter(filter string) Network_Storage_Iscsi_OS_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Iscsi_OS_Type) Limit(limit int) Network_Storage_Iscsi_OS_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Iscsi_OS_Type) Offset(offset int) Network_Storage_Iscsi_OS_Type { + r.Options.Offset = &offset + return r +} + +// Use this method to retrieve all iSCSI OS Types. +func (r Network_Storage_Iscsi_OS_Type) GetAllObjects() (resp []datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi_OS_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Iscsi_OS_Type) GetObject() (resp datatypes.Network_Storage_Iscsi_OS_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Iscsi_OS_Type", "getObject", nil, &r.Options, &resp) + return +} + +// Schedules can be created for select Storage services, such as iscsi. These schedules are used to perform various tasks such as scheduling snapshots or synchronizing replicants. 
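The schedule service declared just below exposes each schedule parameter through its own getter. As a minimal read-only sketch (the schedule id is a placeholder, imports are the upstream softlayer-go paths, and credentials come from the environment):

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	// With no arguments, session.New reads SL_USERNAME / SL_API_KEY.
	sess := session.New()

	// Placeholder schedule id.
	schedule := services.GetNetworkStorageScheduleService(sess).Id(42)

	// Both getters return strings, per the generated bindings below.
	hour, err := schedule.GetHour()
	if err != nil {
		log.Fatal(err)
	}
	retention, err := schedule.GetRetentionCount()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("runs at hour %s, retains %s snapshots\n", hour, retention)
}
```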
+type Network_Storage_Schedule struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageScheduleService returns an instance of the Network_Storage_Schedule SoftLayer service +func GetNetworkStorageScheduleService(sess *session.Session) Network_Storage_Schedule { + return Network_Storage_Schedule{Session: sess} +} + +func (r Network_Storage_Schedule) Id(id int) Network_Storage_Schedule { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Schedule) Mask(mask string) Network_Storage_Schedule { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Schedule) Filter(filter string) Network_Storage_Schedule { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Schedule) Limit(limit int) Network_Storage_Schedule { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Schedule) Offset(offset int) Network_Storage_Schedule { + r.Options.Offset = &offset + return r +} + +// Create a nas volume schedule +func (r Network_Storage_Schedule) CreateObject(templateObject *datatypes.Network_Storage_Schedule) (resp datatypes.Network_Storage_Schedule, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "createObject", params, &r.Options, &resp) + return +} + +// Delete a network storage schedule. '''This cannot be undone.''' ''deleteObject'' returns Boolean ''true'' on successful deletion or ''false'' if it was unable to remove a schedule; +func (r Network_Storage_Schedule) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit a nas volume schedule +func (r Network_Storage_Schedule) EditObject(templateObject *datatypes.Network_Storage_Schedule) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The day of the month parameter of this schedule. +func (r Network_Storage_Schedule) GetDayOfMonth() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getDayOfMonth", nil, &r.Options, &resp) + return +} + +// Retrieve The day of the week parameter of this schedule. +func (r Network_Storage_Schedule) GetDayOfWeek() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getDayOfWeek", nil, &r.Options, &resp) + return +} + +// Retrieve Events which have been created as the result of a schedule execution. +func (r Network_Storage_Schedule) GetEvents() (resp []datatypes.Network_Storage_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The hour parameter of this schedule. +func (r Network_Storage_Schedule) GetHour() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getHour", nil, &r.Options, &resp) + return +} + +// Retrieve The minute parameter of this schedule. +func (r Network_Storage_Schedule) GetMinute() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getMinute", nil, &r.Options, &resp) + return +} + +// Retrieve The month of the year parameter of this schedule. 
+func (r Network_Storage_Schedule) GetMonthOfYear() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getMonthOfYear", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Schedule) GetObject() (resp datatypes.Network_Storage_Schedule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The associated partnership for a schedule. +func (r Network_Storage_Schedule) GetPartnership() (resp datatypes.Network_Storage_Partnership, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getPartnership", nil, &r.Options, &resp) + return +} + +// Retrieve Properties used for configuration of a schedule. +func (r Network_Storage_Schedule) GetProperties() (resp []datatypes.Network_Storage_Schedule_Property, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getProperties", nil, &r.Options, &resp) + return +} + +// Retrieve Replica snapshots which have been created as the result of this schedule's execution. +func (r Network_Storage_Schedule) GetReplicaSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getReplicaSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieve The number of snapshots this schedule is configured to retain. +func (r Network_Storage_Schedule) GetRetentionCount() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getRetentionCount", nil, &r.Options, &resp) + return +} + +// Retrieve Snapshots which have been created as the result of this schedule's execution. +func (r Network_Storage_Schedule) GetSnapshots() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getSnapshots", nil, &r.Options, &resp) + return +} + +// Retrieve The type provides a standardized definition for a schedule. +func (r Network_Storage_Schedule) GetType() (resp datatypes.Network_Storage_Schedule_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The associated volume for a schedule. +func (r Network_Storage_Schedule) GetVolume() (resp datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule", "getVolume", nil, &r.Options, &resp) + return +} + +// A schedule property type is used to allow for a standardized method of defining network storage schedules. 
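The property-type service declared next is effectively a read-only lookup table; enumerating it with getAllObjects is the common use. A sketch (upstream import paths assumed; credentials from the environment):

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New() // SL_USERNAME / SL_API_KEY from the environment

	svc := services.GetNetworkStorageSchedulePropertyTypeService(sess)
	types, err := svc.GetAllObjects()
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range types {
		// Field names live in the generated datatypes package; printing the
		// whole struct avoids guessing at individual properties here.
		fmt.Printf("%+v\n", t)
	}
}
```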
+type Network_Storage_Schedule_Property_Type struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkStorageSchedulePropertyTypeService returns an instance of the Network_Storage_Schedule_Property_Type SoftLayer service +func GetNetworkStorageSchedulePropertyTypeService(sess *session.Session) Network_Storage_Schedule_Property_Type { + return Network_Storage_Schedule_Property_Type{Session: sess} +} + +func (r Network_Storage_Schedule_Property_Type) Id(id int) Network_Storage_Schedule_Property_Type { + r.Options.Id = &id + return r +} + +func (r Network_Storage_Schedule_Property_Type) Mask(mask string) Network_Storage_Schedule_Property_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Storage_Schedule_Property_Type) Filter(filter string) Network_Storage_Schedule_Property_Type { + r.Options.Filter = filter + return r +} + +func (r Network_Storage_Schedule_Property_Type) Limit(limit int) Network_Storage_Schedule_Property_Type { + r.Options.Limit = &limit + return r +} + +func (r Network_Storage_Schedule_Property_Type) Offset(offset int) Network_Storage_Schedule_Property_Type { + r.Options.Offset = &offset + return r +} + +// Use this method to retrieve all network storage schedule property types. +func (r Network_Storage_Schedule_Property_Type) GetAllObjects() (resp []datatypes.Network_Storage_Schedule_Property_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule_Property_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Storage_Schedule_Property_Type) GetObject() (resp datatypes.Network_Storage_Schedule_Property_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Storage_Schedule_Property_Type", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Subnet data type contains general information relating to a single SoftLayer subnet. Personal information in this type such as names, addresses, and phone numbers are assigned to the account only and not to users belonging to the account. +type Network_Subnet struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetService returns an instance of the Network_Subnet SoftLayer service +func GetNetworkSubnetService(sess *session.Session) Network_Subnet { + return Network_Subnet{Session: sess} +} + +func (r Network_Subnet) Id(id int) Network_Subnet { + r.Options.Id = &id + return r +} + +func (r Network_Subnet) Mask(mask string) Network_Subnet { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet) Filter(filter string) Network_Subnet { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet) Limit(limit int) Network_Subnet { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet) Offset(offset int) Network_Subnet { + r.Options.Offset = &offset + return r +} + +// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. 
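The allowAccessToNetworkStorage binding that follows takes a skeleton datatypes.Network_Storage carrying only the target volume's id. A sketch under that assumption (ids are placeholders; sl.Int is the upstream pointer helper):

```go
package main

import (
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New() // SL_USERNAME / SL_API_KEY from the environment

	// Placeholder subnet and volume ids.
	subnet := services.GetNetworkSubnetService(sess).Id(112233)
	volume := datatypes.Network_Storage{Id: sl.Int(445566)}

	// Authorize every host on the subnet to the storage volume.
	if _, err := subnet.AllowAccessToNetworkStorage(&volume); err != nil {
		log.Fatal(err)
	}
}
```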
+func (r Network_Subnet) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "allowAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Network_Subnet) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "allowAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// Create the default PTR records for this subnet +func (r Network_Subnet) CreateReverseDomainRecords() (resp datatypes.Dns_Domain_Reverse, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "createReverseDomainRecords", nil, &r.Options, &resp) + return +} + +// This function is used to create a new transaction to modify a subnet route. Routes are updated in one to two minutes depending on the number of transactions that are pending for a router. +// +// Usage of this function is restricted and may only be called from authorized accounts. It is not available for general API users without justification and consent from a SoftLayer representative. +func (r Network_Subnet) CreateSubnetRouteUpdateTransaction(newEndPointIpAddress *string) (resp bool, err error) { + params := []interface{}{ + newEndPointIpAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "createSubnetRouteUpdateTransaction", params, &r.Options, &resp) + return +} + +// This function is used to create a new SoftLayer SWIP transaction to register your RWHOIS data with ARIN. SWIP transactions can only be initiated on subnets that contain more than 8 IP addresses. +func (r Network_Subnet) CreateSwipTransaction() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "createSwipTransaction", nil, &r.Options, &resp) + return +} + +// Edit the note for this subnet. +func (r Network_Subnet) EditNote(note *string) (resp bool, err error) { + params := []interface{}{ + note, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "editNote", params, &r.Options, &resp) + return +} + +// Retrieve a list of a SoftLayer customer's subnets along with their SWIP transaction statuses. This is a shortcut method that combines the SoftLayer_Network_Subnet retrieval methods along with [[object masks]] to retrieve their subnets' associated SWIP transactions as well. +// +// This is a special function built for SoftLayer's use on the SWIP section of the customer portal, but may also be useful for API users looking for the same data. +func (r Network_Subnet) FindAllSubnetsAndActiveSwipTransactionStatus() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "findAllSubnetsAndActiveSwipTransactionStatus", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve If present, the active registration for this subnet. 
+func (r Network_Subnet) GetActiveRegistration() (resp datatypes.Network_Subnet_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getActiveRegistration", nil, &r.Options, &resp) + return +} + +// Retrieve All the swip transactions associated with a subnet that are still active. +func (r Network_Subnet) GetActiveSwipTransaction() (resp datatypes.Network_Subnet_Swip_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getActiveSwipTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve The active transaction for a subnet. +func (r Network_Subnet) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve Identifier which distinguishes whether the subnet is public or private address space. +func (r Network_Subnet) GetAddressSpace() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAddressSpace", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this Subnet to Network Storage supporting access control lists. +func (r Network_Subnet) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Network_Subnet has access to. +func (r Network_Subnet) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage object replicas that this SoftLayer_Network_Subnet has access to. +func (r Network_Subnet) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that are authorized to access this SoftLayer_Network_Subnet. +func (r Network_Subnet) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Network_Subnet. +func (r Network_Subnet) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Retrieve The billing item for a subnet. +func (r Network_Subnet) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetBoundDescendants() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getBoundDescendants", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this subnet is associated with a router. 
Subnets that are not associated with a router cannot be routed. +func (r Network_Subnet) GetBoundRouterFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getBoundRouterFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetBoundRouters() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getBoundRouters", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetChildren() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve The data center this subnet may be routed within. +func (r Network_Subnet) GetDatacenter() (resp datatypes.Location_Datacenter, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetDescendants() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getDescendants", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetDisplayLabel() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getDisplayLabel", nil, &r.Options, &resp) + return +} + +// Retrieve A static routed ip address +func (r Network_Subnet) GetEndPointIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getEndPointIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetGlobalIpRecord() (resp datatypes.Network_Subnet_IpAddress_Global, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getGlobalIpRecord", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware using IP addresses on this subnet. +func (r Network_Subnet) GetHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve All the ip addresses associated with a subnet. +func (r Network_Subnet) GetIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve A subnet's associated network component. +func (r Network_Subnet) GetNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve The upstream network component firewall. +func (r Network_Subnet) GetNetworkComponentFirewall() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkComponentFirewall", nil, &r.Options, &resp) + return +} + +// Retrieve The Private Network identifier this subnet is within, if applicable. +func (r Network_Subnet) GetNetworkId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkId", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetNetworkProtectionAddresses() (resp []datatypes.Network_Protection_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkProtectionAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve IPSec network tunnels that have access to a private subnet. 
+func (r Network_Subnet) GetNetworkTunnelContexts() (resp []datatypes.Network_Tunnel_Module_Context, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkTunnelContexts", nil, &r.Options, &resp) + return +} + +// Retrieve The VLAN object that a subnet is associated with. +func (r Network_Subnet) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Subnet object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Subnet service. You can only retrieve the subnet whose vlan is associated with the account that your portal user is assigned to. +func (r Network_Subnet) GetObject() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The pod in which this subnet resides. +func (r Network_Subnet) GetPodName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getPodName", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetProtectedIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getProtectedIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRegionalInternetRegistry", nil, &r.Options, &resp) + return +} + +// Retrieve All registrations that have been created for this subnet. +func (r Network_Subnet) GetRegistrations() (resp []datatypes.Network_Subnet_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRegistrations", nil, &r.Options, &resp) + return +} + +// Retrieve The resource groups in which this subnet is a member. +func (r Network_Subnet) GetResourceGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getResourceGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The reverse DNS domain associated with this subnet. +func (r Network_Subnet) GetReverseDomain() (resp datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getReverseDomain", nil, &r.Options, &resp) + return +} + +// Retrieve all reverse DNS records associated with a subnet. +func (r Network_Subnet) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getReverseDomainRecords", nil, &r.Options, &resp) + return +} + +// Retrieve An identifier of the role the subnet is within. Roles dictate how a subnet may be used. +func (r Network_Subnet) GetRoleKeyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRoleKeyName", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the role the subnet is within. Roles dictate how a subnet may be used. +func (r Network_Subnet) GetRoleName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRoleName", nil, &r.Options, &resp) + return +} + +// getRoutableEndpointIpAddresses retrieves valid routable endpoint addresses for a subnet. 
You may use any IP address in a portable subnet, but may not use the network identifier, gateway, or broadcast address for primary and secondary on VLAN subnets. +func (r Network_Subnet) GetRoutableEndpointIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRoutableEndpointIpAddresses", nil, &r.Options, &resp) + return +} + +// Retrieve The identifier for the type of route the subnet is currently configured for. +func (r Network_Subnet) GetRoutingTypeKeyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRoutingTypeKeyName", nil, &r.Options, &resp) + return +} + +// Retrieve The name for the type of route the subnet is currently configured for. +func (r Network_Subnet) GetRoutingTypeName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getRoutingTypeName", nil, &r.Options, &resp) + return +} + +// Retrieve the subnet associated with an IP address. You may only retrieve subnets assigned to your SoftLayer customer account. +func (r Network_Subnet) GetSubnetForIpAddress(ipAddress *string) (resp datatypes.Network_Subnet, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getSubnetForIpAddress", params, &r.Options, &resp) + return +} + +// Retrieve All the swip transactions associated with a subnet. +func (r Network_Subnet) GetSwipTransaction() (resp []datatypes.Network_Subnet_Swip_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getSwipTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet) GetUnboundDescendants() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getUnboundDescendants", nil, &r.Options, &resp) + return +} + +// Retrieve Provides the total number of utilized IP addresses on this subnet. The primary consumers of IP addresses are compute resources, which can consume more than one address. This value is only supported for primary subnet types. +func (r Network_Subnet) GetUtilizedIpAddressCount() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getUtilizedIpAddressCount", nil, &r.Options, &resp) + return +} + +// Retrieve The Virtual Servers using IP addresses on this subnet. +func (r Network_Subnet) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// This method is used to remove access to multiple SoftLayer_Network_Storage volumes. +func (r Network_Subnet) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet", "removeAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Subnet_IpAddress data type contains general information relating to a single SoftLayer IPv4 address. 
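For the IP-address service declared below, a lookup by dotted-quad combined with an object mask is the usual entry point. A sketch (the mask string and address are illustrative; the Mask helper wraps comma-separated masks in mask[...] automatically, per the generated code):

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New() // SL_USERNAME / SL_API_KEY from the environment

	svc := services.GetNetworkSubnetIpAddressService(sess).
		Mask("id,ipAddress") // becomes mask[id,ipAddress]

	rec, err := svc.FindByIpv4Address(sl.String("10.0.0.5"))
	if err != nil {
		log.Fatal(err)
	}
	// Generated datatypes use pointer fields, so guard before dereferencing.
	if rec.Id != nil && rec.IpAddress != nil {
		fmt.Printf("record %d is %s\n", *rec.Id, *rec.IpAddress)
	}
}
```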
+type Network_Subnet_IpAddress struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetIpAddressService returns an instance of the Network_Subnet_IpAddress SoftLayer service +func GetNetworkSubnetIpAddressService(sess *session.Session) Network_Subnet_IpAddress { + return Network_Subnet_IpAddress{Session: sess} +} + +func (r Network_Subnet_IpAddress) Id(id int) Network_Subnet_IpAddress { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_IpAddress) Mask(mask string) Network_Subnet_IpAddress { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_IpAddress) Filter(filter string) Network_Subnet_IpAddress { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_IpAddress) Limit(limit int) Network_Subnet_IpAddress { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_IpAddress) Offset(offset int) Network_Subnet_IpAddress { + r.Options.Offset = &offset + return r +} + +// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Network_Subnet_IpAddress) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "allowAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Network_Subnet_IpAddress) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "allowAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// Edit a subnet IP address. +func (r Network_Subnet_IpAddress) EditObject(templateObject *datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "editObject", params, &r.Options, &resp) + return +} + +// This function is used to edit multiple objects at the same time. +func (r Network_Subnet_IpAddress) EditObjects(templateObjects []datatypes.Network_Subnet_IpAddress) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "editObjects", params, &r.Options, &resp) + return +} + +// Search for an IP address record by IPv4 address. +func (r Network_Subnet_IpAddress) FindByIpv4Address(ipAddress *string) (resp datatypes.Network_Subnet_IpAddress, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "findByIpv4Address", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this IP Address to Network Storage supporting access control lists. 
+func (r Network_Subnet_IpAddress) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getAllowedHost", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Network_Subnet_IpAddress has access to. +func (r Network_Subnet_IpAddress) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getAllowedNetworkStorage", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Network_Storage object replicas that this SoftLayer_Network_Subnet_IpAddress has access to. +func (r Network_Subnet_IpAddress) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp) + return +} + +// Retrieve The application delivery controller using this address. +func (r Network_Subnet_IpAddress) GetApplicationDeliveryController() (resp datatypes.Network_Application_Delivery_Controller, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getApplicationDeliveryController", nil, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that are authorized to access this SoftLayer_Network_Subnet_IpAddress. +func (r Network_Subnet_IpAddress) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getAttachedNetworkStorages", params, &r.Options, &resp) + return +} + +// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Network_Subnet_IpAddress. +func (r Network_Subnet_IpAddress) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) { + params := []interface{}{ + nasType, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getAvailableNetworkStorages", params, &r.Options, &resp) + return +} + +// Search for an IP address record by IP address. +func (r Network_Subnet_IpAddress) GetByIpAddress(ipAddress *string) (resp datatypes.Network_Subnet_IpAddress, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getByIpAddress", params, &r.Options, &resp) + return +} + +// Retrieve An IPSec network tunnel's address translations. These translations use a SoftLayer ip address from an assigned static NAT subnet to deliver the packets to the remote (customer) destination. +func (r Network_Subnet_IpAddress) GetContextTunnelTranslations() (resp []datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getContextTunnelTranslations", nil, &r.Options, &resp) + return +} + +// Retrieve All the subnets routed to an IP address. +func (r Network_Subnet_IpAddress) GetEndpointSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getEndpointSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve A network component that is statically routed to an IP address. 
+func (r Network_Subnet_IpAddress) GetGuestNetworkComponent() (resp datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getGuestNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve A network component that is statically routed to an IP address. +func (r Network_Subnet_IpAddress) GetGuestNetworkComponentBinding() (resp datatypes.Virtual_Guest_Network_Component_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getGuestNetworkComponentBinding", nil, &r.Options, &resp) + return +} + +// Retrieve A server that this IP address is routed to. +func (r Network_Subnet_IpAddress) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve A network component that is statically routed to an IP address. +func (r Network_Subnet_IpAddress) GetNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getNetworkComponent", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Subnet_IpAddress object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Subnet_IpAddress service. You can only retrieve the IP address whose subnet is associated with a VLAN that is associated with the account that your portal user is assigned to. +func (r Network_Subnet_IpAddress) GetObject() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The network gateway appliance using this address as the private IP address. +func (r Network_Subnet_IpAddress) GetPrivateNetworkGateway() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getPrivateNetworkGateway", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet_IpAddress) GetProtectionAddress() (resp []datatypes.Network_Protection_Address, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getProtectionAddress", nil, &r.Options, &resp) + return +} + +// Retrieve The network gateway appliance using this address as the public IP address. +func (r Network_Subnet_IpAddress) GetPublicNetworkGateway() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getPublicNetworkGateway", nil, &r.Options, &resp) + return +} + +// Retrieve An IPMI-based management network component of the IP address. +func (r Network_Subnet_IpAddress) GetRemoteManagementNetworkComponent() (resp datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getRemoteManagementNetworkComponent", nil, &r.Options, &resp) + return +} + +// Retrieve An IP address' associated subnet. 
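The getSubnet binding that follows, together with the syslog reporting getters after it, covers the common read path for an address record. A sketch (the record id is a placeholder, and the NetworkIdentifier/Cidr field names are assumed to mirror the SoftLayer property names in the generated datatypes package):

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New() // SL_USERNAME / SL_API_KEY from the environment

	// Placeholder id of a SoftLayer_Network_Subnet_IpAddress record.
	ip := services.GetNetworkSubnetIpAddressService(sess).Id(998877)

	// Resolve the parent subnet via getSubnet (bound just below).
	subnet, err := ip.GetSubnet()
	if err != nil {
		log.Fatal(err)
	}
	if subnet.NetworkIdentifier != nil && subnet.Cidr != nil {
		fmt.Printf("parent subnet: %s/%d\n", *subnet.NetworkIdentifier, *subnet.Cidr)
	}

	// The syslog reporting getters follow the same zero-parameter pattern.
	events, err := ip.GetSyslogEventsOneDay()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("syslog events in the last 24 hours:", len(events))
}
```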
+func (r Network_Subnet_IpAddress) GetSubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getSubnet", nil, &r.Options, &resp) + return +} + +// Retrieve All events for this IP address stored in the datacenter syslogs from the last 24 hours +func (r Network_Subnet_IpAddress) GetSyslogEventsOneDay() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getSyslogEventsOneDay", nil, &r.Options, &resp) + return +} + +// Retrieve All events for this IP address stored in the datacenter syslogs from the last 7 days +func (r Network_Subnet_IpAddress) GetSyslogEventsSevenDays() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getSyslogEventsSevenDays", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by destination port, for the last 24 hours +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsByDestinationPortOneDay() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsByDestinationPortOneDay", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by destination port, for the last 7 days +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsByDestinationPortSevenDays() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsByDestinationPortSevenDays", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by protocol, for the last 24 hours +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsByProtocolsOneDay() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsByProtocolsOneDay", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by protocol, for the last 7 days +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsByProtocolsSevenDays() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsByProtocolsSevenDays", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by source ip address, for the last 24 hours +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsBySourceIpOneDay() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsBySourceIpOneDay", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by source ip address, for the last 7 days +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsBySourceIpSevenDays() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsBySourceIpSevenDays", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by source port, for the last 24 hours +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsBySourcePortOneDay() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", 
"getTopTenSyslogEventsBySourcePortOneDay", nil, &r.Options, &resp) + return +} + +// Retrieve Top Ten network datacenter syslog events, grouped by source port, for the last 7 days +func (r Network_Subnet_IpAddress) GetTopTenSyslogEventsBySourcePortSevenDays() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getTopTenSyslogEventsBySourcePortSevenDays", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual guest that this IP address is routed to. +func (r Network_Subnet_IpAddress) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Retrieve Virtual licenses allocated for an IP Address. +func (r Network_Subnet_IpAddress) GetVirtualLicenses() (resp []datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "getVirtualLicenses", nil, &r.Options, &resp) + return +} + +// This method is used to remove access to multiple SoftLayer_Network_Storage volumes +func (r Network_Subnet_IpAddress) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress", "removeAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// no documentation yet +type Network_Subnet_IpAddress_Global struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetIpAddressGlobalService returns an instance of the Network_Subnet_IpAddress_Global SoftLayer service +func GetNetworkSubnetIpAddressGlobalService(sess *session.Session) Network_Subnet_IpAddress_Global { + return Network_Subnet_IpAddress_Global{Session: sess} +} + +func (r Network_Subnet_IpAddress_Global) Id(id int) Network_Subnet_IpAddress_Global { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_IpAddress_Global) Mask(mask string) Network_Subnet_IpAddress_Global { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_IpAddress_Global) Filter(filter string) Network_Subnet_IpAddress_Global { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_IpAddress_Global) Limit(limit int) Network_Subnet_IpAddress_Global { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_IpAddress_Global) Offset(offset int) Network_Subnet_IpAddress_Global { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Network_Subnet_IpAddress_Global) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The active transaction associated with this Global IP. +func (r Network_Subnet_IpAddress_Global) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for this Global IP. 
+func (r Network_Subnet_IpAddress_Global) GetBillingItem() (resp datatypes.Billing_Item_Network_Subnet_IpAddress_Global, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet_IpAddress_Global) GetDestinationIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getDestinationIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Subnet_IpAddress_Global) GetIpAddress() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getIpAddress", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Subnet_IpAddress_Global) GetObject() (resp datatypes.Network_Subnet_IpAddress_Global, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "getObject", nil, &r.Options, &resp) + return +} + +// This function is used to create a new transaction to modify a global IP route. Routes are updated in one to two minutes depending on the number of transactions that are pending for a router. +func (r Network_Subnet_IpAddress_Global) Route(newEndPointIpAddress *string) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + newEndPointIpAddress, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "route", params, &r.Options, &resp) + return +} + +// This function is used to create a new transaction to unroute a global IP address. Routes are updated in one to two minutes depending on the number of transactions that are pending for a router. +func (r Network_Subnet_IpAddress_Global) Unroute() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_IpAddress_Global", "unroute", nil, &r.Options, &resp) + return +} + +// The subnet registration data type contains general information relating to a single subnet registration instance. These registration instances can be updated to reflect changes, and will record the changes in the [[SoftLayer_Network_Subnet_Registration_Event|events]]. +type Network_Subnet_Registration struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetRegistrationService returns an instance of the Network_Subnet_Registration SoftLayer service +func GetNetworkSubnetRegistrationService(sess *session.Session) Network_Subnet_Registration { + return Network_Subnet_Registration{Session: sess} +} + +func (r Network_Subnet_Registration) Id(id int) Network_Subnet_Registration { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_Registration) Mask(mask string) Network_Subnet_Registration { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_Registration) Filter(filter string) Network_Subnet_Registration { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_Registration) Limit(limit int) Network_Subnet_Registration { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_Registration) Offset(offset int) Network_Subnet_Registration { + r.Options.Offset = &offset + return r +} + +// This method will initiate the removal of a subnet registration. 
+func (r Network_Subnet_Registration) ClearRegistration() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "clearRegistration", nil, &r.Options, &resp) + return +} + +// This method will create a new SoftLayer_Network_Subnet_Registration object. +// +// Input - [[SoftLayer_Network_Subnet_Registration (type)|SoftLayer_Network_Subnet_Registration]]
+// * networkIdentifier
+//   The base address of the [[SoftLayer_Network_Subnet|subnet]] being registered. This can be derived directly from the SoftLayer_Network_Subnet object's networkIdentifier property.
+//   * Required
+//   * Type - string
+// * cidr
+//   The CIDR prefix of the [[SoftLayer_Network_Subnet|subnet]] being registered. This can be derived directly from the SoftLayer_Network_Subnet object's cidr property.
+//   * Required
+//   * Type - integer
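+//
+// A sketch of a template built from the required fields above. The Go field names are assumed to mirror the listed properties, and sess is a placeholder for an authenticated *session.Session:
+//
+//    ident := "203.0.113.0"
+//    cidr := 28
+//    reg, err := GetNetworkSubnetRegistrationService(sess).CreateObject(
+//        &datatypes.Network_Subnet_Registration{
+//            NetworkIdentifier: &ident,
+//            Cidr:              &cidr,
+//        })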
    +func (r Network_Subnet_Registration) CreateObject(templateObject *datatypes.Network_Subnet_Registration) (resp datatypes.Network_Subnet_Registration, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "createObject", params, &r.Options, &resp) + return +} + +// This method will edit an existing SoftLayer_Network_Subnet_Registration object. For more detail, see [[SoftLayer_Network_Subnet_Registration::createObject|createObject]]. +func (r Network_Subnet_Registration) EditObject(templateObject *datatypes.Network_Subnet_Registration) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "editObject", params, &r.Options, &resp) + return +} + +// This method modifies a single registration by modifying the current [[SoftLayer_Network_Subnet_Registration_Details]] objects that are linked to that registration. +func (r Network_Subnet_Registration) EditRegistrationAttachedDetails(personObjectSkeleton *datatypes.Network_Subnet_Registration_Details, networkObjectSkeleton *datatypes.Network_Subnet_Registration_Details) (resp bool, err error) { + params := []interface{}{ + personObjectSkeleton, + networkObjectSkeleton, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "editRegistrationAttachedDetails", params, &r.Options, &resp) + return +} + +// Retrieve The account that this registration belongs to. +func (r Network_Subnet_Registration) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The cross-reference records that tie the [[SoftLayer_Account_Regional_Registry_Detail]] objects to the registration object. +func (r Network_Subnet_Registration) GetDetailReferences() (resp []datatypes.Network_Subnet_Registration_Details, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getDetailReferences", nil, &r.Options, &resp) + return +} + +// Retrieve The related registration events. +func (r Network_Subnet_Registration) GetEvents() (resp []datatypes.Network_Subnet_Registration_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getEvents", nil, &r.Options, &resp) + return +} + +// Retrieve The "network" detail object. +func (r Network_Subnet_Registration) GetNetworkDetail() (resp datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getNetworkDetail", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Subnet_Registration) GetObject() (resp datatypes.Network_Subnet_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The "person" detail object. +func (r Network_Subnet_Registration) GetPersonDetail() (resp datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getPersonDetail", nil, &r.Options, &resp) + return +} + +// Retrieve The related Regional Internet Registry. 
+func (r Network_Subnet_Registration) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getRegionalInternetRegistry", nil, &r.Options, &resp) + return +} + +// Retrieve The RIR handle that this registration object belongs to. This field may not be populated until the registration is complete. +func (r Network_Subnet_Registration) GetRegionalInternetRegistryHandle() (resp datatypes.Account_Rwhois_Handle, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getRegionalInternetRegistryHandle", nil, &r.Options, &resp) + return +} + +// Retrieve The status of this registration. +func (r Network_Subnet_Registration) GetStatus() (resp datatypes.Network_Subnet_Registration_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The subnet that this registration pertains to. +func (r Network_Subnet_Registration) GetSubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration", "getSubnet", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Subnet_Registration_Details objects are used to relate [[SoftLayer_Account_Regional_Registry_Detail]] objects to a [[SoftLayer_Network_Subnet_Registration]] object. This allows for easy reuse of registration details. It is important to note that only one detail object per type may be associated to a registration object. +type Network_Subnet_Registration_Details struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetRegistrationDetailsService returns an instance of the Network_Subnet_Registration_Details SoftLayer service +func GetNetworkSubnetRegistrationDetailsService(sess *session.Session) Network_Subnet_Registration_Details { + return Network_Subnet_Registration_Details{Session: sess} +} + +func (r Network_Subnet_Registration_Details) Id(id int) Network_Subnet_Registration_Details { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_Registration_Details) Mask(mask string) Network_Subnet_Registration_Details { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_Registration_Details) Filter(filter string) Network_Subnet_Registration_Details { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_Registration_Details) Limit(limit int) Network_Subnet_Registration_Details { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_Registration_Details) Offset(offset int) Network_Subnet_Registration_Details { + r.Options.Offset = &offset + return r +} + +// This method will create a new SoftLayer_Network_Subnet_Registration_Details object. +// +// Input - [[SoftLayer_Network_Subnet_Registration_Details (type)|SoftLayer_Network_Subnet_Registration_Details]]
+// * detailId
+//   The numeric ID of the [[SoftLayer_Account_Regional_Registry_Detail|detail]] object to relate.
+//   * Required
+//   * Type - integer
+// * registrationId
+//   The numeric ID of the [[SoftLayer_Network_Subnet_Registration|registration]] object to relate.
+//   * Required
+//   * Type - integer
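+//
+// A sketch under the same assumptions as the other examples in this file (placeholder ids; Go field names assumed to mirror the properties listed above):
+//
+//    detailId := 111
+//    registrationId := 222
+//    det, err := GetNetworkSubnetRegistrationDetailsService(sess).CreateObject(
+//        &datatypes.Network_Subnet_Registration_Details{
+//            DetailId:       &detailId,
+//            RegistrationId: &registrationId,
+//        })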
+func (r Network_Subnet_Registration_Details) CreateObject(templateObject *datatypes.Network_Subnet_Registration_Details) (resp datatypes.Network_Subnet_Registration_Details, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Details", "createObject", params, &r.Options, &resp) + return +} + +// This method will delete an existing SoftLayer_Network_Subnet_Registration_Details object. +func (r Network_Subnet_Registration_Details) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Details", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The related [[SoftLayer_Account_Regional_Registry_Detail|detail object]]. +func (r Network_Subnet_Registration_Details) GetDetail() (resp datatypes.Account_Regional_Registry_Detail, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Details", "getDetail", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Subnet_Registration_Details) GetObject() (resp datatypes.Network_Subnet_Registration_Details, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Details", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The related [[SoftLayer_Network_Subnet_Registration|registration object]]. +func (r Network_Subnet_Registration_Details) GetRegistration() (resp datatypes.Network_Subnet_Registration, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Details", "getRegistration", nil, &r.Options, &resp) + return +} + +// Subnet Registration Status objects describe the current status of a subnet registration. +// +// The standard values for these objects are as follows:
+// * OPEN - Indicates that the registration object is new and has yet to be submitted to the RIR
+// * PENDING - Indicates that the registration object has been submitted to the RIR and is awaiting response
+// * COMPLETE - Indicates that the RIR action has completed
+// * DELETED - Indicates that the registration object has been gracefully removed and is no longer valid
+// * CANCELLED - Indicates that the registration object has been abruptly removed and is no longer valid
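+//
+// For example (sketch; sess and the registration id are placeholders, and the KeyName field is an assumption about the datatypes struct), a registration can be polled until the RIR has acted:
+//
+//    status, err := GetNetworkSubnetRegistrationService(sess).Id(12345).GetStatus()
+//    if err == nil && status.KeyName != nil && *status.KeyName == "COMPLETE" {
+//        // the RIR action has completed
+//    }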
    +type Network_Subnet_Registration_Status struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetRegistrationStatusService returns an instance of the Network_Subnet_Registration_Status SoftLayer service +func GetNetworkSubnetRegistrationStatusService(sess *session.Session) Network_Subnet_Registration_Status { + return Network_Subnet_Registration_Status{Session: sess} +} + +func (r Network_Subnet_Registration_Status) Id(id int) Network_Subnet_Registration_Status { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_Registration_Status) Mask(mask string) Network_Subnet_Registration_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_Registration_Status) Filter(filter string) Network_Subnet_Registration_Status { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_Registration_Status) Limit(limit int) Network_Subnet_Registration_Status { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_Registration_Status) Offset(offset int) Network_Subnet_Registration_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Network_Subnet_Registration_Status) GetAllObjects() (resp []datatypes.Network_Subnet_Registration_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Status", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Network_Subnet_Registration_Status) GetObject() (resp datatypes.Network_Subnet_Registration_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Registration_Status", "getObject", nil, &r.Options, &resp) + return +} + +// Every SoftLayer customer account has contact information associated with it for reverse WHOIS purposes. An account's RWHOIS data, modeled by the SoftLayer_Network_Subnet_Rwhois_Data data type, is used by SoftLayer's reverse WHOIS server as well as for SWIP transactions. SoftLayer's reverse WHOIS servers respond to WHOIS queries for IP addresses belonging to a customer's servers, returning this RWHOIS data. +// +// A SoftLayer customer's RWHOIS data may not necessarily match their account or portal users' contact information. 
+type Network_Subnet_Rwhois_Data struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetRwhoisDataService returns an instance of the Network_Subnet_Rwhois_Data SoftLayer service +func GetNetworkSubnetRwhoisDataService(sess *session.Session) Network_Subnet_Rwhois_Data { + return Network_Subnet_Rwhois_Data{Session: sess} +} + +func (r Network_Subnet_Rwhois_Data) Id(id int) Network_Subnet_Rwhois_Data { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_Rwhois_Data) Mask(mask string) Network_Subnet_Rwhois_Data { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_Rwhois_Data) Filter(filter string) Network_Subnet_Rwhois_Data { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_Rwhois_Data) Limit(limit int) Network_Subnet_Rwhois_Data { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_Rwhois_Data) Offset(offset int) Network_Subnet_Rwhois_Data { + r.Options.Offset = &offset + return r +} + +// Edit the RWHOIS record by passing in a modified version of the record object. All fields are editable. +func (r Network_Subnet_Rwhois_Data) EditObject(templateObject *datatypes.Network_Subnet_Rwhois_Data) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Rwhois_Data", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer customer account associated with this reverse WHOIS data. +func (r Network_Subnet_Rwhois_Data) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Rwhois_Data", "getAccount", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Subnet_Rwhois_Data object whose ID corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Subnet_Rwhois_Data service. +// +// The best way to get Rwhois Data for an account is through getRwhoisData on the Account service. +func (r Network_Subnet_Rwhois_Data) GetObject() (resp datatypes.Network_Subnet_Rwhois_Data, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Rwhois_Data", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Subnet_Swip_Transaction data type contains basic information tracked at SoftLayer to allow automation of Swip creation, update, and removal requests. A specific transaction is attached to an accountId and a subnetId. This also contains a "Status Name" which tells the customer what the transaction is doing: +// +// +// * REQUEST QUEUED: Request is queued up to be sent to ARIN +// * REQUEST SENT: The email request has been sent to ARIN +// * REQUEST CONFIRMED: ARIN has confirmed that the request is good, and should be available in 24 hours +// * OK: The subnet has been checked with WHOIS and the SWIP transaction has completed correctly +// * REMOVE QUEUED: A subnet is queued to be removed from ARIN's systems +// * REMOVE SENT: The removal email request has been sent to ARIN +// * REMOVE CONFIRMED: ARIN has confirmed that the removal request is good, and the subnet should be clear in WHOIS in 24 hours +// * DELETED: This specific SWIP Transaction has been removed from ARIN and is no longer in effect +// * SOFTLAYER MANUALLY PROCESSING: Sometimes a request doesn't go through correctly and has to be manually processed by SoftLayer. This may take some time.
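+//
+// A sketch of checking the account's SWIP work in flight (sess is a placeholder for an authenticated *session.Session):
+//
+//    txns, err := GetNetworkSubnetSwipTransactionService(sess).FindMyTransactions()
+//    if err == nil {
+//        fmt.Printf("%d SWIP transactions on file\n", len(txns))
+//    }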
+type Network_Subnet_Swip_Transaction struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkSubnetSwipTransactionService returns an instance of the Network_Subnet_Swip_Transaction SoftLayer service +func GetNetworkSubnetSwipTransactionService(sess *session.Session) Network_Subnet_Swip_Transaction { + return Network_Subnet_Swip_Transaction{Session: sess} +} + +func (r Network_Subnet_Swip_Transaction) Id(id int) Network_Subnet_Swip_Transaction { + r.Options.Id = &id + return r +} + +func (r Network_Subnet_Swip_Transaction) Mask(mask string) Network_Subnet_Swip_Transaction { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Subnet_Swip_Transaction) Filter(filter string) Network_Subnet_Swip_Transaction { + r.Options.Filter = filter + return r +} + +func (r Network_Subnet_Swip_Transaction) Limit(limit int) Network_Subnet_Swip_Transaction { + r.Options.Limit = &limit + return r +} + +func (r Network_Subnet_Swip_Transaction) Offset(offset int) Network_Subnet_Swip_Transaction { + r.Options.Offset = &offset + return r +} + +// This function will return an array of SoftLayer_Network_Subnet_Swip_Transaction objects, one for each SWIP that is currently in transaction with ARIN. This includes all swip registrations, swip removal requests, and SWIP objects that are currently OK. +func (r Network_Subnet_Swip_Transaction) FindMyTransactions() (resp []datatypes.Network_Subnet_Swip_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "findMyTransactions", nil, &r.Options, &resp) + return +} + +// Retrieve The Account whose RWHOIS data was used to SWIP this subnet +func (r Network_Subnet_Swip_Transaction) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "getAccount", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Subnet_Swip_Transaction object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Subnet_Swip_transaction service. You can only retrieve Swip transactions tied to the account. +func (r Network_Subnet_Swip_Transaction) GetObject() (resp datatypes.Network_Subnet_Swip_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The subnet that this SWIP transaction was created for. +func (r Network_Subnet_Swip_Transaction) GetSubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "getSubnet", nil, &r.Options, &resp) + return +} + +// This method finds all subnets attached to your account that are in OK status and starts "DELETE" transactions with ARIN, allowing you to remove your SWIP registration information. +func (r Network_Subnet_Swip_Transaction) RemoveAllSubnetSwips() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "removeAllSubnetSwips", nil, &r.Options, &resp) + return +} + +// This function, when called on an instantiated SWIP transaction, will allow you to start a "DELETE" transaction with ARIN, allowing you to remove your SWIP registration information. 
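+//
+// A sketch, with a placeholder transaction id and session:
+//
+//    ok, err := GetNetworkSubnetSwipTransactionService(sess).Id(987).RemoveSwipData()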
+func (r Network_Subnet_Swip_Transaction) RemoveSwipData() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "removeSwipData", nil, &r.Options, &resp) + return +} + +// This function will allow you to update ARIN's registration data for a subnet to your current RWHOIS data. +func (r Network_Subnet_Swip_Transaction) ResendSwipData() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "resendSwipData", nil, &r.Options, &resp) + return +} + +// swipAllSubnets finds all subnets attached to your account and attempts to create a SWIP transaction for all subnets that do not already have a SWIP transaction in progress. +func (r Network_Subnet_Swip_Transaction) SwipAllSubnets() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "swipAllSubnets", nil, &r.Options, &resp) + return +} + +// This method finds all subnets attached to your account that are in "OK" status and updates their data with ARIN. Use this function after you have updated your RWHOIS data if you want to keep SWIP up to date. +func (r Network_Subnet_Swip_Transaction) UpdateAllSubnetSwips() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Subnet_Swip_Transaction", "updateAllSubnetSwips", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Network_TippingPointReporting struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkTippingPointReportingService returns an instance of the Network_TippingPointReporting SoftLayer service +func GetNetworkTippingPointReportingService(sess *session.Session) Network_TippingPointReporting { + return Network_TippingPointReporting{Session: sess} +} + +func (r Network_TippingPointReporting) Id(id int) Network_TippingPointReporting { + r.Options.Id = &id + return r +} + +func (r Network_TippingPointReporting) Mask(mask string) Network_TippingPointReporting { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_TippingPointReporting) Filter(filter string) Network_TippingPointReporting { + r.Options.Filter = filter + return r +} + +func (r Network_TippingPointReporting) Limit(limit int) Network_TippingPointReporting { + r.Options.Limit = &limit + return r +} + +func (r Network_TippingPointReporting) Offset(offset int) Network_TippingPointReporting { + r.Options.Offset = &offset + return r +} + +// This method, when given an attack signature ID (available in the return values of getReportForIpAddressOrSubnet and getSubnetReportForEntireAccount) and an IP Address and subnet mask, returns all attacks for that subnet in the specified time frame and direction. Once the results have been filtered, additional data is available, including starting and ending times for the attack, originating IP address and port, and destination IP address and port. +// +// CVE and Bugtraq information is not available at this level. 
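+//
+// Because every argument is passed by pointer, a call might be sketched as follows (all values are placeholders):
+//
+//    sig := "1234"
+//    ip := "198.51.100.0"
+//    maskBits := 24
+//    timeFrame := 24
+//    dir := "Inbound"
+//    report, err := GetNetworkTippingPointReportingService(sess).
+//        DrillDownAttack(&sig, &ip, &maskBits, &timeFrame, &dir)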
+func (r Network_TippingPointReporting) DrillDownAttack(signatureId *string, IpAddress *string, subnetMask *int, timeFrame *int, direction *string) (resp datatypes.Container_Network_IntrusionProtection_SubnetReport, err error) { + params := []interface{}{ + signatureId, + IpAddress, + subnetMask, + timeFrame, + direction, + } + err = r.Session.DoRequest("SoftLayer_Network_TippingPointReporting", "drillDownAttack", params, &r.Options, &resp) + return +} + +// This method returns the attack statistics for the current user's account and for the entire SoftLayer network. These attacks are recorded and monitored at the entry point to the network, and represent attacks in both directions. +// +// The data returned is: +// * Top attacks (by attack name) on datacenter Dal01 in the last hour (and last 24 hours) +// * Top attacks (by attack name) on IPs you own in the last hour (and last 24 hours) +// * Top IPs attacking IPs you own in the last hour (and last 24 hours) +// Each one of these lists can contain any number of items, the default is 5. The usable limit is less than 10, but setting the limit to an abnormally high value will effectively return all records. +// +// The data is returned as a collection of SoftLayer_Container_Network_IntrusionProtection_Statistics objects. +func (r Network_TippingPointReporting) GetMainStatistics(numberOfAttacks *int) (resp []datatypes.Container_Network_IntrusionProtection_Statistics, err error) { + params := []interface{}{ + numberOfAttacks, + } + err = r.Session.DoRequest("SoftLayer_Network_TippingPointReporting", "getMainStatistics", params, &r.Options, &resp) + return +} + +// This method expands on the getSubnetReportForEntireAccount method by offering the ability to filter by subnet or IP address. This method is identical to getSubnetReportForEntireAccount, but allows filtering by subnet. Like in the getSubnetReportForEntireAccount method, CVE and BugTraq IDs are provided, if available. +// +// This method should be called once an attack has been identified using getSubnetReportForEntireAccount (in which case "All Subnets" is the subnet) or getReportForIpAddressOrSubnet. +func (r Network_TippingPointReporting) GetReportForIpAddressOrSubnet(IpAddress *string, subnetMask *int, timeFrame *int, orderBy *string, orderDirection *string) (resp []datatypes.Container_Network_IntrusionProtection_SubnetReport, err error) { + params := []interface{}{ + IpAddress, + subnetMask, + timeFrame, + orderBy, + orderDirection, + } + err = r.Session.DoRequest("SoftLayer_Network_TippingPointReporting", "getReportForIpAddressOrSubnet", params, &r.Options, &resp) + return +} + +// This method returns specific attacks by name for all subnets on the current user's account. +// +// The data returned is stored in SoftLayer_Container_Network_IntrusionProtection_SubnetReport objects, with the "subnet" value set to "All Subnets" +// +// The data is separated into "Inbound" and "Outbound" traffic. A significant amount of outbound attack traffic could indicate that your servers have been compromised. +// +// The data returned includes Attack Count, attack name, extended attack description, and IDs that correspond with the BugTraq or CVE databases. 
BugTraq can be accessed at [http://www.securityfocus.com/vulnerabilities] The CVE database is located at [http://cve.mitre.org/find/index.html] +// +// For more detailed information, use the getReportForIpAddressOrSubnet method +func (r Network_TippingPointReporting) GetSubnetReportForEntireAccount(timeFrame *int, orderBy *string, orderDirection *string, returnSubnetGroups *bool) (resp []datatypes.Container_Network_IntrusionProtection_SubnetReport, err error) { + params := []interface{}{ + timeFrame, + orderBy, + orderDirection, + returnSubnetGroups, + } + err = r.Session.DoRequest("SoftLayer_Network_TippingPointReporting", "getSubnetReportForEntireAccount", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Tunnel_Module_Context data type contains general information relating to a single SoftLayer network tunnel. The SoftLayer_Network_Tunnel_Module_Context is useful to gather information such as related customer subnets (remote) and internal subnets (local) associated with the network tunnel as well as other information needed to manage the network tunnel. Account and billing information related to the network tunnel can also be retrieved. +type Network_Tunnel_Module_Context struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkTunnelModuleContextService returns an instance of the Network_Tunnel_Module_Context SoftLayer service +func GetNetworkTunnelModuleContextService(sess *session.Session) Network_Tunnel_Module_Context { + return Network_Tunnel_Module_Context{Session: sess} +} + +func (r Network_Tunnel_Module_Context) Id(id int) Network_Tunnel_Module_Context { + r.Options.Id = &id + return r +} + +func (r Network_Tunnel_Module_Context) Mask(mask string) Network_Tunnel_Module_Context { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Tunnel_Module_Context) Filter(filter string) Network_Tunnel_Module_Context { + r.Options.Filter = filter + return r +} + +func (r Network_Tunnel_Module_Context) Limit(limit int) Network_Tunnel_Module_Context { + r.Options.Limit = &limit + return r +} + +func (r Network_Tunnel_Module_Context) Offset(offset int) Network_Tunnel_Module_Context { + r.Options.Offset = &offset + return r +} + +// Associates a remote subnet to the network tunnel. When a remote subnet is associated, a network tunnel will allow the customer (remote) network to communicate with the private and service subnets on the SoftLayer network which are on the other end of this network tunnel. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the association described above to take effect. +func (r Network_Tunnel_Module_Context) AddCustomerSubnetToNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "addCustomerSubnetToNetworkTunnel", params, &r.Options, &resp) + return +} + +// Associates a private subnet to the network tunnel. When a private subnet is associated, the network tunnel will allow the customer (remote) network to access the private subnet. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the association described above to take effect. 
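+//
+// Since the NOTE applies to every association method, a sketch pairs the association with applyConfigurationsToDevice (the ids and sess are placeholders):
+//
+//    svc := GetNetworkTunnelModuleContextService(sess).Id(4321)
+//    subnetId := 555
+//    if ok, err := svc.AddPrivateSubnetToNetworkTunnel(&subnetId); err == nil && ok {
+//        _, _ = svc.ApplyConfigurationsToDevice()
+//    }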
+func (r Network_Tunnel_Module_Context) AddPrivateSubnetToNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "addPrivateSubnetToNetworkTunnel", params, &r.Options, &resp) + return +} + +// Associates a service subnet to the network tunnel. When a service subnet is associated, a network tunnel will allow the customer (remote) network to communicate with the private and service subnets on the SoftLayer network which are on the other end of this network tunnel. Service subnets provide access to SoftLayer services such as the customer management portal and the SoftLayer API. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the association described above to take effect. +func (r Network_Tunnel_Module_Context) AddServiceSubnetToNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "addServiceSubnetToNetworkTunnel", params, &r.Options, &resp) + return +} + +// A transaction will be created to apply the IPSec network tunnel's configuration to SoftLayer network devices. During this time, an IPSec network tunnel cannot be modified in any way. Only one network tunnel configuration transaction can be created. If a transaction has been created or is running, a new transaction cannot be created until the previous transaction completes. +func (r Network_Tunnel_Module_Context) ApplyConfigurationsToDevice() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "applyConfigurationsToDevice", nil, &r.Options, &resp) + return +} + +// Create an address translation for a network tunnel. +// +// To create an address translation, ip addresses from an assigned /30 static route subnet are used. Address translations deliver packets to a destination ip address that is on a customer (remote) subnet. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for an address translation to be created. +func (r Network_Tunnel_Module_Context) CreateAddressTranslation(translation *datatypes.Network_Tunnel_Module_Context_Address_Translation) (resp datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + params := []interface{}{ + translation, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "createAddressTranslation", params, &r.Options, &resp) + return +} + +// This has the same functionality as the SoftLayer_Network_Tunnel_Module_Context::createAddressTranslation. However, it allows multiple translations to be passed in for creation. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the address translations to be created. +func (r Network_Tunnel_Module_Context) CreateAddressTranslations(translations []datatypes.Network_Tunnel_Module_Context_Address_Translation) (resp []datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + params := []interface{}{ + translations, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "createAddressTranslations", params, &r.Options, &resp) + return +} + +// Remove an existing address translation from a network tunnel. +// +// Address translations deliver packets to a destination ip address that is on a customer subnet (remote).
+// +// NOTE: A network tunnel's configurations must be applied to the network device in order for an address translation to be deleted. +func (r Network_Tunnel_Module_Context) DeleteAddressTranslation(translationId *int) (resp bool, err error) { + params := []interface{}{ + translationId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "deleteAddressTranslation", params, &r.Options, &resp) + return +} + +// Provides all of the address translation configurations for an IPSec VPN tunnel in a text file +func (r Network_Tunnel_Module_Context) DownloadAddressTranslationConfigurations() (resp datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "downloadAddressTranslationConfigurations", nil, &r.Options, &resp) + return +} + +// Provides all of the configurations for an IPSec VPN network tunnel in a text file +func (r Network_Tunnel_Module_Context) DownloadParameterConfigurations() (resp datatypes.Container_Utility_File_Entity, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "downloadParameterConfigurations", nil, &r.Options, &resp) + return +} + +// Edit name, source (SoftLayer IP) ip address and/or destination (Customer IP) ip address for an existing address translation for a network tunnel. +// +// Address translations deliver packets to a destination ip address that is on a customer (remote) subnet. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for an address translation to be created. +func (r Network_Tunnel_Module_Context) EditAddressTranslation(translation *datatypes.Network_Tunnel_Module_Context_Address_Translation) (resp datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + params := []interface{}{ + translation, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "editAddressTranslation", params, &r.Options, &resp) + return +} + +// Edit name, source (SoftLayer IP) ip address and/or destination (Customer IP) ip address for existing address translations for a network tunnel. +// +// Address translations deliver packets to a destination ip address that is on a customer (remote) subnet. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for an address translation to be modified. +func (r Network_Tunnel_Module_Context) EditAddressTranslations(translations []datatypes.Network_Tunnel_Module_Context_Address_Translation) (resp []datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + params := []interface{}{ + translations, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "editAddressTranslations", params, &r.Options, &resp) + return +} + +// Negotiation parameters for both phases one and two are editable. Here are the phase one and two parameters that can be modified: +// +// +// *Phase One +// **Authentication +// ***Default value is set to MD5. +// ***Valid Options are: MD5, SHA1, SHA256. +// **Encryption +// ***Default value is set to 3DES. +// ***Valid Options are: DES, 3DES, AES128, AES192, AES256. +// **Diffie-Hellman Group +// ***Default value is set to 2. +// ***Valid Options are: 0 (None), 1, 2, 5. +// **Keylife +// ***Default value is set to 3600. +// ***Limits are: MIN = 120, MAX = 172800 +// **Preshared Key +// *Phase Two +// **Authentication +// ***Default value is set to MD5. +// ***Valid Options are: MD5, SHA1, SHA256.
+// **Encryption +// ***Default value is set to 3DES. +// ***Valid Options are: DES, 3DES, AES128, AES192, AES256. +// **Diffie-Hellman Group +// ***Default value is set to 2. +// ***Valid Options are: 0 (None), 1, 2, 5. +// **Keylife +// ***Default value is set to 28800. +// ***Limits are: MIN = 120, MAX = 172800 +// **Perfect Forward Secrecy +// ***Valid Options are: Off = 0, On = 1. +// ***NOTE: If perfect forward secrecy is turned On (set to 1), then a phase 2 diffie-hellman group is required. +// +// +// The remote peer address for the network tunnel may also be modified if needed. Invalid options will not be accepted and will cause an exception to be thrown. There are properties that provide valid options and limits for each negotiation parameter. Those properties are as follows: +// * encryptionDefault +// * encryptionOptions +// * authenticationDefault +// * authenticationOptions +// * diffieHellmanGroupDefault +// * diffieHellmanGroupOptions +// * phaseOneKeylifeDefault +// * phaseTwoKeylifeDefault +// * keylifeLimits +// +// +// Configurations cannot be modified if a network tunnel requires complex manual setups/configuration modifications by the SoftLayer Network department. If manual configuration is required, the configurations for the network tunnel will be locked until the manual configurations are complete. A network tunnel's configurations are applied via a transaction. If a network tunnel configuration change transaction is currently running, the network tunnel's settings cannot be modified until the running transaction completes. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the modifications made to take effect. +func (r Network_Tunnel_Module_Context) EditObject(templateObject *datatypes.Network_Tunnel_Module_Context) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The account that a network tunnel belongs to. +func (r Network_Tunnel_Module_Context) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The transaction that is currently applying configurations for the network tunnel. +func (r Network_Tunnel_Module_Context) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getActiveTransaction", nil, &r.Options, &resp) + return +} + +// The address translations will be returned. All the translations will be formatted so that the configurations can be copied into a host file. +// +// Format: +// +// {address translation SoftLayer IP Address} {address translation name} +func (r Network_Tunnel_Module_Context) GetAddressTranslationConfigurations() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAddressTranslationConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve A network tunnel's address translations.
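+//
+// A sketch, with a placeholder tunnel id and object mask:
+//
+//    xlations, err := GetNetworkTunnelModuleContextService(sess).
+//        Id(4321).
+//        Mask("id;name").
+//        GetAddressTranslations()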
+func (r Network_Tunnel_Module_Context) GetAddressTranslations() (resp []datatypes.Network_Tunnel_Module_Context_Address_Translation, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAddressTranslations", nil, &r.Options, &resp) + return +} + +// Retrieve Subnets that provide access to SoftLayer services such as the management portal and the SoftLayer API. +func (r Network_Tunnel_Module_Context) GetAllAvailableServiceSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAllAvailableServiceSubnets", nil, &r.Options, &resp) + return +} + +// The default authentication type used for both phases of the negotiation process. The default value is set to MD5. +func (r Network_Tunnel_Module_Context) GetAuthenticationDefault() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAuthenticationDefault", nil, &r.Options, &resp) + return +} + +// Authentication options available for both phases of the negotiation process. +// +// The authentication options are as follows: +// * MD5 +// * SHA1 +// * SHA256 +func (r Network_Tunnel_Module_Context) GetAuthenticationOptions() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getAuthenticationOptions", nil, &r.Options, &resp) + return +} + +// Retrieve The current billing item for network tunnel. +func (r Network_Tunnel_Module_Context) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve Remote subnets that are allowed access through a network tunnel. +func (r Network_Tunnel_Module_Context) GetCustomerSubnets() (resp []datatypes.Network_Customer_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getCustomerSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenter location for one end of the network tunnel that allows access to account's private subnets. +func (r Network_Tunnel_Module_Context) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getDatacenter", nil, &r.Options, &resp) + return +} + +// The default Diffie-Hellman group used for both phases of the negotiation process. The default value is set to 2. +func (r Network_Tunnel_Module_Context) GetDiffieHellmanGroupDefault() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getDiffieHellmanGroupDefault", nil, &r.Options, &resp) + return +} + +// The Diffie-Hellman group options used for both phases of the negotiation process. +// +// The diffie-hellman group options are as follows: +// * 0 (None) +// * 1 +// * 2 +// * 5 +func (r Network_Tunnel_Module_Context) GetDiffieHellmanGroupOptions() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getDiffieHellmanGroupOptions", nil, &r.Options, &resp) + return +} + +// The default encryption type used for both phases of the negotiation process. The default value is set to 3DES. +func (r Network_Tunnel_Module_Context) GetEncryptionDefault() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getEncryptionDefault", nil, &r.Options, &resp) + return +} + +// Encryption options available for both phases of the negotiation process. 
+// +// The valid encryption options are as follows: +// * DES +// * 3DES +// * AES128 +// * AES192 +// * AES256 +func (r Network_Tunnel_Module_Context) GetEncryptionOptions() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getEncryptionOptions", nil, &r.Options, &resp) + return +} + +// Retrieve Private subnets that can be accessed through the network tunnel. +func (r Network_Tunnel_Module_Context) GetInternalSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getInternalSubnets", nil, &r.Options, &resp) + return +} + +// The keylife limits. Keylife min limit is set to 120. Keylife max limit is set to 172800. +func (r Network_Tunnel_Module_Context) GetKeylifeLimits() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getKeylifeLimits", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Tunnel_Module_Context object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Tunnel_Module_Context service. The IPSec network tunnel will be returned if it is associated with the account and the user has proper permission to manage network tunnels. +func (r Network_Tunnel_Module_Context) GetObject() (resp datatypes.Network_Tunnel_Module_Context, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getObject", nil, &r.Options, &resp) + return +} + +// All of the IPSec VPN tunnel's configurations will be returned. It will list all of phase one and two negotiation parameters. Both remote and local subnets will be provided as well. This is useful when the configurations need to be passed on to another team and/or company for internal network configuration. +func (r Network_Tunnel_Module_Context) GetParameterConfigurationsForCustomerView() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getParameterConfigurationsForCustomerView", nil, &r.Options, &resp) + return +} + +// The default phase 1 keylife used if a value is not provided. The default value is set to 3600. +func (r Network_Tunnel_Module_Context) GetPhaseOneKeylifeDefault() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getPhaseOneKeylifeDefault", nil, &r.Options, &resp) + return +} + +// The default phase 2 keylife used if a value is not provided. The default value is set to 28800. +func (r Network_Tunnel_Module_Context) GetPhaseTwoKeylifeDefault() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getPhaseTwoKeylifeDefault", nil, &r.Options, &resp) + return +} + +// Retrieve Service subnets that can be accessed through the network tunnel. +func (r Network_Tunnel_Module_Context) GetServiceSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getServiceSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve Subnets used for a network tunnel's address translations. +func (r Network_Tunnel_Module_Context) GetStaticRouteSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getStaticRouteSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The transaction history for this network tunnel.
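+//
+// A sketch, with a placeholder tunnel id:
+//
+//    history, err := GetNetworkTunnelModuleContextService(sess).Id(4321).GetTransactionHistory()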
+func (r Network_Tunnel_Module_Context) GetTransactionHistory() (resp []datatypes.Provisioning_Version1_Transaction, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "getTransactionHistory", nil, &r.Options, &resp) + return +} + +// Disassociate a customer subnet (remote) from a network tunnel. When a remote subnet is disassociated, that subnet will not be able to communicate with private and service subnets on the SoftLayer network. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the disassociation described above to take effect. +func (r Network_Tunnel_Module_Context) RemoveCustomerSubnetFromNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "removeCustomerSubnetFromNetworkTunnel", params, &r.Options, &resp) + return +} + +// Disassociate a private subnet from a network tunnel. When a private subnet is disassociated, the customer (remote) subnet on the other end of the tunnel will not be able to communicate with the private subnet that was just disassociated. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the disassociation described above to take effect. +func (r Network_Tunnel_Module_Context) RemovePrivateSubnetFromNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "removePrivateSubnetFromNetworkTunnel", params, &r.Options, &resp) + return +} + +// Disassociate a service subnet from a network tunnel. When a service subnet is disassociated, that customer (remote) subnet on the other end of the network tunnel will not be able to communicate with that service subnet on the SoftLayer network. +// +// NOTE: A network tunnel's configurations must be applied to the network device in order for the disassociation described above to take effect. +func (r Network_Tunnel_Module_Context) RemoveServiceSubnetFromNetworkTunnel(subnetId *int) (resp bool, err error) { + params := []interface{}{ + subnetId, + } + err = r.Session.DoRequest("SoftLayer_Network_Tunnel_Module_Context", "removeServiceSubnetFromNetworkTunnel", params, &r.Options, &resp) + return +} + +// The SoftLayer_Network_Vlan data type models a single VLAN within SoftLayer's public and private networks. A virtual LAN is a structure that associates network interfaces on routers, switches, and servers in different locations to act as if they were on the same local network broadcast domain. VLANs are a central part of the SoftLayer network. They can determine how new IP subnets are routed and how individual servers communicate with each other.
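+//
+// A sketch of the usual retrieval pattern (sess and the VLAN id are placeholders; the mask property names are illustrative). Note that the Mask helper defined for each service in this file wraps a bracketed mask in mask[...] automatically:
+//
+//    vlan, err := GetNetworkVlanService(sess).
+//        Id(1234).
+//        Mask("id;vlanNumber;primaryRouter[hostname]").
+//        GetObject()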
+type Network_Vlan struct { + Session *session.Session + Options sl.Options +} + +// GetNetworkVlanService returns an instance of the Network_Vlan SoftLayer service +func GetNetworkVlanService(sess *session.Session) Network_Vlan { + return Network_Vlan{Session: sess} +} + +func (r Network_Vlan) Id(id int) Network_Vlan { + r.Options.Id = &id + return r +} + +func (r Network_Vlan) Mask(mask string) Network_Vlan { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Network_Vlan) Filter(filter string) Network_Vlan { + r.Options.Filter = filter + return r +} + +func (r Network_Vlan) Limit(limit int) Network_Vlan { + r.Options.Limit = &limit + return r +} + +func (r Network_Vlan) Offset(offset int) Network_Vlan { + r.Options.Offset = &offset + return r +} + +// Edit a VLAN's properties +func (r Network_Vlan) EditObject(templateObject *datatypes.Network_Vlan) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer customer account associated with a VLAN. +func (r Network_Vlan) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve A VLAN's additional primary subnets. These are used to extend the number of servers attached to the VLAN by adding more ip addresses to the primary IP address pool. +func (r Network_Vlan) GetAdditionalPrimarySubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getAdditionalPrimarySubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The gateway this VLAN is inside of. +func (r Network_Vlan) GetAttachedNetworkGateway() (resp datatypes.Network_Gateway, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getAttachedNetworkGateway", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not this VLAN is inside a gateway. +func (r Network_Vlan) GetAttachedNetworkGatewayFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getAttachedNetworkGatewayFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The inside VLAN record if this VLAN is inside a network gateway. +func (r Network_Vlan) GetAttachedNetworkGatewayVlan() (resp datatypes.Network_Gateway_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getAttachedNetworkGatewayVlan", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a network vlan. +func (r Network_Vlan) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Get a set of reasons why this VLAN may not be cancelled. If the result is empty, this VLAN may be cancelled. +func (r Network_Vlan) GetCancelFailureReasons() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getCancelFailureReasons", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a network vlan is on a Hardware Firewall (Dedicated). 
+func (r Network_Vlan) GetDedicatedFirewallFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getDedicatedFirewallFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The extension router that a VLAN is associated with. +func (r Network_Vlan) GetExtensionRouter() (resp datatypes.Hardware_Router, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getExtensionRouter", nil, &r.Options, &resp) + return +} + +// Retrieve A firewalled Vlan's network components. +func (r Network_Vlan) GetFirewallGuestNetworkComponents() (resp []datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallGuestNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A firewalled vlan's inbound/outbound interfaces. +func (r Network_Vlan) GetFirewallInterfaces() (resp []datatypes.Network_Firewall_Module_Context_Interface, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallInterfaces", nil, &r.Options, &resp) + return +} + +// Retrieve A firewalled Vlan's network components. +func (r Network_Vlan) GetFirewallNetworkComponents() (resp []datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallNetworkComponents", nil, &r.Options, &resp) + return +} + +// Get the IP addresses associated with this server that are protectable by a network component firewall. Note, this may not return all values for IPv6 subnets for this VLAN. Please use getFirewallProtectableSubnets to get all protectable subnets. +func (r Network_Vlan) GetFirewallProtectableIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallProtectableIpAddresses", nil, &r.Options, &resp) + return +} + +// Get the subnets associated with this server that are protectable by a network component firewall. +func (r Network_Vlan) GetFirewallProtectableSubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallProtectableSubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The currently running rule set of a firewalled VLAN. +func (r Network_Vlan) GetFirewallRules() (resp []datatypes.Network_Vlan_Firewall_Rule, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getFirewallRules", nil, &r.Options, &resp) + return +} + +// Retrieve The networking components that are connected to a VLAN. +func (r Network_Vlan) GetGuestNetworkComponents() (resp []datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getGuestNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve All of the hardware that exists on a VLAN. Hardware is associated with a VLAN by its networking components. +func (r Network_Vlan) GetHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Vlan) GetHighAvailabilityFirewallFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getHighAvailabilityFirewallFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating that a vlan can be assigned to a host that has local disk functionality. 
+func (r Network_Vlan) GetLocalDiskStorageCapabilityFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getLocalDiskStorageCapabilityFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The network in which this VLAN resides. +func (r Network_Vlan) GetNetwork() (resp datatypes.Network, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getNetwork", nil, &r.Options, &resp) + return +} + +// Retrieve The network components that are connected to this VLAN through a trunk. +func (r Network_Vlan) GetNetworkComponentTrunks() (resp []datatypes.Network_Component_Network_Vlan_Trunk, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getNetworkComponentTrunks", nil, &r.Options, &resp) + return +} + +// Retrieve The networking components that are connected to a VLAN. +func (r Network_Vlan) GetNetworkComponents() (resp []datatypes.Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve Identifier to denote whether a VLAN is used for public or private connectivity. +func (r Network_Vlan) GetNetworkSpace() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getNetworkSpace", nil, &r.Options, &resp) + return +} + +// Retrieve The Hardware Firewall (Dedicated) for a network vlan. +func (r Network_Vlan) GetNetworkVlanFirewall() (resp datatypes.Network_Vlan_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getNetworkVlanFirewall", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Network_Vlan object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Network_Vlan service. You can only retrieve VLANs that are associated with your SoftLayer customer account. +func (r Network_Vlan) GetObject() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The primary router that a VLAN is associated with. Every SoftLayer VLAN is connected to more than one router for greater network redundancy. +func (r Network_Vlan) GetPrimaryRouter() (resp datatypes.Hardware_Router, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrimaryRouter", nil, &r.Options, &resp) + return +} + +// Retrieve A VLAN's primary subnet. Each VLAN has at least one subnet, usually the subnet that is assigned to a server or new IP address block when it's purchased. +func (r Network_Vlan) GetPrimarySubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrimarySubnet", nil, &r.Options, &resp) + return +} + +// Retrieve A VLAN's primary IPv6 subnet. Some VLAN's may not have a primary IPv6 subnet. +func (r Network_Vlan) GetPrimarySubnetVersion6() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrimarySubnetVersion6", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Network_Vlan) GetPrimarySubnets() (resp []datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrimarySubnets", nil, &r.Options, &resp) + return +} + +// Retrieve The gateways this VLAN is the private VLAN of. 
+func (r Network_Vlan) GetPrivateNetworkGateways() (resp []datatypes.Network_Gateway, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrivateNetworkGateways", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve a VLAN's associated private network VLAN. getPrivateVlan gathers its information by retrieving the private VLAN of a VLAN's primary hardware object.
+func (r Network_Vlan) GetPrivateVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrivateVlan", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the private network VLAN associated with an IP address.
+func (r Network_Vlan) GetPrivateVlanByIpAddress(ipAddress *string) (resp datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		ipAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPrivateVlanByIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Vlan) GetProtectedIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getProtectedIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The gateways this VLAN is the public VLAN of.
+func (r Network_Vlan) GetPublicNetworkGateways() (resp []datatypes.Network_Gateway, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPublicNetworkGateways", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the VLAN that belongs to a server's public network interface, as described by a server's fully-qualified domain name. A server's ''FQDN'' is its hostname, followed by a period, then its domain name.
+func (r Network_Vlan) GetPublicVlanByFqdn(fqdn *string) (resp datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		fqdn,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getPublicVlanByFqdn", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The resource group member for a network vlan.
+func (r Network_Vlan) GetResourceGroupMember() (resp []datatypes.Resource_Group_Member, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getResourceGroupMember", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The resource groups in which this VLAN is a member.
+func (r Network_Vlan) GetResourceGroups() (resp []datatypes.Resource_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getResourceGroups", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve all reverse DNS records associated with the subnets assigned to a VLAN.
+func (r Network_Vlan) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getReverseDomainRecords", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A flag indicating that a vlan can be assigned to a host that has SAN disk functionality.
+func (r Network_Vlan) GetSanStorageCapabilityFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getSanStorageCapabilityFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Collection of scale VLANs this VLAN applies to.
+func (r Network_Vlan) GetScaleVlans() (resp []datatypes.Scale_Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getScaleVlans", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The secondary router that a VLAN is associated with. Every SoftLayer VLAN is connected to more than one router for greater network redundancy.
+func (r Network_Vlan) GetSecondaryRouter() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getSecondaryRouter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The subnets that exist as secondary interfaces on a VLAN.
+func (r Network_Vlan) GetSecondarySubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getSecondarySubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All of the subnets that exist as VLAN interfaces.
+func (r Network_Vlan) GetSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve References to all tags for this VLAN.
+func (r Network_Vlan) GetTagReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getTagReferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The number of primary IP addresses in a VLAN.
+func (r Network_Vlan) GetTotalPrimaryIpAddressCount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getTotalPrimaryIpAddressCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of this VLAN.
+func (r Network_Vlan) GetType() (resp datatypes.Network_Vlan_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All of the Virtual Servers that are connected to a VLAN.
+func (r Network_Vlan) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the VLAN associated with an IP address via the IP's associated subnet.
+func (r Network_Vlan) GetVlanForIpAddress(ipAddress *string) (resp datatypes.Network_Vlan, err error) {
+	params := []interface{}{
+		ipAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "getVlanForIpAddress", params, &r.Options, &resp)
+	return
+}
+
+// Tag a VLAN by passing in one or more tags separated by a comma. Tag references are cleared out every time this method is called. If your VLAN is already tagged, you will need to pass the current tags along with any new ones. To remove all tag references, pass an empty string. To remove one or more tags, omit them from the tag list.
+func (r Network_Vlan) SetTags(tags *string) (resp bool, err error) {
+	params := []interface{}{
+		tags,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "setTags", params, &r.Options, &resp)
+	return
+}
+
+// The '''updateFirewallIntraVlanCommunication''' method updates a VLAN's firewall to allow or disallow intra-VLAN communication.
+func (r Network_Vlan) UpdateFirewallIntraVlanCommunication(enabled *bool) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		enabled,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan", "updateFirewallIntraVlanCommunication", params, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Network_Vlan_Firewall data type contains general information relating to a single SoftLayer VLAN firewall. This is the object which ties the running rules to a specific downstream server. Use the [[SoftLayer Network Firewall Template]] service to pull SoftLayer recommended rule set templates. Use the [[SoftLayer Network Firewall Update Request]] service to submit a firewall update request.
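+//
+// As a usage sketch (not part of the generated documentation; the credentials
+// and the firewall id 1234 are placeholders):
+//
+//	sess := session.New("username", "apiKey") // placeholder credentials
+//	fwSvc := services.GetNetworkVlanFirewallService(sess)
+//	rules, err := fwSvc.Id(1234).GetRules() // fetch the running rule set
+//	if err == nil {
+//		fmt.Printf("firewall has %d rules\n", len(rules))
+//	}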
+type Network_Vlan_Firewall struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkVlanFirewallService returns an instance of the Network_Vlan_Firewall SoftLayer service
+func GetNetworkVlanFirewallService(sess *session.Session) Network_Vlan_Firewall {
+	return Network_Vlan_Firewall{Session: sess}
+}
+
+func (r Network_Vlan_Firewall) Id(id int) Network_Vlan_Firewall {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Vlan_Firewall) Mask(mask string) Network_Vlan_Firewall {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Vlan_Firewall) Filter(filter string) Network_Vlan_Firewall {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Vlan_Firewall) Limit(limit int) Network_Vlan_Firewall {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Vlan_Firewall) Offset(offset int) Network_Vlan_Firewall {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve The billing item for a Hardware Firewall (Dedicated).
+func (r Network_Vlan_Firewall) GetBillingItem() (resp datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getBillingItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The datacenter that the firewall resides in.
+func (r Network_Vlan_Firewall) GetDatacenter() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getDatacenter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The firewall device type.
+func (r Network_Vlan_Firewall) GetFirewallType() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getFirewallType", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A name reflecting the hostname and domain of the firewall. This is created automatically from the combined values of the firewall's logical name and vlan number, and thus cannot be edited directly.
+func (r Network_Vlan_Firewall) GetFullyQualifiedDomainName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getFullyQualifiedDomainName", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The credentials to log in to a firewall device. This is only present for dedicated appliances.
+func (r Network_Vlan_Firewall) GetManagementCredentials() (resp datatypes.Software_Component_Password, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getManagementCredentials", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The update requests made for this firewall.
+func (r Network_Vlan_Firewall) GetNetworkFirewallUpdateRequests() (resp []datatypes.Network_Firewall_Update_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getNetworkFirewallUpdateRequests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The VLAN object that a firewall is associated with and protecting.
+func (r Network_Vlan_Firewall) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getNetworkVlan", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The VLAN objects that a firewall is associated with and protecting.
+func (r Network_Vlan_Firewall) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getNetworkVlans", nil, &r.Options, &resp)
+	return
+}
+
+// getObject returns a SoftLayer_Network_Vlan_Firewall object. You can only get objects for vlans attached to your account that have a network firewall enabled.
+func (r Network_Vlan_Firewall) GetObject() (resp datatypes.Network_Vlan_Firewall, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The currently running rule set of this network component firewall.
+func (r Network_Vlan_Firewall) GetRules() (resp []datatypes.Network_Vlan_Firewall_Rule, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getRules", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Network_Vlan_Firewall) GetTagReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "getTagReferences", nil, &r.Options, &resp)
+	return
+}
+
+// This will completely reset the firewall to factory settings. If the firewall is not a dedicated appliance, an error will occur. Note that this process is performed asynchronously; while it runs, traffic will not be routed through the firewall.
+func (r Network_Vlan_Firewall) RestoreDefaults() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "restoreDefaults", nil, &r.Options, &resp)
+	return
+}
+
+// This method will associate a comma-separated list of tags with this object.
+func (r Network_Vlan_Firewall) SetTags(tags *string) (resp bool, err error) {
+	params := []interface{}{
+		tags,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "setTags", params, &r.Options, &resp)
+	return
+}
+
+// Enable or disable route bypass for this context. If enabled, this will bypass the firewall entirely and all traffic will be routed directly to the host(s) behind it. If disabled, traffic will flow through the firewall normally. This feature is only available for Hardware Firewall (Dedicated) and dedicated appliances.
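+//
+// A minimal sketch of enabling bypass (assumes the sl.Bool pointer helper from
+// the sl package and a placeholder firewall id; sess is an authenticated
+// *session.Session):
+//
+//	fwSvc := services.GetNetworkVlanFirewallService(sess)
+//	txn, err := fwSvc.Id(1234).UpdateRouteBypass(sl.Bool(true)) // returns an async transaction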
+func (r Network_Vlan_Firewall) UpdateRouteBypass(bypass *bool) (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	params := []interface{}{
+		bypass,
+	}
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Firewall", "updateRouteBypass", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Network_Vlan_Type struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNetworkVlanTypeService returns an instance of the Network_Vlan_Type SoftLayer service
+func GetNetworkVlanTypeService(sess *session.Session) Network_Vlan_Type {
+	return Network_Vlan_Type{Session: sess}
+}
+
+func (r Network_Vlan_Type) Id(id int) Network_Vlan_Type {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Network_Vlan_Type) Mask(mask string) Network_Vlan_Type {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Network_Vlan_Type) Filter(filter string) Network_Vlan_Type {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Network_Vlan_Type) Limit(limit int) Network_Vlan_Type {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Network_Vlan_Type) Offset(offset int) Network_Vlan_Type {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Network_Vlan_Type) GetObject() (resp datatypes.Network_Vlan_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Network_Vlan_Type", "getObject", nil, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/notification.go b/vendor/github.com/softlayer/softlayer-go/services/notification.go
new file mode 100644
index 0000000000..628ad256d5
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/notification.go
@@ -0,0 +1,892 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// Details provided for the notification are basic. Details such as the related preferences, name and keyname for the notification can be retrieved. The keyname property for the notification can be used to refer to a notification when integrating into the SoftLayer Notification system. The name property can be used more for display purposes.
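+//
+// A sketch of listing the notifications that can be subscribed to (assumes an
+// authenticated session and that the generated datatypes expose a KeyName
+// *string field mirroring the keyname property):
+//
+//	notifSvc := services.GetNotificationService(sess)
+//	notifs, err := notifSvc.GetAllObjects()
+//	if err == nil {
+//		for _, n := range notifs {
+//			if n.KeyName != nil {
+//				fmt.Println(*n.KeyName) // stable identifier for integrations
+//			}
+//		}
+//	}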
+type Notification struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNotificationService returns an instance of the Notification SoftLayer service
+func GetNotificationService(sess *session.Session) Notification {
+	return Notification{Session: sess}
+}
+
+func (r Notification) Id(id int) Notification {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Notification) Mask(mask string) Notification {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Notification) Filter(filter string) Notification {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Notification) Limit(limit int) Notification {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Notification) Offset(offset int) Notification {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Use this method to retrieve all active notifications that can be subscribed to.
+func (r Notification) GetAllObjects() (resp []datatypes.Notification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification) GetObject() (resp datatypes.Notification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The preferences related to the notification. These preferences are configurable and optional for subscribers to use.
+func (r Notification) GetPreferences() (resp []datatypes.Notification_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification", "getPreferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The required preferences related to the notification. While configurable, the subscriber does not have the option of whether to use the preference.
+func (r Notification) GetRequiredPreferences() (resp []datatypes.Notification_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification", "getRequiredPreferences", nil, &r.Options, &resp)
+	return
+}
+
+// This is an extension of the SoftLayer_Notification class. These are implementation details specific to those notifications which can be subscribed to and received on a mobile device.
+type Notification_Mobile struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNotificationMobileService returns an instance of the Notification_Mobile SoftLayer service
+func GetNotificationMobileService(sess *session.Session) Notification_Mobile {
+	return Notification_Mobile{Session: sess}
+}
+
+func (r Notification_Mobile) Id(id int) Notification_Mobile {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Notification_Mobile) Mask(mask string) Notification_Mobile {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Notification_Mobile) Filter(filter string) Notification_Mobile {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Notification_Mobile) Limit(limit int) Notification_Mobile {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Notification_Mobile) Offset(offset int) Notification_Mobile {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Create a new subscriber for a given resource.
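+//
+// A sketch of the call (all three values are placeholders; sl.String and
+// sl.Int are the pointer helpers from the sl package):
+//
+//	mobSvc := services.GetNotificationMobileService(sess)
+//	created, err := mobSvc.CreateSubscriberForMobileDevice(
+//		sl.String("NOTIFICATION_KEY_NAME"), // placeholder keyname
+//		sl.Int(1234),                       // placeholder resourceTableId
+//		sl.Int(5678))                       // placeholder userRecordId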
+func (r Notification_Mobile) CreateSubscriberForMobileDevice(keyName *string, resourceTableId *int, userRecordId *int) (resp bool, err error) {
+	params := []interface{}{
+		keyName,
+		resourceTableId,
+		userRecordId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_Mobile", "createSubscriberForMobileDevice", params, &r.Options, &resp)
+	return
+}
+
+// Use this method to retrieve all active notifications that can be subscribed to.
+func (r Notification_Mobile) GetAllObjects() (resp []datatypes.Notification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Mobile", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification_Mobile) GetObject() (resp datatypes.Notification_Mobile, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Mobile", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The preferences related to the notification. These preferences are configurable and optional for subscribers to use.
+func (r Notification_Mobile) GetPreferences() (resp []datatypes.Notification_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Mobile", "getPreferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The required preferences related to the notification. While configurable, the subscriber does not have the option of whether to use the preference.
+func (r Notification_Mobile) GetRequiredPreferences() (resp []datatypes.Notification_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Mobile", "getRequiredPreferences", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Notification_Occurrence_Event struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNotificationOccurrenceEventService returns an instance of the Notification_Occurrence_Event SoftLayer service
+func GetNotificationOccurrenceEventService(sess *session.Session) Notification_Occurrence_Event {
+	return Notification_Occurrence_Event{Session: sess}
+}
+
+func (r Notification_Occurrence_Event) Id(id int) Notification_Occurrence_Event {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Notification_Occurrence_Event) Mask(mask string) Notification_Occurrence_Event {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Notification_Occurrence_Event) Filter(filter string) Notification_Occurrence_Event {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Notification_Occurrence_Event) Limit(limit int) Notification_Occurrence_Event {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Notification_Occurrence_Event) Offset(offset int) Notification_Occurrence_Event {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Notification_Occurrence_Event) AcknowledgeNotification() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "acknowledgeNotification", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates whether or not this event has been acknowledged by the user.
+func (r Notification_Occurrence_Event) GetAcknowledgedFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getAcknowledgedFlag", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification_Occurrence_Event) GetAllObjects() (resp []datatypes.Notification_Occurrence_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the contents of the file attached to a SoftLayer event by its given identifier.
+func (r Notification_Occurrence_Event) GetAttachedFile(attachmentId *int) (resp []byte, err error) {
+	params := []interface{}{
+		attachmentId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getAttachedFile", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A collection of attachments for this event which provide supplementary information to impacted users; some examples are RFO (Reason For Outage) and root cause analysis documents.
+func (r Notification_Occurrence_Event) GetAttachments() (resp []datatypes.Notification_Occurrence_Event_Attachment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getAttachments", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The first update for this event.
+func (r Notification_Occurrence_Event) GetFirstUpdate() (resp datatypes.Notification_Occurrence_Update, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getFirstUpdate", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return the number of impacted owned accounts associated with this event for the current user.
+func (r Notification_Occurrence_Event) GetImpactedAccountCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedAccountCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A collection of accounts impacted by this event. Each impacted account record relates directly to a [[SoftLayer_Account]].
+func (r Notification_Occurrence_Event) GetImpactedAccounts() (resp []datatypes.Notification_Occurrence_Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedAccounts", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return the number of impacted devices associated with this event for the current user.
+func (r Notification_Occurrence_Event) GetImpactedDeviceCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedDeviceCount", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return a collection of SoftLayer_Notification_Occurrence_Resource objects which is a listing of the current user's impacted devices that are associated with this event.
+func (r Notification_Occurrence_Event) GetImpactedDevices() (resp []datatypes.Notification_Occurrence_Resource, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedDevices", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A collection of resources impacted by this event. Each record will relate to some physical resource that the user has access to such as [[SoftLayer_Hardware]] or [[SoftLayer_Virtual_Guest]].
+func (r Notification_Occurrence_Event) GetImpactedResources() (resp []datatypes.Notification_Occurrence_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedResources", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of users impacted by this event. Each impacted user record relates directly to a [[SoftLayer_User_Customer]]. +func (r Notification_Occurrence_Event) GetImpactedUsers() (resp []datatypes.Notification_Occurrence_User, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getImpactedUsers", nil, &r.Options, &resp) + return +} + +// Retrieve The last update for this event. +func (r Notification_Occurrence_Event) GetLastUpdate() (resp datatypes.Notification_Occurrence_Update, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getLastUpdate", nil, &r.Options, &resp) + return +} + +// Retrieve The type of event such as planned or unplanned maintenance. +func (r Notification_Occurrence_Event) GetNotificationOccurrenceEventType() (resp datatypes.Notification_Occurrence_Event_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getNotificationOccurrenceEventType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_Occurrence_Event) GetObject() (resp datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Notification_Occurrence_Event) GetStatusCode() (resp datatypes.Notification_Occurrence_Status_Code, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getStatusCode", nil, &r.Options, &resp) + return +} + +// Retrieve All updates for this event. +func (r Notification_Occurrence_Event) GetUpdates() (resp []datatypes.Notification_Occurrence_Update, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_Event", "getUpdates", nil, &r.Options, &resp) + return +} + +// This type contains general information relating to a user that may be impacted by a [[SoftLayer_Notification_Occurrence_Event]]. 
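+//
+// For example (a sketch; 42 is a placeholder impacted-user record id),
+// acknowledging the associated event through this service:
+//
+//	userSvc := services.GetNotificationOccurrenceUserService(sess)
+//	acked, err := userSvc.Id(42).Acknowledge()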
+type Notification_Occurrence_User struct { + Session *session.Session + Options sl.Options +} + +// GetNotificationOccurrenceUserService returns an instance of the Notification_Occurrence_User SoftLayer service +func GetNotificationOccurrenceUserService(sess *session.Session) Notification_Occurrence_User { + return Notification_Occurrence_User{Session: sess} +} + +func (r Notification_Occurrence_User) Id(id int) Notification_Occurrence_User { + r.Options.Id = &id + return r +} + +func (r Notification_Occurrence_User) Mask(mask string) Notification_Occurrence_User { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Notification_Occurrence_User) Filter(filter string) Notification_Occurrence_User { + r.Options.Filter = filter + return r +} + +func (r Notification_Occurrence_User) Limit(limit int) Notification_Occurrence_User { + r.Options.Limit = &limit + return r +} + +func (r Notification_Occurrence_User) Offset(offset int) Notification_Occurrence_User { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Notification_Occurrence_User) Acknowledge() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "acknowledge", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_Occurrence_User) GetAllObjects() (resp []datatypes.Notification_Occurrence_User, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_Occurrence_User) GetImpactedDeviceCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getImpactedDeviceCount", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of resources impacted by the associated event. +func (r Notification_Occurrence_User) GetImpactedResources() (resp []datatypes.Notification_Occurrence_Resource, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getImpactedResources", nil, &r.Options, &resp) + return +} + +// Retrieve The associated event. +func (r Notification_Occurrence_User) GetNotificationOccurrenceEvent() (resp datatypes.Notification_Occurrence_Event, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getNotificationOccurrenceEvent", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Notification_Occurrence_User) GetObject() (resp datatypes.Notification_Occurrence_User, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The impacted user. +func (r Notification_Occurrence_User) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_Notification_Occurrence_User", "getUser", nil, &r.Options, &resp) + return +} + +// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, details of the preferences, delivery methods and the delivery methods for the subscriber. +// +// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required. 
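+//
+// As a sketch, the createObject template described below can be expressed in
+// Go roughly as follows (all ids and values are placeholders, and the field
+// names assume the generated datatypes mirror the API property names):
+//
+//	tmpl := datatypes.Notification_User_Subscriber{
+//		UserRecordId:   sl.Int(1111), // placeholder user record
+//		NotificationId: sl.Int(3),    // placeholder notification
+//		ResourceRecord: &datatypes.Notification_User_Subscriber_Resource{
+//			ResourceTableId: sl.Int(1234), // id of the service to subscribe to
+//		},
+//		Preferences: []datatypes.Notification_User_Subscriber_Preference{
+//			{NotificationPreferenceId: sl.Int(2), Value: sl.String("85")}, // threshold
+//			{NotificationPreferenceId: sl.Int(3), Value: sl.String("15")}, // delivery limit
+//		},
+//	}
+//	ok, err := services.GetNotificationUserSubscriberService(sess).CreateObject(&tmpl)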
+type Notification_User_Subscriber struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNotificationUserSubscriberService returns an instance of the Notification_User_Subscriber SoftLayer service
+func GetNotificationUserSubscriberService(sess *session.Session) Notification_User_Subscriber {
+	return Notification_User_Subscriber{Session: sess}
+}
+
+func (r Notification_User_Subscriber) Id(id int) Notification_User_Subscriber {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Notification_User_Subscriber) Mask(mask string) Notification_User_Subscriber {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Notification_User_Subscriber) Filter(filter string) Notification_User_Subscriber {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Notification_User_Subscriber) Limit(limit int) Notification_User_Subscriber {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Notification_User_Subscriber) Offset(offset int) Notification_User_Subscriber {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Use this method to create a new subscription for a notification. This method is the entry method to the notification system. Certain properties are required to create a subscription while others are optional.
+//
+// The required property is the resourceRecord property, which is of type SoftLayer_Notification_User_Subscriber_Resource. For the resourceRecord property, the only property that needs to be populated is the resourceTableId. The resourceTableId is the unique identifier of a SoftLayer service to create the subscription for. For example, the unique identifier of the Storage EVault service to create the subscription on.
+//
+// An optional property that can be set is the preferences property. The preferences property is an array of SoftLayer_Notification_User_Subscriber_Preference. By default, the system will populate the preferences with the default values if no preferences are passed in. The preferences passed in must be the preferences related to the notification being subscribed to. The notification preferences and preference details (such as minimum and maximum values) can be retrieved using the SoftLayer_Notification service. The properties that need to be populated for preferences are the notificationPreferenceId and value.
+//
+// For example, to create a subscriber for a Storage EVault service to be notified 15 times during a billing cycle and to be notified when the vault usage reaches 85% of its allowed capacity, use the following structure:
+//
+//
+// *userRecordId = 1111
+// *notificationId = 3
+// *resourceRecord
+// **resourceTableId = 1234
+// *preferences[1]
+// **notificationPreferenceId = 2
+// **value = 85
+// *preferences[2]
+// **notificationPreferenceId = 3
+// **value = 15
+//
+//
+func (r Notification_User_Subscriber) CreateObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// The subscriber's subscription status can be "turned off" or "turned on" if the subscription is not required.
+//
+// Subscriber preferences may also be edited. To edit the preferences, you must pass in the id of the preferences to edit. Here is an example of the structure to pass in.
+// In this example, the structure sets the subscriber status to active, the threshold preference to 90, and the limit preference to 20:
+//
+//
+// *id = 1111
+// *active = 1
+// *preferences[1]
+// **id = 11
+// **value = 90
+// *preferences[2]
+// **id = 12
+// **value = 20
+func (r Notification_User_Subscriber) EditObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The delivery methods used to send the subscribed notification.
+func (r Notification_User_Subscriber) GetDeliveryMethods() (resp []datatypes.Notification_Delivery_Method, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getDeliveryMethods", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Notification subscribed to.
+func (r Notification_User_Subscriber) GetNotification() (resp datatypes.Notification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getNotification", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification_User_Subscriber) GetObject() (resp datatypes.Notification_User_Subscriber, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Associated subscriber preferences used for the notification subscription. For example, preferences include number of deliveries (limit) and threshold.
+func (r Notification_User_Subscriber) GetPreferences() (resp []datatypes.Notification_User_Subscriber_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getPreferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Preference details such as description, minimum and maximum limits, default value and unit of measure.
+func (r Notification_User_Subscriber) GetPreferencesDetails() (resp []datatypes.Notification_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getPreferencesDetails", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The subscriber id to resource id mapping.
+func (r Notification_User_Subscriber) GetResourceRecord() (resp datatypes.Notification_User_Subscriber_Resource, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getResourceRecord", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve User record for the subscription.
+func (r Notification_User_Subscriber) GetUserRecord() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber", "getUserRecord", nil, &r.Options, &resp)
+	return
+}
+
+// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, details of the preferences, delivery methods and the delivery methods for the subscriber.
+//
+// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required.
+type Notification_User_Subscriber_Billing struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNotificationUserSubscriberBillingService returns an instance of the Notification_User_Subscriber_Billing SoftLayer service
+func GetNotificationUserSubscriberBillingService(sess *session.Session) Notification_User_Subscriber_Billing {
+	return Notification_User_Subscriber_Billing{Session: sess}
+}
+
+func (r Notification_User_Subscriber_Billing) Id(id int) Notification_User_Subscriber_Billing {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Notification_User_Subscriber_Billing) Mask(mask string) Notification_User_Subscriber_Billing {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Notification_User_Subscriber_Billing) Filter(filter string) Notification_User_Subscriber_Billing {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Notification_User_Subscriber_Billing) Limit(limit int) Notification_User_Subscriber_Billing {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Notification_User_Subscriber_Billing) Offset(offset int) Notification_User_Subscriber_Billing {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Use this method to create a new subscription for a notification. This method is the entry method to the notification system. Certain properties are required to create a subscription while others are optional.
+//
+// The required property is the resourceRecord property, which is of type SoftLayer_Notification_User_Subscriber_Resource. For the resourceRecord property, the only property that needs to be populated is the resourceTableId. The resourceTableId is the unique identifier of a SoftLayer service to create the subscription for. For example, the unique identifier of the Storage EVault service to create the subscription on.
+//
+// An optional property that can be set is the preferences property. The preferences property is an array of SoftLayer_Notification_User_Subscriber_Preference. By default, the system will populate the preferences with the default values if no preferences are passed in. The preferences passed in must be the preferences related to the notification being subscribed to. The notification preferences and preference details (such as minimum and maximum values) can be retrieved using the SoftLayer_Notification service. The properties that need to be populated for preferences are the notificationPreferenceId and value.
+//
+// For example, to create a subscriber for a Storage EVault service to be notified 15 times during a billing cycle and to be notified when the vault usage reaches 85% of its allowed capacity, use the following structure:
+//
+//
+// *userRecordId = 1111
+// *notificationId = 3
+// *resourceRecord
+// **resourceTableId = 1234
+// *preferences[1]
+// **notificationPreferenceId = 2
+// **value = 85
+// *preferences[2]
+// **notificationPreferenceId = 3
+// **value = 15
+//
+//
+func (r Notification_User_Subscriber_Billing) CreateObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// The subscriber's subscription status can be "turned off" or "turned on" if the subscription is not required.
+//
+// Subscriber preferences may also be edited.
+// To edit the preferences, you must pass in the id of the preferences to edit. Here is an example of the structure to pass in. In this example, the structure sets the subscriber status to active, the threshold preference to 90, and the limit preference to 20:
+//
+//
+// *id = 1111
+// *active = 1
+// *preferences[1]
+// **id = 11
+// **value = 90
+// *preferences[2]
+// **id = 12
+// **value = 20
+func (r Notification_User_Subscriber_Billing) EditObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The delivery methods used to send the subscribed notification.
+func (r Notification_User_Subscriber_Billing) GetDeliveryMethods() (resp []datatypes.Notification_Delivery_Method, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getDeliveryMethods", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Notification subscribed to.
+func (r Notification_User_Subscriber_Billing) GetNotification() (resp datatypes.Notification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getNotification", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification_User_Subscriber_Billing) GetObject() (resp datatypes.Notification_User_Subscriber_Billing, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Associated subscriber preferences used for the notification subscription. For example, preferences include number of deliveries (limit) and threshold.
+func (r Notification_User_Subscriber_Billing) GetPreferences() (resp []datatypes.Notification_User_Subscriber_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getPreferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Preference details such as description, minimum and maximum limits, default value and unit of measure.
+func (r Notification_User_Subscriber_Billing) GetPreferencesDetails() (resp []datatypes.Notification_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getPreferencesDetails", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The subscriber id to resource id mapping.
+func (r Notification_User_Subscriber_Billing) GetResourceRecord() (resp datatypes.Notification_User_Subscriber_Resource, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getResourceRecord", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve User record for the subscription.
+func (r Notification_User_Subscriber_Billing) GetUserRecord() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Billing", "getUserRecord", nil, &r.Options, &resp)
+	return
+}
+
+// A notification subscriber will have details pertaining to the subscriber's notification subscription. You can receive details such as preferences, details of the preferences, delivery methods and the delivery methods for the subscriber.
+//
+// NOTE: There are preferences and delivery methods that cannot be modified. Also, there are some subscriptions that are required.
+type Notification_User_Subscriber_Mobile struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNotificationUserSubscriberMobileService returns an instance of the Notification_User_Subscriber_Mobile SoftLayer service
+func GetNotificationUserSubscriberMobileService(sess *session.Session) Notification_User_Subscriber_Mobile {
+	return Notification_User_Subscriber_Mobile{Session: sess}
+}
+
+func (r Notification_User_Subscriber_Mobile) Id(id int) Notification_User_Subscriber_Mobile {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Notification_User_Subscriber_Mobile) Mask(mask string) Notification_User_Subscriber_Mobile {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Notification_User_Subscriber_Mobile) Filter(filter string) Notification_User_Subscriber_Mobile {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Notification_User_Subscriber_Mobile) Limit(limit int) Notification_User_Subscriber_Mobile {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Notification_User_Subscriber_Mobile) Offset(offset int) Notification_User_Subscriber_Mobile {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Notification_User_Subscriber_Mobile) ClearSnoozeTimer() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "clearSnoozeTimer", nil, &r.Options, &resp)
+	return
+}
+
+// Use this method to create a new subscription for a notification. This method is the entry method to the notification system. Certain properties are required to create a subscription while others are optional.
+//
+// The required property is the resourceRecord property, which is of type SoftLayer_Notification_User_Subscriber_Resource. For the resourceRecord property, the only property that needs to be populated is the resourceTableId. The resourceTableId is the unique identifier of a SoftLayer service to create the subscription for. For example, the unique identifier of the Storage EVault service to create the subscription on.
+//
+// An optional property that can be set is the preferences property. The preferences property is an array of SoftLayer_Notification_User_Subscriber_Preference. By default, the system will populate the preferences with the default values if no preferences are passed in. The preferences passed in must be the preferences related to the notification being subscribed to. The notification preferences and preference details (such as minimum and maximum values) can be retrieved using the SoftLayer_Notification service. The properties that need to be populated for preferences are the notificationPreferenceId and value.
+//
+// For example, to create a subscriber for a Storage EVault service to be notified 15 times during a billing cycle and to be notified when the vault usage reaches 85% of its allowed capacity, use the following structure:
+//
+//
+// *userRecordId = 1111
+// *notificationId = 3
+// *resourceRecord
+// **resourceTableId = 1234
+// *preferences[1]
+// **notificationPreferenceId = 2
+// **value = 85
+// *preferences[2]
+// **notificationPreferenceId = 3
+// **value = 15
+//
+//
+func (r Notification_User_Subscriber_Mobile) CreateObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// The subscriber's subscription status can be "turned off" or "turned on" if the subscription is not required.
+//
+// Subscriber preferences may also be edited. To edit the preferences, you must pass in the id of the preferences to edit. Here is an example of the structure to pass in. In this example, the structure sets the subscriber status to active, the threshold preference to 90, and the limit preference to 20:
+//
+//
+// *id = 1111
+// *active = 1
+// *preferences[1]
+// **id = 11
+// **value = 90
+// *preferences[2]
+// **id = 12
+// **value = 20
+func (r Notification_User_Subscriber_Mobile) EditObject(templateObject *datatypes.Notification_User_Subscriber) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The delivery methods used to send the subscribed notification.
+func (r Notification_User_Subscriber_Mobile) GetDeliveryMethods() (resp []datatypes.Notification_Delivery_Method, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getDeliveryMethods", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Notification subscribed to.
+func (r Notification_User_Subscriber_Mobile) GetNotification() (resp datatypes.Notification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getNotification", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification_User_Subscriber_Mobile) GetObject() (resp datatypes.Notification_User_Subscriber_Mobile, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Associated subscriber preferences used for the notification subscription. For example, preferences include number of deliveries (limit) and threshold.
+func (r Notification_User_Subscriber_Mobile) GetPreferences() (resp []datatypes.Notification_User_Subscriber_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getPreferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Preference details such as description, minimum and maximum limits, default value and unit of measure.
+func (r Notification_User_Subscriber_Mobile) GetPreferencesDetails() (resp []datatypes.Notification_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getPreferencesDetails", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The subscriber id to resource id mapping.
+func (r Notification_User_Subscriber_Mobile) GetResourceRecord() (resp datatypes.Notification_User_Subscriber_Resource, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getResourceRecord", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve User record for the subscription.
+func (r Notification_User_Subscriber_Mobile) GetUserRecord() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "getUserRecord", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification_User_Subscriber_Mobile) SetSnoozeTimer(start *int, end *int) (resp bool, err error) {
+	params := []interface{}{
+		start,
+		end,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Mobile", "setSnoozeTimer", params, &r.Options, &resp)
+	return
+}
+
+// Preferences are settings that can be modified to change the behavior of the subscription. For example, modify the limit preference to only receive notifications 10 times instead of 1 during a billing cycle.
+//
+// NOTE: Some preferences have certain restrictions on values that can be set.
+type Notification_User_Subscriber_Preference struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetNotificationUserSubscriberPreferenceService returns an instance of the Notification_User_Subscriber_Preference SoftLayer service
+func GetNotificationUserSubscriberPreferenceService(sess *session.Session) Notification_User_Subscriber_Preference {
+	return Notification_User_Subscriber_Preference{Session: sess}
+}
+
+func (r Notification_User_Subscriber_Preference) Id(id int) Notification_User_Subscriber_Preference {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Notification_User_Subscriber_Preference) Mask(mask string) Notification_User_Subscriber_Preference {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Notification_User_Subscriber_Preference) Filter(filter string) Notification_User_Subscriber_Preference {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Notification_User_Subscriber_Preference) Limit(limit int) Notification_User_Subscriber_Preference {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Notification_User_Subscriber_Preference) Offset(offset int) Notification_User_Subscriber_Preference {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Use this method to create a new notification preference for a subscriber.
+func (r Notification_User_Subscriber_Preference) CreateObject(templateObject *datatypes.Notification_User_Subscriber_Preference) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Preference", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification_User_Subscriber_Preference) EditObjects(templateObjects []datatypes.Notification_User_Subscriber_Preference) (resp bool, err error) {
+	params := []interface{}{
+		templateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Preference", "editObjects", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve Details such as name, keyname, minimum and maximum values for the preference.
+func (r Notification_User_Subscriber_Preference) GetDefaultPreference() (resp datatypes.Notification_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Preference", "getDefaultPreference", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Details of the subscriber tied to the preference.
+func (r Notification_User_Subscriber_Preference) GetNotificationUserSubscriber() (resp datatypes.Notification_User_Subscriber, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Preference", "getNotificationUserSubscriber", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Notification_User_Subscriber_Preference) GetObject() (resp datatypes.Notification_User_Subscriber_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Notification_User_Subscriber_Preference", "getObject", nil, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/product.go b/vendor/github.com/softlayer/softlayer-go/services/product.go
new file mode 100644
index 0000000000..021926658a
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/product.go
@@ -0,0 +1,2028 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// The SoftLayer_Product_Item_Category data type contains general category information for prices.
+type Product_Item_Category struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetProductItemCategoryService returns an instance of the Product_Item_Category SoftLayer service
+func GetProductItemCategoryService(sess *session.Session) Product_Item_Category {
+	return Product_Item_Category{Session: sess}
+}
+
+func (r Product_Item_Category) Id(id int) Product_Item_Category {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Product_Item_Category) Mask(mask string) Product_Item_Category {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Product_Item_Category) Filter(filter string) Product_Item_Category {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Product_Item_Category) Limit(limit int) Product_Item_Category {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Product_Item_Category) Offset(offset int) Product_Item_Category {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Returns a list of active Items in the "Additional Services" package with their active prices for a given product item category and sorts them by price.
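+//
+// A sketch of the call (the category id 26 is a placeholder; sess is an
+// authenticated *session.Session):
+//
+//	catSvc := services.GetProductItemCategoryService(sess)
+//	items, err := catSvc.Id(26).GetAdditionalProductsForCategory()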
+func (r Product_Item_Category) GetAdditionalProductsForCategory() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getAdditionalProductsForCategory", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetBandwidthCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getBandwidthCategories", nil, &r.Options, &resp) + return +} + +// Retrieve The billing items associated with an account that share a category code with an item category's category code. +func (r Product_Item_Category) GetBillingItems() (resp []datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getBillingItems", nil, &r.Options, &resp) + return +} + +// This method returns a collection of computing categories. These categories are also top level items in a service offering. +func (r Product_Item_Category) GetComputingCategories(resetCache *bool) (resp []datatypes.Product_Item_Category, err error) { + params := []interface{}{ + resetCache, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getComputingCategories", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetCustomUsageRatesCategories(resetCache *bool) (resp []datatypes.Product_Item_Category, err error) { + params := []interface{}{ + resetCache, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getCustomUsageRatesCategories", params, &r.Options, &resp) + return +} + +// Retrieve This invoice item's "item category group". +func (r Product_Item_Category) GetGroup() (resp datatypes.Product_Item_Category_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getGroup", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of service offering category groups. Each group contains a collection of items associated with this category. +func (r Product_Item_Category) GetGroups() (resp []datatypes.Product_Package_Item_Category_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getGroups", nil, &r.Options, &resp) + return +} + +// Each product item price must be tied to a category for it to be sold. These categories describe how a particular product item is sold. For example, the 250GB hard drive can be sold as disk0, disk1, ... disk11. There are different prices for this product item depending on which category it is. This keeps down the number of products in total. +func (r Product_Item_Category) GetObject() (resp datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetObjectStorageCategories(resetCache *bool) (resp []datatypes.Product_Item_Category, err error) { + params := []interface{}{ + resetCache, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getObjectStorageCategories", params, &r.Options, &resp) + return +} + +// Retrieve Any unique options associated with an item category. +func (r Product_Item_Category) GetOrderOptions() (resp []datatypes.Product_Item_Category_Order_Option_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getOrderOptions", nil, &r.Options, &resp) + return +} + +// Retrieve A list of configurations available in this category.
+func (r Product_Item_Category) GetPackageConfigurations() (resp []datatypes.Product_Package_Order_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getPackageConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve A list of preset configurations this category is used in. +func (r Product_Item_Category) GetPresetConfigurations() (resp []datatypes.Product_Package_Preset_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getPresetConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve The question references that are associated with an item category. +func (r Product_Item_Category) GetQuestionReferences() (resp []datatypes.Product_Item_Category_Question_Xref, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getQuestionReferences", nil, &r.Options, &resp) + return +} + +// Retrieve The questions that are associated with an item category. +func (r Product_Item_Category) GetQuestions() (resp []datatypes.Product_Item_Category_Question, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getQuestions", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetSoftwareCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getSoftwareCategories", nil, &r.Options, &resp) + return +} + +// This method returns a list of subnet categories. +func (r Product_Item_Category) GetSubnetCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getSubnetCategories", nil, &r.Options, &resp) + return +} + +// This method returns a collection of computing categories. These categories are also top level items in a service offering. +func (r Product_Item_Category) GetTopLevelCategories(resetCache *bool) (resp []datatypes.Product_Item_Category, err error) { + params := []interface{}{ + resetCache, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getTopLevelCategories", params, &r.Options, &resp) + return +} + +// This method returns service product categories that can be canceled via API. You can use these categories to find the billing items you wish to cancel. +func (r Product_Item_Category) GetValidCancelableServiceItemCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getValidCancelableServiceItemCategories", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Category) GetVlanCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category", "getVlanCategories", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Product_Item_Category_Group data type contains general category group information.
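+//
+// As with every service in this file, requests can be scoped with the
+// chainable option setters defined below; note that Mask wraps bare field
+// lists, so Mask("id,name") is sent as "mask[id,name]". A sketch with
+// placeholder identifiers:
+//
+//    group, err := services.GetProductItemCategoryGroupService(sess).
+//        Id(groupId).
+//        Mask("id,name").
+//        GetObject()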
+type Product_Item_Category_Group struct { + Session *session.Session + Options sl.Options +} + +// GetProductItemCategoryGroupService returns an instance of the Product_Item_Category_Group SoftLayer service +func GetProductItemCategoryGroupService(sess *session.Session) Product_Item_Category_Group { + return Product_Item_Category_Group{Session: sess} +} + +func (r Product_Item_Category_Group) Id(id int) Product_Item_Category_Group { + r.Options.Id = &id + return r +} + +func (r Product_Item_Category_Group) Mask(mask string) Product_Item_Category_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Item_Category_Group) Filter(filter string) Product_Item_Category_Group { + r.Options.Filter = filter + return r +} + +func (r Product_Item_Category_Group) Limit(limit int) Product_Item_Category_Group { + r.Options.Limit = &limit + return r +} + +func (r Product_Item_Category_Group) Offset(offset int) Product_Item_Category_Group { + r.Options.Offset = &offset + return r +} + +// Each product item category must be tied to a category group. These category groups describe how a particular product item category is categorized. For example, the disk0, disk1, ... disk11 can be categorized as Server and Attached Services. There are different groups for each product item category, depending on the function of the product item in the subject category. +func (r Product_Item_Category_Group) GetObject() (resp datatypes.Product_Item_Category_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Category_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Represents the assignment of a policy to a product. The existence of a record means that the associated product is subject to the terms defined in the document content of the policy. +type Product_Item_Policy_Assignment struct { + Session *session.Session + Options sl.Options +} + +// GetProductItemPolicyAssignmentService returns an instance of the Product_Item_Policy_Assignment SoftLayer service +func GetProductItemPolicyAssignmentService(sess *session.Session) Product_Item_Policy_Assignment { + return Product_Item_Policy_Assignment{Session: sess} +} + +func (r Product_Item_Policy_Assignment) Id(id int) Product_Item_Policy_Assignment { + r.Options.Id = &id + return r +} + +func (r Product_Item_Policy_Assignment) Mask(mask string) Product_Item_Policy_Assignment { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Item_Policy_Assignment) Filter(filter string) Product_Item_Policy_Assignment { + r.Options.Filter = filter + return r +} + +func (r Product_Item_Policy_Assignment) Limit(limit int) Product_Item_Policy_Assignment { + r.Options.Limit = &limit + return r +} + +func (r Product_Item_Policy_Assignment) Offset(offset int) Product_Item_Policy_Assignment { + r.Options.Offset = &offset + return r +} + +// Register the acceptance of the associated policy to product assignment, and link the created record to a Ticket.
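+//
+// Sketch (sl.Int is the pointer helper from the sl package; both ids are
+// placeholders):
+//
+//    ok, err := services.GetProductItemPolicyAssignmentService(sess).
+//        Id(assignmentId).
+//        AcceptFromTicket(sl.Int(123456))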
+func (r Product_Item_Policy_Assignment) AcceptFromTicket(ticketId *int) (resp bool, err error) { + params := []interface{}{ + ticketId, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Policy_Assignment", "acceptFromTicket", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Policy_Assignment) GetObject() (resp datatypes.Product_Item_Policy_Assignment, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Policy_Assignment", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve the binary contents of the associated PDF policy document. +func (r Product_Item_Policy_Assignment) GetPolicyDocumentContents() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Policy_Assignment", "getPolicyDocumentContents", nil, &r.Options, &resp) + return +} + +// Retrieve The name of the assigned policy. +func (r Product_Item_Policy_Assignment) GetPolicyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Policy_Assignment", "getPolicyName", nil, &r.Options, &resp) + return +} + +// Retrieve The [[SoftLayer_Product_Item]] for this policy assignment. +func (r Product_Item_Policy_Assignment) GetProduct() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Policy_Assignment", "getProduct", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Product_Item_Price data type contains general information relating to a single SoftLayer product item price. You can find out what packages each price is in as well as which category under which this price is sold. All prices are returned in floating point values measured in US Dollars ($USD). +type Product_Item_Price struct { + Session *session.Session + Options sl.Options +} + +// GetProductItemPriceService returns an instance of the Product_Item_Price SoftLayer service +func GetProductItemPriceService(sess *session.Session) Product_Item_Price { + return Product_Item_Price{Session: sess} +} + +func (r Product_Item_Price) Id(id int) Product_Item_Price { + r.Options.Id = &id + return r +} + +func (r Product_Item_Price) Mask(mask string) Product_Item_Price { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Item_Price) Filter(filter string) Product_Item_Price { + r.Options.Filter = filter + return r +} + +func (r Product_Item_Price) Limit(limit int) Product_Item_Price { + r.Options.Limit = &limit + return r +} + +func (r Product_Item_Price) Offset(offset int) Product_Item_Price { + r.Options.Offset = &offset + return r +} + +// Retrieve The account that the item price is restricted to. +func (r Product_Item_Price) GetAccountRestrictions() (resp []datatypes.Product_Item_Price_Account_Restriction, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getAccountRestrictions", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Item_Price) GetAttributes() (resp []datatypes.Product_Item_Price_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the price is for Big Data OS/Journal disks only. 
(Deprecated) +func (r Product_Item_Price) GetBigDataOsJournalDiskFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getBigDataOsJournalDiskFlag", nil, &r.Options, &resp) + return +} + +// Retrieve cross reference for bundles +func (r Product_Item_Price) GetBundleReferences() (resp []datatypes.Product_Item_Bundles, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getBundleReferences", nil, &r.Options, &resp) + return +} + +// Retrieve The maximum capacity value for which this price is suitable. +func (r Product_Item_Price) GetCapacityRestrictionMaximum() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getCapacityRestrictionMaximum", nil, &r.Options, &resp) + return +} + +// Retrieve The minimum capacity value for which this price is suitable. +func (r Product_Item_Price) GetCapacityRestrictionMinimum() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getCapacityRestrictionMinimum", nil, &r.Options, &resp) + return +} + +// Retrieve The type of capacity restriction by which this price must abide. +func (r Product_Item_Price) GetCapacityRestrictionType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getCapacityRestrictionType", nil, &r.Options, &resp) + return +} + +// Retrieve All categories of which this item is a member. +func (r Product_Item_Price) GetCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getCategories", nil, &r.Options, &resp) + return +} + +// Retrieve Whether this price defines a software license for its product item. +func (r Product_Item_Price) GetDefinedSoftwareLicenseFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getDefinedSoftwareLicenseFlag", nil, &r.Options, &resp) + return +} + +// Retrieve An item price's inventory status per datacenter. +func (r Product_Item_Price) GetInventory() (resp []datatypes.Product_Package_Inventory, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getInventory", nil, &r.Options, &resp) + return +} + +// Retrieve The product item a price is tied to. +func (r Product_Item_Price) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getItem", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Price) GetObject() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Item_Price) GetOrderPremiums() (resp []datatypes.Product_Item_Price_Premium, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getOrderPremiums", nil, &r.Options, &resp) + return +} + +// Retrieve cross reference for packages +func (r Product_Item_Price) GetPackageReferences() (resp []datatypes.Product_Package_Item_Prices, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getPackageReferences", nil, &r.Options, &resp) + return +} + +// Retrieve A price's packages under which this item is sold. +func (r Product_Item_Price) GetPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getPackages", nil, &r.Options, &resp) + return +} + +// Retrieve A list of preset configurations this price is used in.
+func (r Product_Item_Price) GetPresetConfigurations() (resp []datatypes.Product_Package_Preset_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getPresetConfigurations", nil, &r.Options, &resp) + return +} + +// Retrieve The pricing location group that this price is applicable for. Prices that have a pricing location group will only be available for ordering with the locations specified on the location group. +func (r Product_Item_Price) GetPricingLocationGroup() (resp datatypes.Location_Group_Pricing, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getPricingLocationGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The number of server cores required to order this item. This is deprecated. Use [[SoftLayer_Product_Item_Price/getCapacityRestrictionMinimum|getCapacityRestrictionMinimum]] and [[SoftLayer_Product_Item_Price/getCapacityRestrictionMaximum|getCapacityRestrictionMaximum]] +func (r Product_Item_Price) GetRequiredCoreCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getRequiredCoreCount", nil, &r.Options, &resp) + return +} + +// Returns a collection of rate-based [[SoftLayer_Product_Item_Price]] objects associated with the [[SoftLayer_Product_Item]] objects and the [[SoftLayer_Location]] specified. The location is required to get the appropriate rate-based prices because the usage rates may vary from datacenter to datacenter. +func (r Product_Item_Price) GetUsageRatePrices(location *datatypes.Location, items []datatypes.Product_Item) (resp []datatypes.Product_Item_Price, err error) { + params := []interface{}{ + location, + items, + } + err = r.Session.DoRequest("SoftLayer_Product_Item_Price", "getUsageRatePrices", params, &r.Options, &resp) + return +} + +// no documentation yet +type Product_Item_Price_Premium struct { + Session *session.Session + Options sl.Options +} + +// GetProductItemPricePremiumService returns an instance of the Product_Item_Price_Premium SoftLayer service +func GetProductItemPricePremiumService(sess *session.Session) Product_Item_Price_Premium { + return Product_Item_Price_Premium{Session: sess} +} + +func (r Product_Item_Price_Premium) Id(id int) Product_Item_Price_Premium { + r.Options.Id = &id + return r +} + +func (r Product_Item_Price_Premium) Mask(mask string) Product_Item_Price_Premium { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Item_Price_Premium) Filter(filter string) Product_Item_Price_Premium { + r.Options.Filter = filter + return r +} + +func (r Product_Item_Price_Premium) Limit(limit int) Product_Item_Price_Premium { + r.Options.Limit = &limit + return r +} + +func (r Product_Item_Price_Premium) Offset(offset int) Product_Item_Price_Premium { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Product_Item_Price_Premium) GetItemPrice() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price_Premium", "getItemPrice", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Item_Price_Premium) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price_Premium", "getLocation", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Item_Price_Premium) GetObject() (resp datatypes.Product_Item_Price_Premium, err error) 
{ + err = r.Session.DoRequest("SoftLayer_Product_Item_Price_Premium", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Item_Price_Premium) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Item_Price_Premium", "getPackage", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Product_Order struct { + Session *session.Session + Options sl.Options +} + +// GetProductOrderService returns an instance of the Product_Order SoftLayer service +func GetProductOrderService(sess *session.Session) Product_Order { + return Product_Order{Session: sess} +} + +func (r Product_Order) Id(id int) Product_Order { + r.Options.Id = &id + return r +} + +func (r Product_Order) Mask(mask string) Product_Order { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Order) Filter(filter string) Product_Order { + r.Options.Filter = filter + return r +} + +func (r Product_Order) Limit(limit int) Product_Order { + r.Options.Limit = &limit + return r +} + +func (r Product_Order) Offset(offset int) Product_Order { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Product_Order) CheckItemAvailability(itemPrices []datatypes.Product_Item_Price, accountId *int, availabilityTypeKeyNames []string) (resp bool, err error) { + params := []interface{}{ + itemPrices, + accountId, + availabilityTypeKeyNames, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "checkItemAvailability", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Order) CheckItemAvailabilityForImageTemplate(imageTemplateId *int, accountId *int, packageId *int, availabilityTypeKeyNames []string) (resp bool, err error) { + params := []interface{}{ + imageTemplateId, + accountId, + packageId, + availabilityTypeKeyNames, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "checkItemAvailabilityForImageTemplate", params, &r.Options, &resp) + return +} + +// Check order items for conflicts +func (r Product_Order) CheckItemConflicts(itemPrices []datatypes.Product_Item_Price) (resp bool, err error) { + params := []interface{}{ + itemPrices, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "checkItemConflicts", params, &r.Options, &resp) + return +} + +// This method simply returns a receipt for a previously finalized payment authorization from PayPal. The response matches the response returned from placeOrder when the order was originally placed with PayPal as the payment type. +func (r Product_Order) GetExternalPaymentAuthorizationReceipt(token *string, payerId *string) (resp datatypes.Container_Product_Order_Receipt, err error) { + params := []interface{}{ + token, + payerId, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "getExternalPaymentAuthorizationReceipt", params, &r.Options, &resp) + return +} + +// This method returns a collection of [[SoftLayer_Container_Product_Order_Network]] objects. This will contain the available networks that can be used when ordering services. +// +// If a location id is supplied, the list of networks will be trimmed down to only those that are available at that particular datacenter. +// +// If a package id is supplied, the list of public VLANs and subnets will be trimmed down to those that are available for that particular package. 
+// +// The account id is for internal use only and will be ignored when supplied by customers. +func (r Product_Order) GetNetworks(locationId *int, packageId *int, accountId *int) (resp []datatypes.Container_Product_Order_Network, err error) { + params := []interface{}{ + locationId, + packageId, + accountId, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "getNetworks", params, &r.Options, &resp) + return +} + +// When the account is on an external reseller brand, this service will provide a SoftLayer_Product_Order with the pricing adjusted by the external reseller. +func (r Product_Order) GetResellerOrder(orderContainer *datatypes.Container_Product_Order) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + orderContainer, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "getResellerOrder", params, &r.Options, &resp) + return +} + +// Sometimes taxes cannot be calculated immediately, so we start the calculations and let them run in the background. This method will return the current progress and information related to a specific tax calculation, which allows real-time progress updates on tax calculations. +func (r Product_Order) GetTaxCalculationResult(orderHash *string) (resp datatypes.Container_Tax_Cache, err error) { + params := []interface{}{ + orderHash, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "getTaxCalculationResult", params, &r.Options, &resp) + return +} + +// Return collections of public and private VLANs that are available during ordering. If a location ID is provided, the resulting VLANs will be limited to that location. If the Virtual Server package id (46) is provided, the VLANs will be narrowed down to those locations that contain routers with the VIRTUAL_IMAGE_STORE data attribute. +// +// For the selectedItems parameter, this is a comma-separated string of category codes and item values. For example: +// +//
+// - port_speed=10,guest_disk0=LOCAL_DISK
+// - port_speed=100,disk0=SAN_DISK
+// - port_speed=100,private_network_only=1,guest_disk0=LOCAL_DISK
    +// +// This parameter is used to narrow the available results down even further. It's not necessary when selecting a VLAN, but it will help avoid errors when attempting to place an order. The only acceptable category codes are: +// +//
+// - port_speed
+// - A disk category, such as guest_disk0 or disk0, with values of either LOCAL_DISK or SAN_DISK
+// - private_network_only
+// - dual_path_network
    +// +// For most customers, it's sufficient to only provide the first 2 parameters. +func (r Product_Order) GetVlans(locationId *int, packageId *int, selectedItems *string, vlanIds []int, subnetIds []int, accountId *int, orderContainer *datatypes.Container_Product_Order, hardwareFirewallOrderedFlag *bool) (resp datatypes.Container_Product_Order_Network_Vlans, err error) { + params := []interface{}{ + locationId, + packageId, + selectedItems, + vlanIds, + subnetIds, + accountId, + orderContainer, + hardwareFirewallOrderedFlag, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "getVlans", params, &r.Options, &resp) + return +} + +// +// Use this method to place bare metal server, virtual server and additional service orders with SoftLayer. Upon success, your credit card or PayPal account will incur charges for the monthly order total (or prorated value if ordered mid billing cycle). If all products on the order are only billed hourly, you will be charged on your billing anniversary date, which occurs monthly on the day you ordered your first service with SoftLayer. For new customers, you are required to provide billing information when you place an order. For existing customers, the credit card on file will be charged. If you're a PayPal customer, a URL will be returned from the call to [[SoftLayer_Product_Order/placeOrder|placeOrder]] which is to be used to finish the authorization process. This authorization tells PayPal that you indeed want to place an order with SoftLayer. From PayPal's web site, you will be redirected back to SoftLayer for your order receipt.

+// When an order is placed, your order will be in a "pending approval" state. When all internal checks pass, your order will be automatically approved. For orders that may need extra attention, a Sales representative will review the order and contact you if necessary. Once the order is approved, your server or service will be provisioned and available to you shortly thereafter. Depending on the type of server or service ordered, provisioning times will vary.
+//
+// Order Containers
+//
+// When placing API orders, it's important to order your server and services on the appropriate [[SoftLayer_Container_Product_Order (type)|order container]]. Failing to provide the correct container may delay your server or service from being provisioned in a timely manner. Some common order containers are included below.
+//
+// Note: SoftLayer_Container_Product_Order_ has been removed from the containers in the table below for readability.
+//
+// Each entry lists the product, its order container, and, in parentheses, its package type:
+//
+// - Bare metal server by CPU: [[SoftLayer_Container_Product_Order_Hardware_Server (type)|Hardware_Server]] (BARE_METAL_CPU)
+// - Bare metal server by core: [[SoftLayer_Container_Product_Order_Hardware_Server (type)|Hardware_Server]] (BARE_METAL_CORE)
+// - Virtual server: [[SoftLayer_Container_Product_Order_Virtual_Guest (type)|Virtual_Guest]] (VIRTUAL_SERVER_INSTANCE)
+// - DNS domain registration: [[SoftLayer_Container_Product_Order_Dns_Domain_Registration (type)|Dns_Domain_Registration]] (ADDITIONAL_SERVICES)
+// - Local & dedicated load balancers: [[SoftLayer_Container_Product_Order_Network_LoadBalancer (type)|Network_LoadBalancer]] (ADDITIONAL_SERVICES_LOAD_BALANCER)
+// - Content delivery network: [[SoftLayer_Container_Product_Order_Network_ContentDelivery_Account (type)|Network_ContentDelivery_Account]] (ADDITIONAL_SERVICES_CDN)
+// - Content delivery network Addon: [[SoftLayer_Container_Product_Order_Network_ContentDelivery_Account_Addon (type)|Network_ContentDelivery_Account_Addon]] (ADDITIONAL_SERVICES_CDN_ADDON)
+// - Message queue: [[SoftLayer_Container_Product_Order_Network_Message_Queue (type)|Network_Message_Queue]] (ADDITIONAL_SERVICES_MESSAGE_QUEUE)
+// - Hardware & software firewalls: [[SoftLayer_Container_Product_Order_Network_Protection_Firewall (type)|Network_Protection_Firewall]] (ADDITIONAL_SERVICES_FIREWALL)
+// - Dedicated firewall: [[SoftLayer_Container_Product_Order_Network_Protection_Firewall_Dedicated (type)|Network_Protection_Firewall_Dedicated]] (ADDITIONAL_SERVICES_FIREWALL)
+// - Object storage: [[SoftLayer_Container_Product_Order_Network_Storage_Object (type)|Network_Storage_Object]] (ADDITIONAL_SERVICES_OBJECT_STORAGE)
+// - Object storage (hub): [[SoftLayer_Container_Product_Order_Network_Storage_Hub (type)|Network_Storage_Hub]] (ADDITIONAL_SERVICES_OBJECT_STORAGE)
+// - Network attached storage: [[SoftLayer_Container_Product_Order_Network_Storage_Nas (type)|Network_Storage_Nas]] (ADDITIONAL_SERVICES_NETWORK_ATTACHED_STORAGE)
+// - Iscsi storage: [[SoftLayer_Container_Product_Order_Network_Storage_Iscsi (type)|Network_Storage_Iscsi]] (ADDITIONAL_SERVICES_ISCSI_STORAGE)
+// - Evault: [[SoftLayer_Container_Product_Order_Network_Storage_Backup_Evault_Vault (type)|Network_Storage_Backup_Evault_Vault]] (ADDITIONAL_SERVICES)
+// - Evault Plugin: [[SoftLayer_Container_Product_Order_Network_Storage_Backup_Evault_Plugin (type)|Network_Storage_Backup_Evault_Plugin]] (ADDITIONAL_SERVICES)
+// - Application delivery appliance: [[SoftLayer_Container_Product_Order_Network_Application_Delivery_Controller (type)|Network_Application_Delivery_Controller]] (ADDITIONAL_SERVICES_APPLICATION_DELIVERY_APPLIANCE)
+// - Network subnet: [[SoftLayer_Container_Product_Order_Network_Subnet (type)|Network_Subnet]] (ADDITIONAL_SERVICES)
+// - Global IPv4: [[SoftLayer_Container_Product_Order_Network_Subnet (type)|Network_Subnet]] (ADDITIONAL_SERVICES_GLOBAL_IP_ADDRESSES)
+// - Global IPv6: [[SoftLayer_Container_Product_Order_Network_Subnet (type)|Network_Subnet]] (ADDITIONAL_SERVICES_GLOBAL_IP_ADDRESSES)
+// - Network VLAN: [[SoftLayer_Container_Product_Order_Network_Vlan (type)|Network_Vlan]] (ADDITIONAL_SERVICES_NETWORK_VLAN)
+// - Portable storage: [[SoftLayer_Container_Product_Order_Virtual_Disk_Image (type)|Virtual_Disk_Image]] (ADDITIONAL_SERVICES_PORTABLE_STORAGE)
+// - SSL certificate: [[SoftLayer_Container_Product_Order_Security_Certificate (type)|Security_Certificate]] (ADDITIONAL_SERVICES_SSL_CERTIFICATE)
+// - External authentication: [[SoftLayer_Container_Product_Order_User_Customer_External_Binding (type)|User_Customer_External_Binding]] (ADDITIONAL_SERVICES)
+// - Dedicated Host: [[SoftLayer_Container_Product_Order_Virtual_DedicatedHost (type)|Virtual_DedicatedHosts]] (DEDICATED_HOST)
+//
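+//
+// In this Go client, each container in the list above corresponds to a struct
+// in the datatypes package that builds on the base Container_Product_Order
+// fields. A minimal sketch (package id 46 is the virtual server package
+// mentioned elsewhere in this documentation; sess is a placeholder for a
+// configured *session.Session):
+//
+//    var order datatypes.Container_Product_Order_Virtual_Guest
+//    order.PackageId = sl.Int(46)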

+// Server example
+//
+// This example includes a single bare metal server being ordered with monthly billing.
+//
+// Warning: the price ids provided below may be outdated or unavailable, so you will need to determine the available prices from the bare metal server [[SoftLayer_Product_Package/getAllObjects|packages]], which have a [[SoftLayer_Product_Package_Type (type)|package type]] of '''BARE_METAL_CPU''' or '''BARE_METAL_CORE'''. You can get a full list of [[SoftLayer_Product_Package_Type/getAllObjects|package types]] to see other potentially available server packages.
+//
+// (XML request example omitted; its markup was lost in extraction.)
+//
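+// A rough Go equivalent of that example, not a verbatim translation: the ids
+// come from the original XML and may be stale, and sess is a placeholder for a
+// configured *session.Session:
+//
+//    order := datatypes.Container_Product_Order_Hardware_Server{
+//        Container_Product_Order: datatypes.Container_Product_Order{
+//            Location:         sl.String("138124"),
+//            PackageId:        sl.Int(142),
+//            Prices:           []datatypes.Product_Item_Price{{Id: sl.Int(58)}, {Id: sl.Int(22337)}},
+//            Hardware:         []datatypes.Hardware{{Hostname: sl.String("server1"), Domain: sl.String("example.com")}},
+//            UseHourlyPricing: sl.Bool(false),
+//        },
+//    }
+//    receipt, err := services.GetProductOrderService(sess).PlaceOrder(&order, sl.Bool(false))
+//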

+// Virtual server example
+//
+// This example includes 2 identical virtual servers (except for hostname) being ordered for hourly billing. It includes an optional image template id and VLAN data specified on the virtualGuest objects - primaryBackendNetworkComponent and primaryNetworkComponent.
+//
+// Warning: the price ids provided below may be outdated or unavailable, so you will need to determine the available prices from the virtual server [[SoftLayer_Product_Package/getAllObjects|package]], which has a [[SoftLayer_Product_Package_Type (type)|package type]] of '''VIRTUAL_SERVER_INSTANCE'''.
+//
+// (XML request example omitted; its markup was lost in extraction.)
+//
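+// A rough Go sketch of the same shape (ids illustrative; sess is a
+// placeholder for a configured *session.Session):
+//
+//    order := datatypes.Container_Product_Order_Virtual_Guest{}
+//    order.PackageId = sl.Int(46)
+//    order.Quantity = sl.Int(2)
+//    order.UseHourlyPricing = sl.Bool(true)
+//    order.Prices = []datatypes.Product_Item_Price{{Id: sl.Int(2159)}, {Id: sl.Int(55)}}
+//    order.VirtualGuests = []datatypes.Virtual_Guest{
+//        {Hostname: sl.String("server1"), Domain: sl.String("example.com")},
+//        {Hostname: sl.String("server2"), Domain: sl.String("example.com")},
+//    }
+//    receipt, err := services.GetProductOrderService(sess).PlaceOrder(&order, sl.Bool(false))
+//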

+// VLAN example
+//
+// Warning: the price ids provided below may be outdated or unavailable, so you will need to determine the available prices from the additional services [[SoftLayer_Product_Package/getAllObjects|package]], which has a [[SoftLayer_Product_Package_Type (type)|package type]] of '''ADDITIONAL_SERVICES'''. You can get a full list of [[SoftLayer_Product_Package_Type/getAllObjects|package types]] to find other available additional service packages.
+//
+// (XML request example omitted; its markup was lost in extraction.)
+//
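+// Rough Go sketch (ids illustrative; sess is a placeholder for a configured
+// *session.Session):
+//
+//    order := datatypes.Container_Product_Order_Network_Vlan{}
+//    order.PackageId = sl.Int(0)
+//    order.Location = sl.String("154820")
+//    order.Prices = []datatypes.Product_Item_Price{{Id: sl.Int(2021)}, {Id: sl.Int(2018)}}
+//    receipt, err := services.GetProductOrderService(sess).PlaceOrder(&order, sl.Bool(false))
+//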

+// Multiple products example
+//
+// This example includes a combination of the above examples in a single order. Note that all the configuration options for each individual order container are the same as above, except now we encapsulate each one within the orderContainers property on the base [[SoftLayer_Container_Product_Order (type)|order container]].
+//
+// Warning: not all products are available to be ordered with other products. For example, since SSL certificates require validation from a 3rd party, the approval process may take days or even weeks, and this would not be acceptable when you need your hourly virtual server right now. To better accommodate customers, we restrict several products to be ordered individually.
+//
+// (XML request example omitted; its markup was lost in extraction.)
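+//
+// Shape illustration only: the orderContainers property is a slice on the
+// base container. Note that a real multi-product order must preserve each
+// nested container's complexType, which this base-typed sketch glosses over:
+//
+//    order := datatypes.Container_Product_Order{}
+//    order.OrderContainers = []datatypes.Container_Product_Order{
+//        serverOrder.Container_Product_Order, // populated as in the server example
+//        vlanOrder.Container_Product_Order,   // populated as in the VLAN example
+//    }
+//    receipt, err := services.GetProductOrderService(sess).PlaceOrder(&order, sl.Bool(false))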

+func (r Product_Order) PlaceOrder(orderData interface{}, saveAsQuote *bool) (resp datatypes.Container_Product_Order_Receipt, err error) { + err = datatypes.SetComplexType(orderData) + if err != nil { + return + } + params := []interface{}{ + orderData, + saveAsQuote, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "placeOrder", params, &r.Options, &resp) + return +} + +// Use this method for placing server quotes and additional services quotes. The same applies for this as with verifyOrder. Send in the SoftLayer_Container_Product_Order_Hardware_Server for server quotes. After placing the quote, you must go to this URL to finish the order process. After going to this URL, it will direct you back to a SoftLayer webpage that tells us you have finished the process. After this, it will go to sales for final approval. +func (r Product_Order) PlaceQuote(orderData *datatypes.Container_Product_Order) (resp datatypes.Container_Product_Order_Receipt, err error) { + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "placeQuote", params, &r.Options, &resp) + return +} + +// This method simply finalizes an authorization from PayPal. It tells SoftLayer that the customer has completed the PayPal process. This is ONLY needed if you, the customer, have your own API into PayPal and wish to automate authorizations from PayPal and our system. For most, this method will not be needed. Once an order is placed using placeOrder() for PayPal customers, a URL is given back to the customer. In it is the token and PayerID. If you want to systematically pay with PayPal, do so then call this method with the token and PayerID. +func (r Product_Order) ProcessExternalPaymentAuthorization(token *string, payerId *string) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + token, + payerId, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "processExternalPaymentAuthorization", params, &r.Options, &resp) + return +} + +// Get list of items that are required with the item prices provided +func (r Product_Order) RequiredItems(itemPrices []datatypes.Product_Item_Price) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + itemPrices, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "requiredItems", params, &r.Options, &resp) + return +} + +// This service is used to verify that an order meets all the necessary requirements to purchase a server, virtual server or service from SoftLayer. It will verify that the products requested do not conflict. For example, you cannot order a Windows firewall with a Linux operating system. It will also check to make sure you have provided all the products that are required for the [[SoftLayer_Product_Package_Order_Configuration (type)|package configuration]] associated with the [[SoftLayer_Product_Package|package id]] on each of the [[SoftLayer_Container_Product_Order (type)|order containers]] specified.

    +// +// This service returns the same container that was provided, but with additional information that can be used for debugging or validation. It will also contain pricing information (prorated if applicable) for each of the products on the order. If an exception occurs during verification, a container with the SoftLayer_Exception_Order exception type will be specified in the result.

    +// +// verifyOrder accepts the same [[SoftLayer_Container_Product_Order (type)|container types]] as placeOrder, so see [[SoftLayer_Product_Order/placeOrder|placeOrder]] for more details. +// +// +func (r Product_Order) VerifyOrder(orderData interface{}) (resp datatypes.Container_Product_Order, err error) { + err = datatypes.SetComplexType(orderData) + if err != nil { + return + } + params := []interface{}{ + orderData, + } + err = r.Session.DoRequest("SoftLayer_Product_Order", "verifyOrder", params, &r.Options, &resp) + return +} + +// The SoftLayer_Product_Package data type contains information about packages from which orders can be generated. Packages contain general information regarding what is in them, where they are currently sold, availability, and pricing. +type Product_Package struct { + Session *session.Session + Options sl.Options +} + +// GetProductPackageService returns an instance of the Product_Package SoftLayer service +func GetProductPackageService(sess *session.Session) Product_Package { + return Product_Package{Session: sess} +} + +func (r Product_Package) Id(id int) Product_Package { + r.Options.Id = &id + return r +} + +func (r Product_Package) Mask(mask string) Product_Package { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Package) Filter(filter string) Product_Package { + r.Options.Filter = filter + return r +} + +func (r Product_Package) Limit(limit int) Product_Package { + r.Options.Limit = &limit + return r +} + +func (r Product_Package) Offset(offset int) Product_Package { + r.Options.Offset = &offset + return r +} + +// Retrieve The results from this call are similar to [[SoftLayer_Product_Package/getCategories|getCategories]], but these ONLY include account-restricted prices. Not all accounts have restricted pricing. +func (r Product_Package) GetAccountRestrictedCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAccountRestrictedCategories", nil, &r.Options, &resp) + return +} + +// Retrieve The flag to indicate if there are any restricted prices in a package for the currently-active account. +func (r Product_Package) GetAccountRestrictedPricesFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAccountRestrictedPricesFlag", nil, &r.Options, &resp) + return +} + +// Return a list of Items in the package with their active prices. +func (r Product_Package) GetActiveItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveItems", nil, &r.Options, &resp) + return +} + +// This method is deprecated and should not be used in production code. +// +// This method will return the [[SoftLayer_Product_Package]] objects from which you can order a bare metal server, virtual server, service (such as CDN or Object Storage) or other software filtered by an attribute type associated with the package. Once you have the package you want to order from, you may query one of various endpoints from that package to get specific information about its products and pricing. See [[SoftLayer_Product_Package/getCategories|getCategories]] or [[SoftLayer_Product_Package/getItems|getItems]] for more information. 
+func (r Product_Package) GetActivePackagesByAttribute(attributeKeyName *string) (resp []datatypes.Product_Package, err error) { + params := []interface{}{ + attributeKeyName, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActivePackagesByAttribute", params, &r.Options, &resp) + return +} + +// Retrieve The available preset configurations for this package. +func (r Product_Package) GetActivePresets() (resp []datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActivePresets", nil, &r.Options, &resp) + return +} + +// This method pulls all the active private hosted cloud packages. This will give you a basic description of the packages that are currently active and from which you can order private hosted cloud configurations. +func (r Product_Package) GetActivePrivateHostedCloudPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActivePrivateHostedCloudPackages", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of valid RAM items available for purchase in this package. +func (r Product_Package) GetActiveRamItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveRamItems", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of valid server items available for purchase in this package. +func (r Product_Package) GetActiveServerItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveServerItems", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of valid software items available for purchase in this package. +func (r Product_Package) GetActiveSoftwareItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveSoftwareItems", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of [[SoftLayer_Product_Item_Price]] objects for pay-as-you-go usage. +func (r Product_Package) GetActiveUsagePrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveUsagePrices", nil, &r.Options, &resp) + return +} + +// This method returns a collection of active usage rate [[SoftLayer_Product_Item_Price]] objects for the current package and specified datacenter. Optionally you can retrieve the active usage rate prices for a particular [[SoftLayer_Product_Item_Category]] by specifying a category code as the first parameter. This information is useful so that you can see "pay as you go" rates (if any) for the current package, location and optionally category. +func (r Product_Package) GetActiveUsageRatePrices(locationId *int, categoryCode *string) (resp []datatypes.Product_Item_Price, err error) { + params := []interface{}{ + locationId, + categoryCode, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getActiveUsageRatePrices", params, &r.Options, &resp) + return +} + +// Retrieve This flag indicates that the package is an additional service. +func (r Product_Package) GetAdditionalServiceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAdditionalServiceFlag", nil, &r.Options, &resp) + return +} + +// This method pulls all the active packages. 
This will give you a basic description of the packages that are currently active. +func (r Product_Package) GetAllObjects() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package) GetAttributes() (resp []datatypes.Product_Package_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of valid locations for this package. (Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]]) +func (r Product_Package) GetAvailableLocations() (resp []datatypes.Product_Package_Locations, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAvailableLocations", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package) GetAvailablePackagesForImageTemplate(imageTemplate *datatypes.Virtual_Guest_Block_Device_Template_Group) (resp []datatypes.Product_Package, err error) { + params := []interface{}{ + imageTemplate, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAvailablePackagesForImageTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The maximum number of available disk storage units associated with the servers in a package. +func (r Product_Package) GetAvailableStorageUnits() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getAvailableStorageUnits", nil, &r.Options, &resp) + return +} + +// Retrieve This is a collection of categories ([[SoftLayer_Product_Item_Category]]) associated with a package which can be used for ordering. These categories have several objects prepopulated which are useful when determining the available products for purchase. The categories contain groups ([[SoftLayer_Product_Package_Item_Category_Group]]) that organize the products and prices by similar features. For example, operating systems will be grouped by their manufacturer and virtual server disks will be grouped by their disk type (SAN vs. local). Each group will contain prices ([[SoftLayer_Product_Item_Price]]) which you can use to determine the cost of each product. Each price has a product ([[SoftLayer_Product_Item]]) which provides the name and other useful information about the server, service or software you may purchase. +func (r Product_Package) GetCategories() (resp []datatypes.Product_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getCategories", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package) GetCdnItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getCdnItems", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package) GetCloudStorageItems(provider *int) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + provider, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getCloudStorageItems", params, &r.Options, &resp) + return +} + +// Retrieve The item categories associated with a package, including information detailing which item categories are required as part of a SoftLayer product order.
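+//
+// Sketch (packageId is a placeholder; the mask names are illustrative
+// properties of the configuration records):
+//
+//    cfg, err := services.GetProductPackageService(sess).
+//        Id(packageId).
+//        Mask("itemCategory;isRequired").
+//        GetConfiguration()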
+func (r Product_Package) GetConfiguration() (resp []datatypes.Product_Package_Order_Configuration, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of valid RAM items available for purchase in this package. +func (r Product_Package) GetDefaultRamItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDefaultRamItems", nil, &r.Options, &resp) + return +} + +// Retrieve The node type for a package in a solution deployment. +func (r Product_Package) GetDeploymentNodeType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDeploymentNodeType", nil, &r.Options, &resp) + return +} + +// Retrieve The packages that are allowed in a multi-server solution. (Deprecated) +func (r Product_Package) GetDeploymentPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDeploymentPackages", nil, &r.Options, &resp) + return +} + +// Retrieve The solution deployment type. +func (r Product_Package) GetDeploymentType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDeploymentType", nil, &r.Options, &resp) + return +} + +// Retrieve The package that represents a multi-server solution. (Deprecated) +func (r Product_Package) GetDeployments() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDeployments", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates the package does not allow custom disk partitions. +func (r Product_Package) GetDisallowCustomDiskPartitions() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getDisallowCustomDiskPartitions", nil, &r.Options, &resp) + return +} + +// Retrieve The Softlayer order step is optionally step-based. This returns the first SoftLayer_Product_Package_Order_Step in the step-based order process. +func (r Product_Package) GetFirstOrderStep() (resp datatypes.Product_Package_Order_Step, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getFirstOrderStep", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package is a specialized network gateway appliance package. +func (r Product_Package) GetGatewayApplianceFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getGatewayApplianceFlag", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates that the package supports GPUs. +func (r Product_Package) GetGpuFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getGpuFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the package contains prices that can be ordered hourly. +func (r Product_Package) GetHourlyBillingAvailableFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getHourlyBillingAvailableFlag", nil, &r.Options, &resp) + return +} + +// Returns a collection of SoftLayer_Product_Item_Attribute_Type objects. These item attribute types specifically deal with when an item, SoftLayer_Product_Item, from the product catalog may no longer be available. The keynames for these attribute types start with 'UNAVAILABLE_AFTER_DATE_*', where the '*' may represent any string. For example, 'UNAVAILABLE_AFTER_DATE_NEW_ORDERS', signifies that the item is not available for new orders. 
There is a catch all attribute type, 'UNAVAILABLE_AFTER_DATE_ALL'. If an item has one of these availability attributes set, the value should be a valid date in MM/DD/YYYY, indicating the date after which the item will no longer be available. +func (r Product_Package) GetItemAvailabilityTypes() (resp []datatypes.Product_Item_Attribute_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemAvailabilityTypes", nil, &r.Options, &resp) + return +} + +// Retrieve The item-item conflicts associated with a package. +func (r Product_Package) GetItemConflicts() (resp []datatypes.Product_Item_Resource_Conflict, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemConflicts", nil, &r.Options, &resp) + return +} + +// Retrieve The item-location conflicts associated with a package. +func (r Product_Package) GetItemLocationConflicts() (resp []datatypes.Product_Item_Resource_Conflict, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemLocationConflicts", nil, &r.Options, &resp) + return +} + +// Retrieve cross reference for item prices +func (r Product_Package) GetItemPriceReferences() (resp []datatypes.Product_Package_Item_Prices, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemPriceReferences", nil, &r.Options, &resp) + return +} + +// Retrieve A collection of SoftLayer_Product_Item_Prices that are valid for this package. +func (r Product_Package) GetItemPrices() (resp []datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemPrices", nil, &r.Options, &resp) + return +} + +// Return a collection of SoftLayer_Item_Price objects from a collection of SoftLayer_Software_Description +func (r Product_Package) GetItemPricesFromSoftwareDescriptions(softwareDescriptions []datatypes.Software_Description, includeTranslationsFlag *bool, returnAllPricesFlag *bool) (resp []datatypes.Product_Item_Price, err error) { + params := []interface{}{ + softwareDescriptions, + includeTranslationsFlag, + returnAllPricesFlag, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemPricesFromSoftwareDescriptions", params, &r.Options, &resp) + return +} + +// Retrieve A collection of valid items available for purchase in this package. +func (r Product_Package) GetItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItems", nil, &r.Options, &resp) + return +} + +// Return a collection of [[SoftLayer_Product_Item]] objects from a [[SoftLayer_Virtual_Guest_Block_Device_Template_Group]] object +func (r Product_Package) GetItemsFromImageTemplate(imageTemplate *datatypes.Virtual_Guest_Block_Device_Template_Group) (resp []datatypes.Product_Item, err error) { + params := []interface{}{ + imageTemplate, + } + err = r.Session.DoRequest("SoftLayer_Product_Package", "getItemsFromImageTemplate", params, &r.Options, &resp) + return +} + +// Retrieve A collection of valid locations for this package. (Deprecated - Use [[SoftLayer_Product_Package/getRegions|getRegions]]) +func (r Product_Package) GetLocations() (resp []datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getLocations", nil, &r.Options, &resp) + return +} + +// Retrieve The lowest server [[SoftLayer_Product_Item_Price]] related to this package. 
+func (r Product_Package) GetLowestServerPrice() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getLowestServerPrice", nil, &r.Options, &resp) + return +} + +// Retrieve The maximum available network speed associated with the package. +func (r Product_Package) GetMaximumPortSpeed() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getMaximumPortSpeed", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package) GetMessageQueueItems() (resp []datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getMessageQueueItems", nil, &r.Options, &resp) + return +} + +// Retrieve The minimum available network speed associated with the package. +func (r Product_Package) GetMinimumPortSpeed() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getMinimumPortSpeed", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates that this is a MongoDB engineered package. (Deprecated) +func (r Product_Package) GetMongoDbEngineeredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getMongoDbEngineeredFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package) GetObject() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getObject", nil, &r.Options, &resp) + return +} + +// This method will return a collection of [[SoftLayer_Container_Product_Order_Network_Storage_Hub_Datacenter]] objects which contain a datacenter location and all the associated active usage rate prices where object storage is available. This method is really only applicable to the object storage additional service package which has a [[SoftLayer_Product_Package_Type]] of '''ADDITIONAL_SERVICES_OBJECT_STORAGE'''. This information is useful so that you can see the "pay as you go" rates per datacenter. +func (r Product_Package) GetObjectStorageDatacenters() (resp []datatypes.Container_Product_Order_Network_Storage_Hub_Datacenter, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getObjectStorageDatacenters", nil, &r.Options, &resp) + return +} + +// This method will return a collection of [[SoftLayer_Container_Product_Order_Network_Storage_ObjectStorage_LocationGroup]] objects which contain a location group and all the associated active usage rate prices where object storage is available. This method is really only applicable to the object storage additional service package which has a [[SoftLayer_Product_Package_Type]] of '''ADDITIONAL_SERVICES_OBJECT_STORAGE'''. This information is useful so that you can see the "pay as you go" rates per location group. +func (r Product_Package) GetObjectStorageLocationGroups() (resp []datatypes.Container_Product_Order_Network_Storage_ObjectStorage_LocationGroup, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getObjectStorageLocationGroups", nil, &r.Options, &resp) + return +} + +// Retrieve The premium price modifiers associated with the [[SoftLayer_Product_Item_Price]] and [[SoftLayer_Location]] objects in a package. +func (r Product_Package) GetOrderPremiums() (resp []datatypes.Product_Item_Price_Premium, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getOrderPremiums", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates the package is pre-configured. 
(Deprecated) +func (r Product_Package) GetPreconfiguredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPreconfiguredFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package requires the user to define a preset configuration. +func (r Product_Package) GetPresetConfigurationRequiredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPresetConfigurationRequiredFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package prevents the user from specifying a Vlan. +func (r Product_Package) GetPreventVlanSelectionFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPreventVlanSelectionFlag", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates the package is for a private hosted cloud deployment. (Deprecated) +func (r Product_Package) GetPrivateHostedCloudPackageFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPrivateHostedCloudPackageFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The server role of the private hosted cloud deployment. (Deprecated) +func (r Product_Package) GetPrivateHostedCloudPackageType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPrivateHostedCloudPackageType", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package only has access to the private network. +func (r Product_Package) GetPrivateNetworkOnlyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the package is a specialized mass storage QuantaStor package. +func (r Product_Package) GetQuantaStorPackageFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getQuantaStorPackageFlag", nil, &r.Options, &resp) + return +} + +// Retrieve This flag indicates the package does not allow different disks with RAID. +func (r Product_Package) GetRaidDiskRestrictionFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getRaidDiskRestrictionFlag", nil, &r.Options, &resp) + return +} + +// Retrieve This flag determines if the package contains a redundant power supply product. +func (r Product_Package) GetRedundantPowerFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getRedundantPowerFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The regional locations that a package is available in. +func (r Product_Package) GetRegions() (resp []datatypes.Location_Region, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getRegions", nil, &r.Options, &resp) + return +} + +// Retrieve The resource group template that describes a multi-server solution. (Deprecated) +func (r Product_Package) GetResourceGroupTemplate() (resp datatypes.Resource_Group_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package", "getResourceGroupTemplate", nil, &r.Options, &resp) + return +} + +// This call is similar to [[SoftLayer_Product_Package/getCategories|getCategories]], except that it does not include account-restricted pricing. Not all accounts have restricted pricing. 
+func (r Product_Package) GetStandardCategories() (resp []datatypes.Product_Item_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package", "getStandardCategories", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The top level category code for this service offering.
+func (r Product_Package) GetTopLevelItemCategoryCode() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package", "getTopLevelItemCategoryCode", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of service offering. This property can be used to help filter packages.
+func (r Product_Package) GetType() (resp datatypes.Product_Package_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// Package presets are used to simplify ordering by eliminating the need for price ids when submitting orders.
+//
+// Orders submitted with a preset id defined will use the prices included in the package preset. Prices submitted on an order with a preset id will replace the prices included in the package preset for that price's category. If the package preset has a fixed configuration flag (fixedConfigurationFlag) set then the prices included in the preset configuration cannot be replaced by prices submitted on the order. The only exception to the fixed configuration flag would be if a price submitted on the order is an account-restricted price for the same product item.
+type Product_Package_Preset struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetProductPackagePresetService returns an instance of the Product_Package_Preset SoftLayer service
+func GetProductPackagePresetService(sess *session.Session) Product_Package_Preset {
+	return Product_Package_Preset{Session: sess}
+}
+
+func (r Product_Package_Preset) Id(id int) Product_Package_Preset {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Product_Package_Preset) Mask(mask string) Product_Package_Preset {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Product_Package_Preset) Filter(filter string) Product_Package_Preset {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Product_Package_Preset) Limit(limit int) Product_Package_Preset {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Product_Package_Preset) Offset(offset int) Product_Package_Preset {
+	r.Options.Offset = &offset
+	return r
+}
+
+// This method returns all the active package presets.
+func (r Product_Package_Preset) GetAllObjects() (resp []datatypes.Product_Package_Preset, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Product_Package_Preset) GetAvailableStorageUnits() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getAvailableStorageUnits", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The item categories that are included in this package preset configuration.
+func (r Product_Package_Preset) GetCategories() (resp []datatypes.Product_Item_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getCategories", nil, &r.Options, &resp)
+	return
+}
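To make the preset flow concrete, here is a small sketch of inspecting a preset before ordering with it, using the accessors defined just below (an editorial illustration; the preset id is a placeholder, and the KeyName field on the preset datatype is an assumption about the data model):

presetSvc := services.GetProductPackagePresetService(sess)
presetID := 405 // illustrative preset id

preset, err := presetSvc.Id(presetID).Mask("id,keyName,name").GetObject()
if err != nil {
	log.Fatal(err)
}
// A fixed configuration preset will not let order prices override its own.
fixed, _ := presetSvc.Id(presetID).GetFixedConfigurationFlag()
monthly, _ := presetSvc.Id(presetID).GetTotalMinimumRecurringFee()
if preset.KeyName != nil {
	fmt.Printf("%s fixed=%t starting monthly fee=%v\n", *preset.KeyName, fixed, monthly)
}

+
+// Retrieve The preset configuration (category and price).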
+func (r Product_Package_Preset) GetConfiguration() (resp []datatypes.Product_Package_Preset_Configuration, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getConfiguration", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A package preset with this flag set will not allow the prices defined in the preset configuration to be overridden during order placement.
+func (r Product_Package_Preset) GetFixedConfigurationFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getFixedConfigurationFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The lowest server prices related to this package preset.
+func (r Product_Package_Preset) GetLowestPresetServerPrice() (resp datatypes.Product_Item_Price, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getLowestPresetServerPrice", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Product_Package_Preset) GetObject() (resp datatypes.Product_Package_Preset, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The package this preset belongs to.
+func (r Product_Package_Preset) GetPackage() (resp datatypes.Product_Package, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getPackage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The item categories associated with a package preset, including information detailing which item categories are required as part of a SoftLayer product order.
+func (r Product_Package_Preset) GetPackageConfiguration() (resp []datatypes.Product_Package_Order_Configuration, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getPackageConfiguration", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The item prices that are included in this package preset configuration.
+func (r Product_Package_Preset) GetPrices() (resp []datatypes.Product_Item_Price, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getPrices", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Describes how all disks in this preset will be configured.
+func (r Product_Package_Preset) GetStorageGroupTemplateArrays() (resp []datatypes.Configuration_Storage_Group_Template_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getStorageGroupTemplateArrays", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The starting hourly price for this configuration. Additional options not defined in the preset may increase the cost.
+func (r Product_Package_Preset) GetTotalMinimumHourlyFee() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getTotalMinimumHourlyFee", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The starting monthly price for this configuration. Additional options not defined in the preset may increase the cost.
+func (r Product_Package_Preset) GetTotalMinimumRecurringFee() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Preset", "getTotalMinimumRecurringFee", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Product_Package_Server data type contains summarized information for bare metal servers regarding pricing, processor stats, and feature sets.
+type Product_Package_Server struct { + Session *session.Session + Options sl.Options +} + +// GetProductPackageServerService returns an instance of the Product_Package_Server SoftLayer service +func GetProductPackageServerService(sess *session.Session) Product_Package_Server { + return Product_Package_Server{Session: sess} +} + +func (r Product_Package_Server) Id(id int) Product_Package_Server { + r.Options.Id = &id + return r +} + +func (r Product_Package_Server) Mask(mask string) Product_Package_Server { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Package_Server) Filter(filter string) Product_Package_Server { + r.Options.Filter = filter + return r +} + +func (r Product_Package_Server) Limit(limit int) Product_Package_Server { + r.Options.Limit = &limit + return r +} + +func (r Product_Package_Server) Offset(offset int) Product_Package_Server { + r.Options.Offset = &offset + return r +} + +// This method will grab all the package servers. +func (r Product_Package_Server) GetAllObjects() (resp []datatypes.Product_Package_Server, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Server) GetCatalog() (resp datatypes.Product_Catalog, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getCatalog", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Server) GetItem() (resp datatypes.Product_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getItem", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Server) GetItemPrice() (resp datatypes.Product_Item_Price, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getItemPrice", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package_Server) GetObject() (resp datatypes.Product_Package_Server, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Server) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getPackage", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Product_Package_Server) GetPreset() (resp datatypes.Product_Package_Preset, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Server", "getPreset", nil, &r.Options, &resp) + return +} + +// The [[SoftLayer_Product_Package_Server_Option]] data type contains various data points associated with package servers that can be used in selection criteria. 
+type Product_Package_Server_Option struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetProductPackageServerOptionService returns an instance of the Product_Package_Server_Option SoftLayer service
+func GetProductPackageServerOptionService(sess *session.Session) Product_Package_Server_Option {
+	return Product_Package_Server_Option{Session: sess}
+}
+
+func (r Product_Package_Server_Option) Id(id int) Product_Package_Server_Option {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Product_Package_Server_Option) Mask(mask string) Product_Package_Server_Option {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Product_Package_Server_Option) Filter(filter string) Product_Package_Server_Option {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Product_Package_Server_Option) Limit(limit int) Product_Package_Server_Option {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Product_Package_Server_Option) Offset(offset int) Product_Package_Server_Option {
+	r.Options.Offset = &offset
+	return r
+}
+
+// This method will grab all the package server options.
+func (r Product_Package_Server_Option) GetAllOptions() (resp []datatypes.Product_Package_Server_Option, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Server_Option", "getAllOptions", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Product_Package_Server_Option) GetObject() (resp datatypes.Product_Package_Server_Option, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Server_Option", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// This method will grab all the package server options for the specified type.
+func (r Product_Package_Server_Option) GetOptions(typ *string) (resp []datatypes.Product_Package_Server_Option, err error) {
+	params := []interface{}{
+		typ,
+	}
+	err = r.Session.DoRequest("SoftLayer_Product_Package_Server_Option", "getOptions", params, &r.Options, &resp)
+	return
+}
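The [[SoftLayer_Product_Package_Type]] service defined just below is the usual entry point for narrowing packages down by key name. As a rough sketch (an editorial illustration; the object-filter document follows the standard SoftLayer object-filter shape, which is an assumption worth verifying against the API docs), looking up all virtual server packages might go:

typeSvc := services.GetProductPackageTypeService(sess)

// Standard SoftLayer object-filter document matching on keyName.
types, err := typeSvc.
	Filter(`{"keyName":{"operation":"VIRTUAL_SERVER_INSTANCE"}}`).
	GetAllObjects()
if err != nil || len(types) == 0 || types[0].Id == nil {
	log.Fatal("package type lookup failed")
}
pkgs, err := typeSvc.Id(*types[0].Id).GetPackages()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("found %d packages\n", len(pkgs))

+
+// The [[SoftLayer_Product_Package_Type]] object indicates the type for a service offering (package). The type can be used to filter packages. For example, if you are looking for the package representing virtual servers, you can filter on the type's key name of '''VIRTUAL_SERVER_INSTANCE'''. For bare metal servers by core or CPU, filter on '''BARE_METAL_CORE''' or '''BARE_METAL_CPU''', respectively.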
+type Product_Package_Type struct { + Session *session.Session + Options sl.Options +} + +// GetProductPackageTypeService returns an instance of the Product_Package_Type SoftLayer service +func GetProductPackageTypeService(sess *session.Session) Product_Package_Type { + return Product_Package_Type{Session: sess} +} + +func (r Product_Package_Type) Id(id int) Product_Package_Type { + r.Options.Id = &id + return r +} + +func (r Product_Package_Type) Mask(mask string) Product_Package_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Package_Type) Filter(filter string) Product_Package_Type { + r.Options.Filter = filter + return r +} + +func (r Product_Package_Type) Limit(limit int) Product_Package_Type { + r.Options.Limit = &limit + return r +} + +func (r Product_Package_Type) Offset(offset int) Product_Package_Type { + r.Options.Offset = &offset + return r +} + +// This method will return all of the available package types. +func (r Product_Package_Type) GetAllObjects() (resp []datatypes.Product_Package_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Product_Package_Type) GetObject() (resp datatypes.Product_Package_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Type", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve All the packages associated with the given package type. +func (r Product_Package_Type) GetPackages() (resp []datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Product_Package_Type", "getPackages", nil, &r.Options, &resp) + return +} + +// The SoftLayer_Product_Upgrade_Request data type contains general information relating to a hardware, virtual server, or service upgrade. It also relates a [[SoftLayer_Billing_Order]] to a [[SoftLayer_Ticket]]. +type Product_Upgrade_Request struct { + Session *session.Session + Options sl.Options +} + +// GetProductUpgradeRequestService returns an instance of the Product_Upgrade_Request SoftLayer service +func GetProductUpgradeRequestService(sess *session.Session) Product_Upgrade_Request { + return Product_Upgrade_Request{Session: sess} +} + +func (r Product_Upgrade_Request) Id(id int) Product_Upgrade_Request { + r.Options.Id = &id + return r +} + +func (r Product_Upgrade_Request) Mask(mask string) Product_Upgrade_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Product_Upgrade_Request) Filter(filter string) Product_Upgrade_Request { + r.Options.Filter = filter + return r +} + +func (r Product_Upgrade_Request) Limit(limit int) Product_Upgrade_Request { + r.Options.Limit = &limit + return r +} + +func (r Product_Upgrade_Request) Offset(offset int) Product_Upgrade_Request { + r.Options.Offset = &offset + return r +} + +// When a change is made to an upgrade by Sales, this method will approve the changes that were made. A customer must acknowledge the change and approve it so that the upgrade request can proceed. 
+func (r Product_Upgrade_Request) ApproveChanges() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "approveChanges", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account that an order belongs to.
+func (r Product_Upgrade_Request) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Indicates that the upgrade request has completed or has been cancelled.
+func (r Product_Upgrade_Request) GetCompletedFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getCompletedFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve This is the invoice associated with the upgrade request. For hourly servers or services, an invoice will not be available.
+func (r Product_Upgrade_Request) GetInvoice() (resp datatypes.Billing_Invoice, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getInvoice", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves a SoftLayer_Product_Upgrade_Request object on your account whose ID corresponds to the ID of the init parameter passed to the SoftLayer_Product_Upgrade_Request service.
+func (r Product_Upgrade_Request) GetObject() (resp datatypes.Product_Upgrade_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An order record associated with the upgrade request.
+func (r Product_Upgrade_Request) GetOrder() (resp datatypes.Billing_Order, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getOrder", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A server object associated with the upgrade request, if any.
+func (r Product_Upgrade_Request) GetServer() (resp datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getServer", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The current status of the upgrade request.
+func (r Product_Upgrade_Request) GetStatus() (resp datatypes.Product_Upgrade_Request_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The ticket that is used to coordinate the upgrade process.
+func (r Product_Upgrade_Request) GetTicket() (resp datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getTicket", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The user that placed the order.
+func (r Product_Upgrade_Request) GetUser() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getUser", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A virtual server object associated with the upgrade request, if any.
+func (r Product_Upgrade_Request) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "getVirtualGuest", nil, &r.Options, &resp)
+	return
+}
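As an illustrative sketch of the acknowledge-and-approve flow (an editorial addition; the request id is a placeholder, and the Name field on the status datatype is an assumption about the data model), checking an upgrade request and approving pending sales changes might look like:

upgradeSvc := services.GetProductUpgradeRequestService(sess)
upgradeRequestID := 123 // illustrative request id

status, err := upgradeSvc.Id(upgradeRequestID).GetStatus()
if err != nil {
	log.Fatal(err)
}
if status.Name != nil {
	fmt.Println("current status:", *status.Name)
}

// Acknowledge and approve changes made by Sales so the upgrade can proceed.
approved, err := upgradeSvc.Id(upgradeRequestID).ApproveChanges()
if err != nil {
	log.Fatal(err)
}
fmt.Println("approved:", approved)

+
+// In case an upgrade cannot be performed, the maintenance window needs to be updated to a future date.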
+func (r Product_Upgrade_Request) UpdateMaintenanceWindow(maintenanceStartTime *datatypes.Time, maintenanceWindowId *int) (resp bool, err error) { + params := []interface{}{ + maintenanceStartTime, + maintenanceWindowId, + } + err = r.Session.DoRequest("SoftLayer_Product_Upgrade_Request", "updateMaintenanceWindow", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/provisioning.go b/vendor/github.com/softlayer/softlayer-go/services/provisioning.go new file mode 100644 index 0000000000..dc426bbb28 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/provisioning.go @@ -0,0 +1,564 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Provisioning_Hook contains all the information needed to add a hook into a server/Virtual provision and os reload. +type Provisioning_Hook struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningHookService returns an instance of the Provisioning_Hook SoftLayer service +func GetProvisioningHookService(sess *session.Session) Provisioning_Hook { + return Provisioning_Hook{Session: sess} +} + +func (r Provisioning_Hook) Id(id int) Provisioning_Hook { + r.Options.Id = &id + return r +} + +func (r Provisioning_Hook) Mask(mask string) Provisioning_Hook { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Hook) Filter(filter string) Provisioning_Hook { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Hook) Limit(limit int) Provisioning_Hook { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Hook) Offset(offset int) Provisioning_Hook { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Provisioning_Hook) CreateObject(templateObject *datatypes.Provisioning_Hook) (resp datatypes.Provisioning_Hook, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Provisioning_Hook) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Provisioning_Hook) EditObject(templateObject *datatypes.Provisioning_Hook) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Provisioning_Hook) GetAccount() (resp datatypes.Account, err 
error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Provisioning_Hook) GetHookType() (resp datatypes.Provisioning_Hook_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "getHookType", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Provisioning_Hook) GetObject() (resp datatypes.Provisioning_Hook, err error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Hook", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Provisioning_Hook_Type struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetProvisioningHookTypeService returns an instance of the Provisioning_Hook_Type SoftLayer service
+func GetProvisioningHookTypeService(sess *session.Session) Provisioning_Hook_Type {
+	return Provisioning_Hook_Type{Session: sess}
+}
+
+func (r Provisioning_Hook_Type) Id(id int) Provisioning_Hook_Type {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Provisioning_Hook_Type) Mask(mask string) Provisioning_Hook_Type {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Provisioning_Hook_Type) Filter(filter string) Provisioning_Hook_Type {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Provisioning_Hook_Type) Limit(limit int) Provisioning_Hook_Type {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Provisioning_Hook_Type) Offset(offset int) Provisioning_Hook_Type {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Provisioning_Hook_Type) GetAllHookTypes() (resp []datatypes.Provisioning_Hook_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Hook_Type", "getAllHookTypes", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Provisioning_Hook_Type) GetObject() (resp datatypes.Provisioning_Hook_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Hook_Type", "getObject", nil, &r.Options, &resp)
+	return
+}
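A provisioning hook pairs a name with a script URI that runs after a provision or OS reload. A minimal sketch of creating one (an editorial addition; the Name, Uri, and TypeId fields are assumptions about the SoftLayer_Provisioning_Hook data model, and the script URL is a placeholder):

hookSvc := services.GetProvisioningHookService(sess)

// Look up valid hook types first; the id feeds TypeId below.
hookTypes, err := services.GetProvisioningHookTypeService(sess).GetAllHookTypes()
if err != nil || len(hookTypes) == 0 {
	log.Fatal("no hook types available")
}

hook, err := hookSvc.CreateObject(&datatypes.Provisioning_Hook{
	Name:   sl.String("post-install"),
	Uri:    sl.String("https://example.com/setup.sh"), // placeholder script
	TypeId: hookTypes[0].Id,
})
if err != nil {
	log.Fatal(err)
}
if hook.Id != nil {
	fmt.Println("created hook", *hook.Id)
}

+
+// The SoftLayer_Provisioning_Maintenance_Classification represents a maintenance type for the specific hardware maintenance desired.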
+type Provisioning_Maintenance_Classification struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningMaintenanceClassificationService returns an instance of the Provisioning_Maintenance_Classification SoftLayer service +func GetProvisioningMaintenanceClassificationService(sess *session.Session) Provisioning_Maintenance_Classification { + return Provisioning_Maintenance_Classification{Session: sess} +} + +func (r Provisioning_Maintenance_Classification) Id(id int) Provisioning_Maintenance_Classification { + r.Options.Id = &id + return r +} + +func (r Provisioning_Maintenance_Classification) Mask(mask string) Provisioning_Maintenance_Classification { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Maintenance_Classification) Filter(filter string) Provisioning_Maintenance_Classification { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Maintenance_Classification) Limit(limit int) Provisioning_Maintenance_Classification { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Maintenance_Classification) Offset(offset int) Provisioning_Maintenance_Classification { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Provisioning_Maintenance_Classification) GetItemCategories() (resp []datatypes.Provisioning_Maintenance_Classification_Item_Category, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification", "getItemCategories", nil, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Provisioning_Maintenance_Classification data types, which contain all maintenance classifications. +func (r Provisioning_Maintenance_Classification) GetMaintenanceClassification(maintenanceClassificationId *int) (resp []datatypes.Provisioning_Maintenance_Classification, err error) { + params := []interface{}{ + maintenanceClassificationId, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification", "getMaintenanceClassification", params, &r.Options, &resp) + return +} + +// Retrieve an array of SoftLayer_Provisioning_Maintenance_Classification data types, which contain all maintenance classifications. 
+func (r Provisioning_Maintenance_Classification) GetMaintenanceClassificationsByItemCategory() (resp []datatypes.Provisioning_Maintenance_Classification_Item_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification", "getMaintenanceClassificationsByItemCategory", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Provisioning_Maintenance_Classification) GetObject() (resp datatypes.Provisioning_Maintenance_Classification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Provisioning_Maintenance_Classification_Item_Category struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetProvisioningMaintenanceClassificationItemCategoryService returns an instance of the Provisioning_Maintenance_Classification_Item_Category SoftLayer service
+func GetProvisioningMaintenanceClassificationItemCategoryService(sess *session.Session) Provisioning_Maintenance_Classification_Item_Category {
+	return Provisioning_Maintenance_Classification_Item_Category{Session: sess}
+}
+
+func (r Provisioning_Maintenance_Classification_Item_Category) Id(id int) Provisioning_Maintenance_Classification_Item_Category {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Provisioning_Maintenance_Classification_Item_Category) Mask(mask string) Provisioning_Maintenance_Classification_Item_Category {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Provisioning_Maintenance_Classification_Item_Category) Filter(filter string) Provisioning_Maintenance_Classification_Item_Category {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Provisioning_Maintenance_Classification_Item_Category) Limit(limit int) Provisioning_Maintenance_Classification_Item_Category {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Provisioning_Maintenance_Classification_Item_Category) Offset(offset int) Provisioning_Maintenance_Classification_Item_Category {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve
+func (r Provisioning_Maintenance_Classification_Item_Category) GetMaintenanceClassification() (resp datatypes.Provisioning_Maintenance_Classification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification_Item_Category", "getMaintenanceClassification", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Provisioning_Maintenance_Classification_Item_Category) GetObject() (resp datatypes.Provisioning_Maintenance_Classification_Item_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Classification_Item_Category", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Provisioning_Maintenance_Slots represents the available slots for a given maintenance window at a SoftLayer data center.
+type Provisioning_Maintenance_Slots struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningMaintenanceSlotsService returns an instance of the Provisioning_Maintenance_Slots SoftLayer service +func GetProvisioningMaintenanceSlotsService(sess *session.Session) Provisioning_Maintenance_Slots { + return Provisioning_Maintenance_Slots{Session: sess} +} + +func (r Provisioning_Maintenance_Slots) Id(id int) Provisioning_Maintenance_Slots { + r.Options.Id = &id + return r +} + +func (r Provisioning_Maintenance_Slots) Mask(mask string) Provisioning_Maintenance_Slots { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Maintenance_Slots) Filter(filter string) Provisioning_Maintenance_Slots { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Maintenance_Slots) Limit(limit int) Provisioning_Maintenance_Slots { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Maintenance_Slots) Offset(offset int) Provisioning_Maintenance_Slots { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Provisioning_Maintenance_Slots) GetObject() (resp datatypes.Provisioning_Maintenance_Slots, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Slots", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Provisioning_Maintenance_Ticket struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningMaintenanceTicketService returns an instance of the Provisioning_Maintenance_Ticket SoftLayer service +func GetProvisioningMaintenanceTicketService(sess *session.Session) Provisioning_Maintenance_Ticket { + return Provisioning_Maintenance_Ticket{Session: sess} +} + +func (r Provisioning_Maintenance_Ticket) Id(id int) Provisioning_Maintenance_Ticket { + r.Options.Id = &id + return r +} + +func (r Provisioning_Maintenance_Ticket) Mask(mask string) Provisioning_Maintenance_Ticket { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Maintenance_Ticket) Filter(filter string) Provisioning_Maintenance_Ticket { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Maintenance_Ticket) Limit(limit int) Provisioning_Maintenance_Ticket { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Maintenance_Ticket) Offset(offset int) Provisioning_Maintenance_Ticket { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r Provisioning_Maintenance_Ticket) GetAvailableSlots() (resp datatypes.Provisioning_Maintenance_Slots, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Ticket", "getAvailableSlots", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Provisioning_Maintenance_Ticket) GetMaintenanceClass() (resp datatypes.Provisioning_Maintenance_Classification, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Ticket", "getMaintenanceClass", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Provisioning_Maintenance_Ticket) GetObject() (resp datatypes.Provisioning_Maintenance_Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Ticket", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r 
Provisioning_Maintenance_Ticket) GetTicket() (resp datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Ticket", "getTicket", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Provisioning_Maintenance_Window represents a time window during which SoftLayer performs hardware or software maintenance and upgrades.
+type Provisioning_Maintenance_Window struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetProvisioningMaintenanceWindowService returns an instance of the Provisioning_Maintenance_Window SoftLayer service
+func GetProvisioningMaintenanceWindowService(sess *session.Session) Provisioning_Maintenance_Window {
+	return Provisioning_Maintenance_Window{Session: sess}
+}
+
+func (r Provisioning_Maintenance_Window) Id(id int) Provisioning_Maintenance_Window {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Provisioning_Maintenance_Window) Mask(mask string) Provisioning_Maintenance_Window {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Provisioning_Maintenance_Window) Filter(filter string) Provisioning_Maintenance_Window {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Provisioning_Maintenance_Window) Limit(limit int) Provisioning_Maintenance_Window {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Provisioning_Maintenance_Window) Offset(offset int) Provisioning_Maintenance_Window {
+	r.Options.Offset = &offset
+	return r
+}
+
+// addCustomerUpgradeWindow() returns a boolean indicating whether the upgrade window was added.
+func (r Provisioning_Maintenance_Window) AddCustomerUpgradeWindow(customerUpgradeWindow *datatypes.Container_Provisioning_Maintenance_Window) (resp bool, err error) {
+	params := []interface{}{
+		customerUpgradeWindow,
+	}
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "addCustomerUpgradeWindow", params, &r.Options, &resp)
+	return
+}
+
+// getMaintenanceClassifications() returns a list of maintenance classifications
+func (r Provisioning_Maintenance_Window) GetMaintenanceClassifications() (resp []datatypes.Provisioning_Maintenance_Classification, err error) {
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenanceClassifications", nil, &r.Options, &resp)
+	return
+}
+
+// getMaintenanceStartEndTime() returns a specific maintenance window
+func (r Provisioning_Maintenance_Window) GetMaintenanceStartEndTime(ticketId *int) (resp datatypes.Provisioning_Maintenance_Window, err error) {
+	params := []interface{}{
+		ticketId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenanceStartEndTime", params, &r.Options, &resp)
+	return
+}
+
+// getMaintenanceWindowForTicket() returns a specific maintenance window
+func (r Provisioning_Maintenance_Window) GetMaintenanceWindowForTicket(maintenanceWindowId *int) (resp []datatypes.Provisioning_Maintenance_Window, err error) {
+	params := []interface{}{
+		maintenanceWindowId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenanceWindowForTicket", params, &r.Options, &resp)
+	return
+}
+
+// getMaintenanceWindowTicketsByTicketId() returns a list of maintenance window ticket records by ticket id
+func (r Provisioning_Maintenance_Window) GetMaintenanceWindowTicketsByTicketId(ticketId *int) (resp []datatypes.Provisioning_Maintenance_Ticket, err error) {
+	params := []interface{}{
+		ticketId,
+	}
+	err =
r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenanceWindowTicketsByTicketId", params, &r.Options, &resp) + return +} + +// This method returns a list of available maintenance windows +func (r Provisioning_Maintenance_Window) GetMaintenanceWindows(beginDate *datatypes.Time, endDate *datatypes.Time, locationId *int, slotsNeeded *int) (resp []datatypes.Provisioning_Maintenance_Window, err error) { + params := []interface{}{ + beginDate, + endDate, + locationId, + slotsNeeded, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenanceWindows", params, &r.Options, &resp) + return +} + +// (DEPRECATED) Use [[SoftLayer_Provisioning_Maintenance_Window::getMaintenanceWindows|getMaintenanceWindows]] method. +func (r Provisioning_Maintenance_Window) GetMaintenceWindows(beginDate *datatypes.Time, endDate *datatypes.Time, locationId *int, slotsNeeded *int) (resp []datatypes.Provisioning_Maintenance_Window, err error) { + params := []interface{}{ + beginDate, + endDate, + locationId, + slotsNeeded, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "getMaintenceWindows", params, &r.Options, &resp) + return +} + +// getMaintenceWindowForTicket() returns a boolean +func (r Provisioning_Maintenance_Window) UpdateCustomerUpgradeWindow(maintenanceStartTime *datatypes.Time, newMaintenanceWindowId *int, ticketId *int) (resp bool, err error) { + params := []interface{}{ + maintenanceStartTime, + newMaintenanceWindowId, + ticketId, + } + err = r.Session.DoRequest("SoftLayer_Provisioning_Maintenance_Window", "updateCustomerUpgradeWindow", params, &r.Options, &resp) + return +} + +// The SoftLayer_Provisioning_Version1_Transaction_Group data type contains general information relating to a single SoftLayer hardware transaction group. +// +// SoftLayer customers are unable to change their hardware transactions or the hardware transaction group. 
+type Provisioning_Version1_Transaction_Group struct { + Session *session.Session + Options sl.Options +} + +// GetProvisioningVersion1TransactionGroupService returns an instance of the Provisioning_Version1_Transaction_Group SoftLayer service +func GetProvisioningVersion1TransactionGroupService(sess *session.Session) Provisioning_Version1_Transaction_Group { + return Provisioning_Version1_Transaction_Group{Session: sess} +} + +func (r Provisioning_Version1_Transaction_Group) Id(id int) Provisioning_Version1_Transaction_Group { + r.Options.Id = &id + return r +} + +func (r Provisioning_Version1_Transaction_Group) Mask(mask string) Provisioning_Version1_Transaction_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Provisioning_Version1_Transaction_Group) Filter(filter string) Provisioning_Version1_Transaction_Group { + r.Options.Filter = filter + return r +} + +func (r Provisioning_Version1_Transaction_Group) Limit(limit int) Provisioning_Version1_Transaction_Group { + r.Options.Limit = &limit + return r +} + +func (r Provisioning_Version1_Transaction_Group) Offset(offset int) Provisioning_Version1_Transaction_Group { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Provisioning_Version1_Transaction_Group) GetAllObjects() (resp []datatypes.Provisioning_Version1_Transaction_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Version1_Transaction_Group", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Provisioning_Version1_Transaction_Group object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Provisioning_Version1_Transaction_Group service. +func (r Provisioning_Version1_Transaction_Group) GetObject() (resp datatypes.Provisioning_Version1_Transaction_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Provisioning_Version1_Transaction_Group", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/resource.go b/vendor/github.com/softlayer/softlayer-go/services/resource.go new file mode 100644 index 0000000000..47a77317cd --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/resource.go @@ -0,0 +1,419 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Resource_Configuration struct { + Session *session.Session + Options sl.Options +} + +// GetResourceConfigurationService returns an instance of the Resource_Configuration SoftLayer service +func GetResourceConfigurationService(sess *session.Session) Resource_Configuration { + return Resource_Configuration{Session: sess} +} + +func (r Resource_Configuration) Id(id int) Resource_Configuration { + r.Options.Id = &id + return r +} + +func (r Resource_Configuration) Mask(mask string) Resource_Configuration { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Resource_Configuration) Filter(filter string) Resource_Configuration { + r.Options.Filter = filter + return r +} + +func (r Resource_Configuration) Limit(limit int) Resource_Configuration { + r.Options.Limit = &limit + return r +} + +func (r Resource_Configuration) Offset(offset int) Resource_Configuration { + r.Options.Offset = &offset + return r +} + +// The setOsPasswordFromEncrypted method is used to set the operating system password from a key/pair encrypted password signed by SoftLayer. +func (r Resource_Configuration) SetOsPasswordFromEncrypted(encryptedPassword *string) (resp bool, err error) { + params := []interface{}{ + encryptedPassword, + } + err = r.Session.DoRequest("SoftLayer_Resource_Configuration", "setOsPasswordFromEncrypted", params, &r.Options, &resp) + return +} + +// no documentation yet +type Resource_Group struct { + Session *session.Session + Options sl.Options +} + +// GetResourceGroupService returns an instance of the Resource_Group SoftLayer service +func GetResourceGroupService(sess *session.Session) Resource_Group { + return Resource_Group{Session: sess} +} + +func (r Resource_Group) Id(id int) Resource_Group { + r.Options.Id = &id + return r +} + +func (r Resource_Group) Mask(mask string) Resource_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Resource_Group) Filter(filter string) Resource_Group { + r.Options.Filter = filter + return r +} + +func (r Resource_Group) Limit(limit int) Resource_Group { + r.Options.Limit = &limit + return r +} + +func (r Resource_Group) Offset(offset int) Resource_Group { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Resource_Group) EditObject(templateObject *datatypes.Resource_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Resource_Group", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated group ancestors. +func (r Resource_Group) GetAncestorGroups() (resp []datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getAncestorGroups", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated attributes. 
+func (r Resource_Group) GetAttributes() (resp []datatypes.Resource_Group_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated hardware members. +func (r Resource_Group) GetHardwareMembers() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getHardwareMembers", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated members. +func (r Resource_Group) GetMembers() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getMembers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Resource_Group) GetObject() (resp datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated root resource group. +func (r Resource_Group) GetRootResourceGroup() (resp datatypes.Resource_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getRootResourceGroup", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated subnet members. +func (r Resource_Group) GetSubnetMembers() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getSubnetMembers", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated template. +func (r Resource_Group) GetTemplate() (resp datatypes.Resource_Group_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getTemplate", nil, &r.Options, &resp) + return +} + +// Retrieve A resource group's associated VLAN members. 
+func (r Resource_Group) GetVlanMembers() (resp []datatypes.Resource_Group_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group", "getVlanMembers", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Resource_Group_Template struct { + Session *session.Session + Options sl.Options +} + +// GetResourceGroupTemplateService returns an instance of the Resource_Group_Template SoftLayer service +func GetResourceGroupTemplateService(sess *session.Session) Resource_Group_Template { + return Resource_Group_Template{Session: sess} +} + +func (r Resource_Group_Template) Id(id int) Resource_Group_Template { + r.Options.Id = &id + return r +} + +func (r Resource_Group_Template) Mask(mask string) Resource_Group_Template { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Resource_Group_Template) Filter(filter string) Resource_Group_Template { + r.Options.Filter = filter + return r +} + +func (r Resource_Group_Template) Limit(limit int) Resource_Group_Template { + r.Options.Limit = &limit + return r +} + +func (r Resource_Group_Template) Offset(offset int) Resource_Group_Template { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Resource_Group_Template) GetAllObjects() (resp []datatypes.Resource_Group_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group_Template", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Resource_Group_Template) GetChildren() (resp []datatypes.Resource_Group_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group_Template", "getChildren", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Resource_Group_Template) GetMembers() (resp []datatypes.Resource_Group_Template_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group_Template", "getMembers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Resource_Group_Template) GetObject() (resp datatypes.Resource_Group_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group_Template", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Resource_Group_Template) GetPackage() (resp datatypes.Product_Package, err error) { + err = r.Session.DoRequest("SoftLayer_Resource_Group_Template", "getPackage", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Resource_Metadata struct { + Session *session.Session + Options sl.Options +} + +// GetResourceMetadataService returns an instance of the Resource_Metadata SoftLayer service +func GetResourceMetadataService(sess *session.Session) Resource_Metadata { + return Resource_Metadata{Session: sess} +} + +func (r Resource_Metadata) Id(id int) Resource_Metadata { + r.Options.Id = &id + return r +} + +func (r Resource_Metadata) Mask(mask string) Resource_Metadata { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Resource_Metadata) Filter(filter string) Resource_Metadata { + r.Options.Filter = filter + return r +} + +func (r Resource_Metadata) Limit(limit int) Resource_Metadata { + r.Options.Limit = &limit + return r +} + +func (r Resource_Metadata) Offset(offset int) Resource_Metadata { + r.Options.Offset = &offset + return r +} + +// The getBackendMacAddresses 
method retrieves a list of backend MAC addresses for the resource.
+func (r Resource_Metadata) GetBackendMacAddresses() (resp []string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getBackendMacAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// The getDatacenter method retrieves the name of the datacenter in which the resource is located.
+func (r Resource_Metadata) GetDatacenter() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getDatacenter", nil, &r.Options, &resp)
+	return
+}
+
+// The getDatacenterId method retrieves the ID of the datacenter in which the resource is located.
+func (r Resource_Metadata) GetDatacenterId() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getDatacenterId", nil, &r.Options, &resp)
+	return
+}
+
+// The getDomain method retrieves the domain for the resource.
+func (r Resource_Metadata) GetDomain() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getDomain", nil, &r.Options, &resp)
+	return
+}
+
+// The getFrontendMacAddresses method retrieves a list of frontend MAC addresses for the resource.
+func (r Resource_Metadata) GetFrontendMacAddresses() (resp []string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getFrontendMacAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// The getFullyQualifiedDomainName method returns the hostname and domain for the resource combined into a single value, avoiding the need to call multiple methods for that information.
+func (r Resource_Metadata) GetFullyQualifiedDomainName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getFullyQualifiedDomainName", nil, &r.Options, &resp)
+	return
+}
+
+// The getGlobalIdentifier method retrieves the globalIdentifier for the resource.
+func (r Resource_Metadata) GetGlobalIdentifier() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getGlobalIdentifier", nil, &r.Options, &resp)
+	return
+}
+
+// The getHostname method retrieves the hostname for the resource.
+func (r Resource_Metadata) GetHostname() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getHostname", nil, &r.Options, &resp)
+	return
+}
+
+// The getId method retrieves the ID for the resource.
+func (r Resource_Metadata) GetId() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getId", nil, &r.Options, &resp)
+	return
+}
+
+// The getPrimaryBackendIpAddress method retrieves the primary backend IP address for the resource.
+func (r Resource_Metadata) GetPrimaryBackendIpAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getPrimaryBackendIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// The getPrimaryIpAddress method retrieves the primary IP address for the resource. For resources with a frontend network, the frontend IP address will be returned. For resources that have been provisioned with only a backend network, the backend IP address will be returned, as a frontend address will not exist.
+func (r Resource_Metadata) GetPrimaryIpAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getPrimaryIpAddress", nil, &r.Options, &resp)
+	return
+}
+
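The accessors above are thin wrappers over the `SoftLayer_Resource_Metadata` API. As a minimal sketch of how a caller might use them (assuming credentials are available to `session.New`, which falls back to the `SL_USERNAME`/`SL_API_KEY` environment variables; the printed values are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	// session.New with no arguments picks credentials up from the environment.
	sess := session.New()
	svc := services.GetResourceMetadataService(sess)

	// Fetch the combined hostname+domain in one call.
	fqdn, err := svc.GetFullyQualifiedDomainName()
	if err != nil {
		log.Fatal(err)
	}

	datacenter, err := svc.GetDatacenter()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%s runs in %s\n", fqdn, datacenter)
}
```
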
+// The getProvisionState method retrieves the provision state of the resource. The provision state may be used to determine when it is considered safe to perform additional setup operations. The method returns 'PROCESSING' while the provision is in progress and 'COMPLETE' when it has finished.
+func (r Resource_Metadata) GetProvisionState() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getProvisionState", nil, &r.Options, &resp)
+	return
+}
+
+// The getRouter method will return the router associated with a network component. When the router is redundant, the hostname of the redundant group will be returned, rather than the router hostname.
+func (r Resource_Metadata) GetRouter(macAddress *string) (resp string, err error) {
+	params := []interface{}{
+		macAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getRouter", params, &r.Options, &resp)
+	return
+}
+
+// The getServiceResource method retrieves a specific service resource associated with the resource. Service resources are additional resources that may be used by this resource.
+func (r Resource_Metadata) GetServiceResource(serviceName *string, index *int) (resp string, err error) {
+	params := []interface{}{
+		serviceName,
+		index,
+	}
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getServiceResource", params, &r.Options, &resp)
+	return
+}
+
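Methods that take arguments, such as `GetRouter` above, expect pointer parameters; the `sl` package's helpers (`sl.String`, `sl.Int`, and so on) convert literals into the pointer types these signatures require. A rough sketch, assuming a session configured as in the previous example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	svc := services.GetResourceMetadataService(session.New())

	// Look up the backend MAC addresses, then ask which router serves
	// the first one.
	macs, err := svc.GetBackendMacAddresses()
	if err != nil || len(macs) == 0 {
		log.Fatal("no backend MAC addresses found: ", err)
	}

	router, err := svc.GetRouter(sl.String(macs[0]))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("backend router:", router)
}
```
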
+// The getServiceResources method retrieves all service resources associated with the resource. Service resources are additional resources that may be used by this resource. The output format is <type>=<address> for each service resource.
+func (r Resource_Metadata) GetServiceResources() (resp []datatypes.Container_Resource_Metadata_ServiceResource, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getServiceResources", nil, &r.Options, &resp)
+	return
+}
+
+// The getTags method retrieves all tags associated with the resource. Tags are single keywords assigned to a resource that assist the user in identifying the resource and its roles when performing a simple search. Tags are assigned by any user with access to the resource.
+func (r Resource_Metadata) GetTags() (resp []string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getTags", nil, &r.Options, &resp)
+	return
+}
+
+// The getUserMetadata method retrieves metadata provided by users who interact with the resource. Metadata gathered using this method is unique to parameters set using the '''setUserMetadata''' method, which must be executed prior to calling this method. User metadata may also be provided while placing an order for a resource.
+func (r Resource_Metadata) GetUserMetadata() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getUserMetadata", nil, &r.Options, &resp)
+	return
+}
+
+// The getVlanIds method returns a list of VLAN IDs for the network component matching the provided MAC address associated with the resource. For each return, the native VLAN will appear first, followed by any trunked VLANs associated with the network component.
+func (r Resource_Metadata) GetVlanIds(macAddress *string) (resp []int, err error) {
+	params := []interface{}{
+		macAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getVlanIds", params, &r.Options, &resp)
+	return
+}
+
+// The getVlans method returns a list of VLAN numbers for the network component matching the provided MAC address associated with the resource. For each return, the native VLAN will appear first, followed by any trunked VLANs associated with the network component.
+func (r Resource_Metadata) GetVlans(macAddress *string) (resp []int, err error) {
+	params := []interface{}{
+		macAddress,
+	}
+	err = r.Session.DoRequest("SoftLayer_Resource_Metadata", "getVlans", params, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/sales.go b/vendor/github.com/softlayer/softlayer-go/services/sales.go
new file mode 100644
index 0000000000..4a0e777baa
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/sales.go
@@ -0,0 +1,112 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// The presale event data types indicate the information regarding an individual presale event. The '''locationId''' indicates the datacenter associated with the presale event. The '''itemId''' indicates the product item associated with a particular presale event, though these are less common. The '''startDate''' and '''endDate''' indicate when the presale event is available for use. At the end of the presale event, the server or services purchased will be available once approved and provisioned.
+type Sales_Presale_Event struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetSalesPresaleEventService returns an instance of the Sales_Presale_Event SoftLayer service
+func GetSalesPresaleEventService(sess *session.Session) Sales_Presale_Event {
+	return Sales_Presale_Event{Session: sess}
+}
+
+func (r Sales_Presale_Event) Id(id int) Sales_Presale_Event {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Sales_Presale_Event) Mask(mask string) Sales_Presale_Event {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Sales_Presale_Event) Filter(filter string) Sales_Presale_Event {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Sales_Presale_Event) Limit(limit int) Sales_Presale_Event {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Sales_Presale_Event) Offset(offset int) Sales_Presale_Event {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve A flag to indicate that the presale event is currently active. A presale event is active if the current time is between the start and end dates.
+func (r Sales_Presale_Event) GetActiveFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getActiveFlag", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Sales_Presale_Event) GetAllObjects() (resp []datatypes.Sales_Presale_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A flag to indicate that the presale event is expired. A presale event is expired if the current time is after the end date.
+func (r Sales_Presale_Event) GetExpiredFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getExpiredFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The [[SoftLayer_Product_Item]] associated with the presale event.
+func (r Sales_Presale_Event) GetItem() (resp datatypes.Product_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getItem", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The [[SoftLayer_Location]] associated with the presale event.
+func (r Sales_Presale_Event) GetLocation() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getLocation", nil, &r.Options, &resp)
+	return
+}
+
+// '''getObject''' retrieves the [[SoftLayer_Sales_Presale_Event]] object whose id number corresponds to the id number of the init parameter passed to the SoftLayer_Sales_Presale_Event service. Customers may only retrieve presale events that are currently active.
+func (r Sales_Presale_Event) GetObject() (resp datatypes.Sales_Presale_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getObject", nil, &r.Options, &resp)
+	return
+}
+
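Listing presale events is a plain retrieval call. A minimal sketch (the field names in the mask are assumptions based on the attribute docs above; `Id` is the standard generated datatype field):

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	svc := services.GetSalesPresaleEventService(session.New())

	// Restrict the response to a few fields; because the mask contains
	// commas, the Mask builder wraps it in "mask[...]" automatically.
	events, err := svc.Mask("id,startDate,endDate").GetAllObjects()
	if err != nil {
		log.Fatal(err)
	}

	for _, ev := range events {
		if ev.Id == nil {
			continue
		}
		// GetActiveFlag operates on a single object, selected via the Id builder.
		active, err := svc.Id(*ev.Id).GetActiveFlag()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("event %d active: %v\n", *ev.Id, active)
	}
}
```

+// Retrieve The orders ([[SoftLayer_Billing_Order]]) associated with this presale event that were created for the customer's account.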
+func (r Sales_Presale_Event) GetOrders() (resp []datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Sales_Presale_Event", "getOrders", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/scale.go b/vendor/github.com/softlayer/softlayer-go/services/scale.go new file mode 100644 index 0000000000..3c2b604618 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/scale.go @@ -0,0 +1,1664 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Scale_Asset struct { + Session *session.Session + Options sl.Options +} + +// GetScaleAssetService returns an instance of the Scale_Asset SoftLayer service +func GetScaleAssetService(sess *session.Session) Scale_Asset { + return Scale_Asset{Session: sess} +} + +func (r Scale_Asset) Id(id int) Scale_Asset { + r.Options.Id = &id + return r +} + +func (r Scale_Asset) Mask(mask string) Scale_Asset { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Asset) Filter(filter string) Scale_Asset { + r.Options.Filter = filter + return r +} + +func (r Scale_Asset) Limit(limit int) Scale_Asset { + r.Options.Limit = &limit + return r +} + +func (r Scale_Asset) Offset(offset int) Scale_Asset { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Asset) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Asset) GetObject() (resp datatypes.Scale_Asset, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this asset belongs to. 
+func (r Scale_Asset) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Asset_Hardware struct { + Session *session.Session + Options sl.Options +} + +// GetScaleAssetHardwareService returns an instance of the Scale_Asset_Hardware SoftLayer service +func GetScaleAssetHardwareService(sess *session.Session) Scale_Asset_Hardware { + return Scale_Asset_Hardware{Session: sess} +} + +func (r Scale_Asset_Hardware) Id(id int) Scale_Asset_Hardware { + r.Options.Id = &id + return r +} + +func (r Scale_Asset_Hardware) Mask(mask string) Scale_Asset_Hardware { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Asset_Hardware) Filter(filter string) Scale_Asset_Hardware { + r.Options.Filter = filter + return r +} + +func (r Scale_Asset_Hardware) Limit(limit int) Scale_Asset_Hardware { + r.Options.Limit = &limit + return r +} + +func (r Scale_Asset_Hardware) Offset(offset int) Scale_Asset_Hardware { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Asset_Hardware) CreateObject(templateObject *datatypes.Scale_Asset_Hardware) (resp datatypes.Scale_Asset_Hardware, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Asset_Hardware) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware for this asset. +func (r Scale_Asset_Hardware) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The identifier of the hardware for this asset. +func (r Scale_Asset_Hardware) GetHardwareId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "getHardwareId", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Asset_Hardware) GetObject() (resp datatypes.Scale_Asset_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this asset belongs to. 
+func (r Scale_Asset_Hardware) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Hardware", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Asset_Virtual_Guest struct { + Session *session.Session + Options sl.Options +} + +// GetScaleAssetVirtualGuestService returns an instance of the Scale_Asset_Virtual_Guest SoftLayer service +func GetScaleAssetVirtualGuestService(sess *session.Session) Scale_Asset_Virtual_Guest { + return Scale_Asset_Virtual_Guest{Session: sess} +} + +func (r Scale_Asset_Virtual_Guest) Id(id int) Scale_Asset_Virtual_Guest { + r.Options.Id = &id + return r +} + +func (r Scale_Asset_Virtual_Guest) Mask(mask string) Scale_Asset_Virtual_Guest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Asset_Virtual_Guest) Filter(filter string) Scale_Asset_Virtual_Guest { + r.Options.Filter = filter + return r +} + +func (r Scale_Asset_Virtual_Guest) Limit(limit int) Scale_Asset_Virtual_Guest { + r.Options.Limit = &limit + return r +} + +func (r Scale_Asset_Virtual_Guest) Offset(offset int) Scale_Asset_Virtual_Guest { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Asset_Virtual_Guest) CreateObject(templateObject *datatypes.Scale_Asset_Virtual_Guest) (resp datatypes.Scale_Asset_Virtual_Guest, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Asset_Virtual_Guest) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Asset_Virtual_Guest) GetObject() (resp datatypes.Scale_Asset_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this asset belongs to. +func (r Scale_Asset_Virtual_Guest) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The guest for this asset. +func (r Scale_Asset_Virtual_Guest) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Retrieve The identifier of the guest for this asset. 
+func (r Scale_Asset_Virtual_Guest) GetVirtualGuestId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Asset_Virtual_Guest", "getVirtualGuestId", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Group struct { + Session *session.Session + Options sl.Options +} + +// GetScaleGroupService returns an instance of the Scale_Group SoftLayer service +func GetScaleGroupService(sess *session.Session) Scale_Group { + return Scale_Group{Session: sess} +} + +func (r Scale_Group) Id(id int) Scale_Group { + r.Options.Id = &id + return r +} + +func (r Scale_Group) Mask(mask string) Scale_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Group) Filter(filter string) Scale_Group { + r.Options.Filter = filter + return r +} + +func (r Scale_Group) Limit(limit int) Scale_Group { + r.Options.Limit = &limit + return r +} + +func (r Scale_Group) Offset(offset int) Scale_Group { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Group) CreateObject(templateObject *datatypes.Scale_Group) (resp datatypes.Scale_Group, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Group", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) EditObject(templateObject *datatypes.Scale_Group) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Group", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) ForceDeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "forceDeleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The account for this scaling group. +func (r Scale_Group) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) GetAvailableHourlyInstanceLimit() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getAvailableHourlyInstanceLimit", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) GetAvailableRegionalGroups() (resp []datatypes.Location_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getAvailableRegionalGroups", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of load balancers for this auto scale group. +func (r Scale_Group) GetLoadBalancers() (resp []datatypes.Scale_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getLoadBalancers", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of log entries for this group. +func (r Scale_Group) GetLogs() (resp []datatypes.Scale_Group_Log, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getLogs", nil, &r.Options, &resp) + return +} + +// Retrieve Collection of VLANs for this auto scale group. VLANs are optional. This can contain a public or private VLAN or both. 
When a single VLAN of a given public/private type is provided, it can be a non-purchased VLAN only if the group's minimumMemberCount is >= 1. The collection can also contain any number of purchased public/private VLANs, and members are staggered across them when the group scales up.
+func (r Scale_Group) GetNetworkVlans() (resp []datatypes.Scale_Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Group", "getNetworkVlans", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Scale_Group) GetObject() (resp datatypes.Scale_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Group", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Collection of policies for this group. This can be empty.
+func (r Scale_Group) GetPolicies() (resp []datatypes.Scale_Policy, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Group", "getPolicies", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The regional group for this scale group.
+func (r Scale_Group) GetRegionalGroup() (resp datatypes.Location_Group_Regional, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Group", "getRegionalGroup", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The status for this scale group.
+func (r Scale_Group) GetStatus() (resp datatypes.Scale_Group_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Group", "getStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The termination policy for this scaling group.
+func (r Scale_Group) GetTerminationPolicy() (resp datatypes.Scale_Termination_Policy, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Group", "getTerminationPolicy", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Collection of guests that have been pinned to this group. Guest assets are only used for certain trigger checks such as resource watches. They do not count towards the auto scaling guest counts of this group in any way and are never automatically added or removed.
+func (r Scale_Group) GetVirtualGuestAssets() (resp []datatypes.Scale_Asset_Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Group", "getVirtualGuestAssets", nil, &r.Options, &resp)
+	return
+}
+
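Besides these accessors, the Scale_Group service exposes the imperative controls defined just below: `Scale` (relative delta), `ScaleTo` (absolute target), and `Suspend`/`Resume`. A rough sketch of driving a group (the group id is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	svc := services.GetScaleGroupService(session.New())
	groupID := 12345 // illustrative scale group id

	// Grow the group by two members; Scale takes a relative delta,
	// while ScaleTo would take an absolute member count.
	members, err := svc.Id(groupID).Scale(sl.Int(2))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d members created\n", len(members))

	// Suspend pauses the group's trigger processing; Resume re-enables it.
	if err := svc.Id(groupID).Suspend(); err != nil {
		log.Fatal(err)
	}
}
```

+// Retrieve Collection of guests that have been scaled with the group. When this group is active, the count of guests here is guaranteed to be between minimumMemberCount and maximumMemberCount, inclusive.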
+func (r Scale_Group) GetVirtualGuestMembers() (resp []datatypes.Scale_Member_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group", "getVirtualGuestMembers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) Resume() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Scale_Group", "resume", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) Scale(delta *int) (resp []datatypes.Scale_Member, err error) { + params := []interface{}{ + delta, + } + err = r.Session.DoRequest("SoftLayer_Scale_Group", "scale", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) ScaleTo(number *int) (resp []datatypes.Scale_Member, err error) { + params := []interface{}{ + number, + } + err = r.Session.DoRequest("SoftLayer_Scale_Group", "scaleTo", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group) Suspend() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_Scale_Group", "suspend", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Group_Status struct { + Session *session.Session + Options sl.Options +} + +// GetScaleGroupStatusService returns an instance of the Scale_Group_Status SoftLayer service +func GetScaleGroupStatusService(sess *session.Session) Scale_Group_Status { + return Scale_Group_Status{Session: sess} +} + +func (r Scale_Group_Status) Id(id int) Scale_Group_Status { + r.Options.Id = &id + return r +} + +func (r Scale_Group_Status) Mask(mask string) Scale_Group_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Group_Status) Filter(filter string) Scale_Group_Status { + r.Options.Filter = filter + return r +} + +func (r Scale_Group_Status) Limit(limit int) Scale_Group_Status { + r.Options.Limit = &limit + return r +} + +func (r Scale_Group_Status) Offset(offset int) Scale_Group_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Group_Status) GetAllObjects() (resp []datatypes.Scale_Group_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group_Status", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Group_Status) GetObject() (resp datatypes.Scale_Group_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Group_Status", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_LoadBalancer struct { + Session *session.Session + Options sl.Options +} + +// GetScaleLoadBalancerService returns an instance of the Scale_LoadBalancer SoftLayer service +func GetScaleLoadBalancerService(sess *session.Session) Scale_LoadBalancer { + return Scale_LoadBalancer{Session: sess} +} + +func (r Scale_LoadBalancer) Id(id int) Scale_LoadBalancer { + r.Options.Id = &id + return r +} + +func (r Scale_LoadBalancer) Mask(mask string) Scale_LoadBalancer { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_LoadBalancer) Filter(filter string) Scale_LoadBalancer { + r.Options.Filter = filter + return r +} + +func (r Scale_LoadBalancer) Limit(limit int) Scale_LoadBalancer { + r.Options.Limit = &limit + return r +} + +func (r 
Scale_LoadBalancer) Offset(offset int) Scale_LoadBalancer { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_LoadBalancer) CreateObject(templateObject *datatypes.Scale_LoadBalancer) (resp datatypes.Scale_LoadBalancer, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_LoadBalancer) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_LoadBalancer) EditObject(templateObject *datatypes.Scale_LoadBalancer) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The percentage of connections allocated to this virtual server. +func (r Scale_LoadBalancer) GetAllocationPercent() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getAllocationPercent", nil, &r.Options, &resp) + return +} + +// Retrieve The health check for this configuration. +func (r Scale_LoadBalancer) GetHealthCheck() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Health_Check, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getHealthCheck", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_LoadBalancer) GetObject() (resp datatypes.Scale_LoadBalancer, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The routing method. +func (r Scale_LoadBalancer) GetRoutingMethod() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Method, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getRoutingMethod", nil, &r.Options, &resp) + return +} + +// Retrieve The routing type. +func (r Scale_LoadBalancer) GetRoutingType() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_Routing_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getRoutingType", nil, &r.Options, &resp) + return +} + +// Retrieve The group this load balancer configuration is for. +func (r Scale_LoadBalancer) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The ID of the virtual IP address. +func (r Scale_LoadBalancer) GetVirtualIpAddressId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getVirtualIpAddressId", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual server for this configuration. +func (r Scale_LoadBalancer) GetVirtualServer() (resp datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualServer, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getVirtualServer", nil, &r.Options, &resp) + return +} + +// Retrieve The port on the virtual server. 
+func (r Scale_LoadBalancer) GetVirtualServerPort() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_LoadBalancer", "getVirtualServerPort", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Member struct { + Session *session.Session + Options sl.Options +} + +// GetScaleMemberService returns an instance of the Scale_Member SoftLayer service +func GetScaleMemberService(sess *session.Session) Scale_Member { + return Scale_Member{Session: sess} +} + +func (r Scale_Member) Id(id int) Scale_Member { + r.Options.Id = &id + return r +} + +func (r Scale_Member) Mask(mask string) Scale_Member { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Member) Filter(filter string) Scale_Member { + r.Options.Filter = filter + return r +} + +func (r Scale_Member) Limit(limit int) Scale_Member { + r.Options.Limit = &limit + return r +} + +func (r Scale_Member) Offset(offset int) Scale_Member { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Member) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Member) GetObject() (resp datatypes.Scale_Member, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this member belongs to. +func (r Scale_Member) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Member_Virtual_Guest struct { + Session *session.Session + Options sl.Options +} + +// GetScaleMemberVirtualGuestService returns an instance of the Scale_Member_Virtual_Guest SoftLayer service +func GetScaleMemberVirtualGuestService(sess *session.Session) Scale_Member_Virtual_Guest { + return Scale_Member_Virtual_Guest{Session: sess} +} + +func (r Scale_Member_Virtual_Guest) Id(id int) Scale_Member_Virtual_Guest { + r.Options.Id = &id + return r +} + +func (r Scale_Member_Virtual_Guest) Mask(mask string) Scale_Member_Virtual_Guest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Member_Virtual_Guest) Filter(filter string) Scale_Member_Virtual_Guest { + r.Options.Filter = filter + return r +} + +func (r Scale_Member_Virtual_Guest) Limit(limit int) Scale_Member_Virtual_Guest { + r.Options.Limit = &limit + return r +} + +func (r Scale_Member_Virtual_Guest) Offset(offset int) Scale_Member_Virtual_Guest { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Member_Virtual_Guest) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member_Virtual_Guest", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Member_Virtual_Guest) GetObject() (resp datatypes.Scale_Member_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member_Virtual_Guest", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this member belongs to. 
+func (r Scale_Member_Virtual_Guest) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member_Virtual_Guest", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The guest for this member. +func (r Scale_Member_Virtual_Guest) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member_Virtual_Guest", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Retrieve The identifier of the guest for this member. +func (r Scale_Member_Virtual_Guest) GetVirtualGuestId() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Member_Virtual_Guest", "getVirtualGuestId", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Network_Vlan struct { + Session *session.Session + Options sl.Options +} + +// GetScaleNetworkVlanService returns an instance of the Scale_Network_Vlan SoftLayer service +func GetScaleNetworkVlanService(sess *session.Session) Scale_Network_Vlan { + return Scale_Network_Vlan{Session: sess} +} + +func (r Scale_Network_Vlan) Id(id int) Scale_Network_Vlan { + r.Options.Id = &id + return r +} + +func (r Scale_Network_Vlan) Mask(mask string) Scale_Network_Vlan { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Network_Vlan) Filter(filter string) Scale_Network_Vlan { + r.Options.Filter = filter + return r +} + +func (r Scale_Network_Vlan) Limit(limit int) Scale_Network_Vlan { + r.Options.Limit = &limit + return r +} + +func (r Scale_Network_Vlan) Offset(offset int) Scale_Network_Vlan { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Network_Vlan) CreateObject(templateObject *datatypes.Scale_Network_Vlan) (resp datatypes.Scale_Network_Vlan, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Network_Vlan", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Network_Vlan) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Network_Vlan", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve The network VLAN to scale with. +func (r Scale_Network_Vlan) GetNetworkVlan() (resp datatypes.Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Network_Vlan", "getNetworkVlan", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Network_Vlan) GetObject() (resp datatypes.Scale_Network_Vlan, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Network_Vlan", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The group this network VLAN is for. 
+func (r Scale_Network_Vlan) GetScaleGroup() (resp datatypes.Scale_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Network_Vlan", "getScaleGroup", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyService returns an instance of the Scale_Policy SoftLayer service +func GetScalePolicyService(sess *session.Session) Scale_Policy { + return Scale_Policy{Session: sess} +} + +func (r Scale_Policy) Id(id int) Scale_Policy { + r.Options.Id = &id + return r +} + +func (r Scale_Policy) Mask(mask string) Scale_Policy { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy) Filter(filter string) Scale_Policy { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy) Limit(limit int) Scale_Policy { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy) Offset(offset int) Scale_Policy { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy) CreateObject(templateObject *datatypes.Scale_Policy) (resp datatypes.Scale_Policy, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy) EditObject(templateObject *datatypes.Scale_Policy) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The actions to perform upon any trigger hit. Currently this must be a single value. +func (r Scale_Policy) GetActions() (resp []datatypes.Scale_Policy_Action, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getActions", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy) GetObject() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The one-time triggers to check for this group. +func (r Scale_Policy) GetOneTimeTriggers() (resp []datatypes.Scale_Policy_Trigger_OneTime, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getOneTimeTriggers", nil, &r.Options, &resp) + return +} + +// Retrieve The repeating triggers to check for this group. +func (r Scale_Policy) GetRepeatingTriggers() (resp []datatypes.Scale_Policy_Trigger_Repeating, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getRepeatingTriggers", nil, &r.Options, &resp) + return +} + +// Retrieve The resource-use triggers to check for this group. +func (r Scale_Policy) GetResourceUseTriggers() (resp []datatypes.Scale_Policy_Trigger_ResourceUse, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getResourceUseTriggers", nil, &r.Options, &resp) + return +} + +// Retrieve The scale actions to perform upon any trigger hit. Currently this must be a single value. 
+func (r Scale_Policy) GetScaleActions() (resp []datatypes.Scale_Policy_Action_Scale, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getScaleActions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The group this policy is on.
+func (r Scale_Policy) GetScaleGroup() (resp datatypes.Scale_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getScaleGroup", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The triggers to check for this group.
+func (r Scale_Policy) GetTriggers() (resp []datatypes.Scale_Policy_Trigger, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Policy", "getTriggers", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Scale_Policy) Trigger() (resp []datatypes.Scale_Member, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Policy", "trigger", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Scale_Policy_Action struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetScalePolicyActionService returns an instance of the Scale_Policy_Action SoftLayer service
+func GetScalePolicyActionService(sess *session.Session) Scale_Policy_Action {
+	return Scale_Policy_Action{Session: sess}
+}
+
+func (r Scale_Policy_Action) Id(id int) Scale_Policy_Action {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Scale_Policy_Action) Mask(mask string) Scale_Policy_Action {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Scale_Policy_Action) Filter(filter string) Scale_Policy_Action {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Scale_Policy_Action) Limit(limit int) Scale_Policy_Action {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Scale_Policy_Action) Offset(offset int) Scale_Policy_Action {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Scale_Policy_Action) DeleteObject() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action", "deleteObject", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Scale_Policy_Action) EditObject(templateObject *datatypes.Scale_Policy_Action) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Scale_Policy_Action) GetObject() (resp datatypes.Scale_Policy_Action, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The policy this action is on.
+func (r Scale_Policy_Action) GetScalePolicy() (resp datatypes.Scale_Policy, err error) {
+	err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action", "getScalePolicy", nil, &r.Options, &resp)
+	return
+}
+
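A policy ties triggers to actions, and `Trigger()` fires a policy immediately, which can be useful for exercising a newly created policy before wiring up one-time, repeating, or resource-use triggers. A minimal sketch (the policy id is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	svc := services.GetScalePolicyService(session.New())
	policyID := 6789 // illustrative scale policy id

	// Manually fire the policy; the response lists any members the
	// policy's scale action created.
	members, err := svc.Id(policyID).Trigger()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("policy created %d members\n", len(members))
}
```

+// Retrieve The type of action.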
+func (r Scale_Policy_Action) GetType() (resp datatypes.Scale_Policy_Action_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Action_Scale struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyActionScaleService returns an instance of the Scale_Policy_Action_Scale SoftLayer service +func GetScalePolicyActionScaleService(sess *session.Session) Scale_Policy_Action_Scale { + return Scale_Policy_Action_Scale{Session: sess} +} + +func (r Scale_Policy_Action_Scale) Id(id int) Scale_Policy_Action_Scale { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Action_Scale) Mask(mask string) Scale_Policy_Action_Scale { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Action_Scale) Filter(filter string) Scale_Policy_Action_Scale { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Action_Scale) Limit(limit int) Scale_Policy_Action_Scale { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Action_Scale) Offset(offset int) Scale_Policy_Action_Scale { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Action_Scale) CreateObject(templateObject *datatypes.Scale_Policy_Action_Scale) (resp datatypes.Scale_Policy_Action_Scale, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Action_Scale) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Action_Scale) EditObject(templateObject *datatypes.Scale_Policy_Action) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Action_Scale) GetObject() (resp datatypes.Scale_Policy_Action_Scale, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this action is on. +func (r Scale_Policy_Action_Scale) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of action. 
+func (r Scale_Policy_Action_Scale) GetType() (resp datatypes.Scale_Policy_Action_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Scale", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Action_Type struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyActionTypeService returns an instance of the Scale_Policy_Action_Type SoftLayer service +func GetScalePolicyActionTypeService(sess *session.Session) Scale_Policy_Action_Type { + return Scale_Policy_Action_Type{Session: sess} +} + +func (r Scale_Policy_Action_Type) Id(id int) Scale_Policy_Action_Type { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Action_Type) Mask(mask string) Scale_Policy_Action_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Action_Type) Filter(filter string) Scale_Policy_Action_Type { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Action_Type) Limit(limit int) Scale_Policy_Action_Type { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Action_Type) Offset(offset int) Scale_Policy_Action_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Action_Type) GetAllObjects() (resp []datatypes.Scale_Policy_Action_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Action_Type) GetObject() (resp datatypes.Scale_Policy_Action_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Action_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerService returns an instance of the Scale_Policy_Trigger SoftLayer service +func GetScalePolicyTriggerService(sess *session.Session) Scale_Policy_Trigger { + return Scale_Policy_Trigger{Session: sess} +} + +func (r Scale_Policy_Trigger) Id(id int) Scale_Policy_Trigger { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger) Mask(mask string) Scale_Policy_Trigger { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger) Filter(filter string) Scale_Policy_Trigger { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger) Limit(limit int) Scale_Policy_Trigger { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger) Offset(offset int) Scale_Policy_Trigger { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger) CreateObject(templateObject *datatypes.Scale_Policy_Trigger) (resp datatypes.Scale_Policy_Trigger, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger) EditObject(templateObject 
*datatypes.Scale_Policy_Trigger) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger) GetObject() (resp datatypes.Scale_Policy_Trigger, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this trigger is on. +func (r Scale_Policy_Trigger) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of trigger. +func (r Scale_Policy_Trigger) GetType() (resp datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger_OneTime struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerOneTimeService returns an instance of the Scale_Policy_Trigger_OneTime SoftLayer service +func GetScalePolicyTriggerOneTimeService(sess *session.Session) Scale_Policy_Trigger_OneTime { + return Scale_Policy_Trigger_OneTime{Session: sess} +} + +func (r Scale_Policy_Trigger_OneTime) Id(id int) Scale_Policy_Trigger_OneTime { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger_OneTime) Mask(mask string) Scale_Policy_Trigger_OneTime { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger_OneTime) Filter(filter string) Scale_Policy_Trigger_OneTime { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger_OneTime) Limit(limit int) Scale_Policy_Trigger_OneTime { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger_OneTime) Offset(offset int) Scale_Policy_Trigger_OneTime { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger_OneTime) CreateObject(templateObject *datatypes.Scale_Policy_Trigger_OneTime) (resp datatypes.Scale_Policy_Trigger_OneTime, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_OneTime) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_OneTime) EditObject(templateObject *datatypes.Scale_Policy_Trigger) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_OneTime) GetObject() (resp datatypes.Scale_Policy_Trigger_OneTime, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this trigger is on. 
+func (r Scale_Policy_Trigger_OneTime) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of trigger. +func (r Scale_Policy_Trigger_OneTime) GetType() (resp datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_OneTime", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger_Repeating struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerRepeatingService returns an instance of the Scale_Policy_Trigger_Repeating SoftLayer service +func GetScalePolicyTriggerRepeatingService(sess *session.Session) Scale_Policy_Trigger_Repeating { + return Scale_Policy_Trigger_Repeating{Session: sess} +} + +func (r Scale_Policy_Trigger_Repeating) Id(id int) Scale_Policy_Trigger_Repeating { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger_Repeating) Mask(mask string) Scale_Policy_Trigger_Repeating { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger_Repeating) Filter(filter string) Scale_Policy_Trigger_Repeating { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger_Repeating) Limit(limit int) Scale_Policy_Trigger_Repeating { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger_Repeating) Offset(offset int) Scale_Policy_Trigger_Repeating { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger_Repeating) CreateObject(templateObject *datatypes.Scale_Policy_Trigger_Repeating) (resp datatypes.Scale_Policy_Trigger_Repeating, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_Repeating) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_Repeating) EditObject(templateObject *datatypes.Scale_Policy_Trigger) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_Repeating) GetObject() (resp datatypes.Scale_Policy_Trigger_Repeating, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this trigger is on. +func (r Scale_Policy_Trigger_Repeating) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of trigger. 
+func (r Scale_Policy_Trigger_Repeating) GetType() (resp datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_Repeating) ValidateCronExpression(expression *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + expression, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Repeating", "validateCronExpression", params, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger_ResourceUse struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerResourceUseService returns an instance of the Scale_Policy_Trigger_ResourceUse SoftLayer service +func GetScalePolicyTriggerResourceUseService(sess *session.Session) Scale_Policy_Trigger_ResourceUse { + return Scale_Policy_Trigger_ResourceUse{Session: sess} +} + +func (r Scale_Policy_Trigger_ResourceUse) Id(id int) Scale_Policy_Trigger_ResourceUse { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger_ResourceUse) Mask(mask string) Scale_Policy_Trigger_ResourceUse { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger_ResourceUse) Filter(filter string) Scale_Policy_Trigger_ResourceUse { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger_ResourceUse) Limit(limit int) Scale_Policy_Trigger_ResourceUse { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger_ResourceUse) Offset(offset int) Scale_Policy_Trigger_ResourceUse { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse) CreateObject(templateObject *datatypes.Scale_Policy_Trigger_ResourceUse) (resp datatypes.Scale_Policy_Trigger_ResourceUse, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse) EditObject(templateObject *datatypes.Scale_Policy_Trigger) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse) GetObject() (resp datatypes.Scale_Policy_Trigger_ResourceUse, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The policy this trigger is on. +func (r Scale_Policy_Trigger_ResourceUse) GetScalePolicy() (resp datatypes.Scale_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "getScalePolicy", nil, &r.Options, &resp) + return +} + +// Retrieve The type of trigger. 
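validateCronExpression, defined above on the repeating trigger, reports a bad schedule through the error return alone. A small sketch, reusing the session from the earlier example plus the sl pointer helpers from "github.com/softlayer/softlayer-go/sl":

```go
// ValidateCron asks the API to vet a cron expression; a non-nil error
// means the expression was rejected.
func ValidateCron(sess *session.Session, expr string) error {
	svc := services.GetScalePolicyTriggerRepeatingService(sess)
	return svc.ValidateCronExpression(sl.String(expr))
}
```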
+func (r Scale_Policy_Trigger_ResourceUse) GetType() (resp datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The resource watches for this trigger. +func (r Scale_Policy_Trigger_ResourceUse) GetWatches() (resp []datatypes.Scale_Policy_Trigger_ResourceUse_Watch, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse", "getWatches", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger_ResourceUse_Watch struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerResourceUseWatchService returns an instance of the Scale_Policy_Trigger_ResourceUse_Watch SoftLayer service +func GetScalePolicyTriggerResourceUseWatchService(sess *session.Session) Scale_Policy_Trigger_ResourceUse_Watch { + return Scale_Policy_Trigger_ResourceUse_Watch{Session: sess} +} + +func (r Scale_Policy_Trigger_ResourceUse_Watch) Id(id int) Scale_Policy_Trigger_ResourceUse_Watch { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger_ResourceUse_Watch) Mask(mask string) Scale_Policy_Trigger_ResourceUse_Watch { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger_ResourceUse_Watch) Filter(filter string) Scale_Policy_Trigger_ResourceUse_Watch { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger_ResourceUse_Watch) Limit(limit int) Scale_Policy_Trigger_ResourceUse_Watch { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger_ResourceUse_Watch) Offset(offset int) Scale_Policy_Trigger_ResourceUse_Watch { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) CreateObject(templateObject *datatypes.Scale_Policy_Trigger_ResourceUse_Watch) (resp datatypes.Scale_Policy_Trigger_ResourceUse_Watch, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) EditObject(templateObject *datatypes.Scale_Policy_Trigger_ResourceUse_Watch) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "editObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) GetAllPossibleAlgorithms() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "getAllPossibleAlgorithms", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) GetAllPossibleMetrics() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "getAllPossibleMetrics", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) GetAllPossibleOperators() (resp 
[]string, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "getAllPossibleOperators", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_ResourceUse_Watch) GetObject() (resp datatypes.Scale_Policy_Trigger_ResourceUse_Watch, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The trigger this watch is on. +func (r Scale_Policy_Trigger_ResourceUse_Watch) GetScalePolicyTrigger() (resp datatypes.Scale_Policy_Trigger_ResourceUse, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_ResourceUse_Watch", "getScalePolicyTrigger", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Policy_Trigger_Type struct { + Session *session.Session + Options sl.Options +} + +// GetScalePolicyTriggerTypeService returns an instance of the Scale_Policy_Trigger_Type SoftLayer service +func GetScalePolicyTriggerTypeService(sess *session.Session) Scale_Policy_Trigger_Type { + return Scale_Policy_Trigger_Type{Session: sess} +} + +func (r Scale_Policy_Trigger_Type) Id(id int) Scale_Policy_Trigger_Type { + r.Options.Id = &id + return r +} + +func (r Scale_Policy_Trigger_Type) Mask(mask string) Scale_Policy_Trigger_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Policy_Trigger_Type) Filter(filter string) Scale_Policy_Trigger_Type { + r.Options.Filter = filter + return r +} + +func (r Scale_Policy_Trigger_Type) Limit(limit int) Scale_Policy_Trigger_Type { + r.Options.Limit = &limit + return r +} + +func (r Scale_Policy_Trigger_Type) Offset(offset int) Scale_Policy_Trigger_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Scale_Policy_Trigger_Type) GetAllObjects() (resp []datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Policy_Trigger_Type) GetObject() (resp datatypes.Scale_Policy_Trigger_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Policy_Trigger_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Scale_Termination_Policy struct { + Session *session.Session + Options sl.Options +} + +// GetScaleTerminationPolicyService returns an instance of the Scale_Termination_Policy SoftLayer service +func GetScaleTerminationPolicyService(sess *session.Session) Scale_Termination_Policy { + return Scale_Termination_Policy{Session: sess} +} + +func (r Scale_Termination_Policy) Id(id int) Scale_Termination_Policy { + r.Options.Id = &id + return r +} + +func (r Scale_Termination_Policy) Mask(mask string) Scale_Termination_Policy { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Scale_Termination_Policy) Filter(filter string) Scale_Termination_Policy { + r.Options.Filter = filter + return r +} + +func (r Scale_Termination_Policy) Limit(limit int) Scale_Termination_Policy { + r.Options.Limit = &limit + return r +} + +func (r Scale_Termination_Policy) Offset(offset int) Scale_Termination_Policy { + r.Options.Offset = &offset + return r +} + +// no 
documentation yet +func (r Scale_Termination_Policy) GetAllObjects() (resp []datatypes.Scale_Termination_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Termination_Policy", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Scale_Termination_Policy) GetObject() (resp datatypes.Scale_Termination_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Scale_Termination_Policy", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/search.go b/vendor/github.com/softlayer/softlayer-go/services/search.go new file mode 100644 index 0000000000..5720699901 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/search.go @@ -0,0 +1,122 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Search struct { + Session *session.Session + Options sl.Options +} + +// GetSearchService returns an instance of the Search SoftLayer service +func GetSearchService(sess *session.Session) Search { + return Search{Session: sess} +} + +func (r Search) Id(id int) Search { + r.Options.Id = &id + return r +} + +func (r Search) Mask(mask string) Search { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Search) Filter(filter string) Search { + r.Options.Filter = filter + return r +} + +func (r Search) Limit(limit int) Search { + r.Options.Limit = &limit + return r +} + +func (r Search) Offset(offset int) Search { + r.Options.Offset = &offset + return r +} + +// This method allows for searching for SoftLayer resources by simple terms and operators. Fields that are used for searching will be available at sldn.softlayer.com. It returns a collection or array of [[SoftLayer_Container_Search_Result (type)|SoftLayer_Container_Search_Result]] objects that have search metadata for each result and the resulting resource found. +// +// The advancedSearch() method recognizes the special _objectType: quantifier in search strings. See the documentation for the [[SoftLayer_Search/search|search()]] method on how to restrict searches using object types. +// +// The advancedSearch() method recognizes [[SoftLayer_Container_Search_ObjectType_Property (type)|object properties]], which can also be used to limit searches. Example: +// +// _objectType:Type_1 propertyA:value +// +// A search string can specify multiple properties, separated with spaces. 
Example: +// +// _objectType:Type_1 propertyA:value propertyB:value +// +// A collection of available object types and their properties can be retrieved by calling the [[SoftLayer_Search/getObjectTypes|getObjectTypes()]] method. +func (r Search) AdvancedSearch(searchString *string) (resp []datatypes.Container_Search_Result, err error) { + params := []interface{}{ + searchString, + } + err = r.Session.DoRequest("SoftLayer_Search", "advancedSearch", params, &r.Options, &resp) + return +} + +// This method returns a collection of [[SoftLayer_Container_Search_ObjectType (type)|SoftLayer_Container_Search_ObjectType]] containers that specify which indexed object types and properties are exposed for the current user. These object types can be used to discover searchable data and to create or validate object index search strings. +// +//
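Both search entry points accept the query grammar described in these comments. A short sketch, assuming the session and imports from the earlier examples; the object type and terms are only illustrative:

```go
// FindHardware exercises both entry points; "web01" is illustrative only.
func FindHardware(sess *session.Session) ([]datatypes.Container_Search_Result, error) {
	svc := services.GetSearchService(sess)

	// Plain-phrase search, restricted to hardware objects.
	if _, err := svc.Search(sl.String("_objectType:SoftLayer_Hardware web01")); err != nil {
		return nil, err
	}

	// Property-qualified advanced search against the same object type.
	return svc.AdvancedSearch(sl.String("_objectType:SoftLayer_Hardware hostname:web01"))
}
```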

+// Refer to the [[SoftLayer_Search/search|search()]] and [[SoftLayer_Search/advancedSearch|advancedSearch()]] methods for information on using object types and properties in search strings. +func (r Search) GetObjectTypes() (resp []datatypes.Container_Search_ObjectType, err error) { + err = r.Session.DoRequest("SoftLayer_Search", "getObjectTypes", nil, &r.Options, &resp) + return +} + +// This method allows for searching for SoftLayer resources by simple phrase. It returns a collection or array of [[SoftLayer_Container_Search_Result (type)|SoftLayer_Container_Search_Result]] objects that have search metadata for each result and the resulting resource found. +// +// This method recognizes the special _objectType: quantifier in search strings. This quantifier can be used to restrict a search to specific object types. Example usage: +// +// _objectType:Type_1 (other search terms...) +// +// A search string can specify multiple object types, separated by commas (no spaces are permitted between the type names). Example: +// +// _objectType:Type_1,Type_2,Type_3 (other search terms...) +// +// If the list of object types is prefixed with a hyphen or minus sign (-), then the specified types are excluded from the search. Example: +// +// _objectType:-Type_4,Type_5 (other search terms...) +// +// A collection of available object types can be retrieved by calling the [[SoftLayer_Search/getObjectTypes|getObjectTypes()]] method. +func (r Search) Search(searchString *string) (resp []datatypes.Container_Search_Result, err error) { + params := []interface{}{ + searchString, + } + err = r.Session.DoRequest("SoftLayer_Search", "search", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/security.go b/vendor/github.com/softlayer/softlayer-go/services/security.go new file mode 100644 index 0000000000..1a6fb22283 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/security.go @@ -0,0 +1,456 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Security_Certificate struct { + Session *session.Session + Options sl.Options +} + +// GetSecurityCertificateService returns an instance of the Security_Certificate SoftLayer service +func GetSecurityCertificateService(sess *session.Session) Security_Certificate { + return Security_Certificate{Session: sess} +} + +func (r Security_Certificate) Id(id int) Security_Certificate { + r.Options.Id = &id + return r +} + +func (r Security_Certificate) Mask(mask string) Security_Certificate { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Security_Certificate) Filter(filter string) Security_Certificate { + r.Options.Filter = filter + return r +} + +func (r Security_Certificate) Limit(limit int) Security_Certificate { + r.Options.Limit = &limit + return r +} + +func (r Security_Certificate) Offset(offset int) Security_Certificate { + r.Options.Offset = &offset + return r +} + +// Add a certificate to your account for your records, or for use with various services. Only the certificate and private key are usually required. If your issuer provided an intermediate certificate, you must also provide that certificate. Details will be extracted from the certificate. Validation will be performed between the certificate and the private key as well as the certificate and the intermediate certificate, if provided. +// +// The certificate signing request is not required, but can be provided for your records. +func (r Security_Certificate) CreateObject(templateObject *datatypes.Security_Certificate) (resp datatypes.Security_Certificate, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "createObject", params, &r.Options, &resp) + return +} + +// Remove a certificate from your account. You may not remove a certificate with associated services. +func (r Security_Certificate) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "deleteObject", nil, &r.Options, &resp) + return +} + +// Update a certificate. Modifications are restricted to the note and CSR if the are any services associated with the certificate. There are no modification restrictions for a certificate with no associated services. +func (r Security_Certificate) EditObject(templateObject *datatypes.Security_Certificate) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "editObject", params, &r.Options, &resp) + return +} + +// Locate certificates by their common name, traditionally a domain name. +func (r Security_Certificate) FindByCommonName(commonName *string) (resp []datatypes.Security_Certificate, err error) { + params := []interface{}{ + commonName, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "findByCommonName", params, &r.Options, &resp) + return +} + +// Retrieve The number of services currently associated with the certificate. 
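Since createObject above validates the private key against the certificate (and the intermediate, when supplied) on the server side, a caller only assembles the template. A minimal sketch, assuming the Certificate, PrivateKey, and IntermediateCertificate fields of datatypes.Security_Certificate and the imports from the earlier examples:

```go
// AddCertificate uploads a certificate/key pair; include the intermediate
// only if the issuer provided one. The PEM strings are placeholders.
func AddCertificate(sess *session.Session, certPEM, keyPEM, chainPEM string) (datatypes.Security_Certificate, error) {
	template := datatypes.Security_Certificate{
		Certificate:             sl.String(certPEM),
		PrivateKey:              sl.String(keyPEM),
		IntermediateCertificate: sl.String(chainPEM),
	}
	return services.GetSecurityCertificateService(sess).CreateObject(&template)
}
```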
+func (r Security_Certificate) GetAssociatedServiceCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "getAssociatedServiceCount", nil, &r.Options, &resp) + return +} + +// Retrieve The load balancers virtual IP addresses currently associated with the certificate. +func (r Security_Certificate) GetLoadBalancerVirtualIpAddresses() (resp []datatypes.Network_Application_Delivery_Controller_LoadBalancer_VirtualIpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "getLoadBalancerVirtualIpAddresses", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Security_Certificate) GetObject() (resp datatypes.Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve the certificate in PEM (Privacy Enhanced Mail) format, which is a string containing all base64 encoded (DER) certificates delimited by -----BEGIN/END *----- clauses. +func (r Security_Certificate) GetPemFormat() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate", "getPemFormat", nil, &r.Options, &resp) + return +} + +// SoftLayer_Security_Certificate_Request data type is used to harness your SSL certificate order to a Certificate Authority. This contains data that is required by a Certificate Authority to place an SSL certificate order. +type Security_Certificate_Request struct { + Session *session.Session + Options sl.Options +} + +// GetSecurityCertificateRequestService returns an instance of the Security_Certificate_Request SoftLayer service +func GetSecurityCertificateRequestService(sess *session.Session) Security_Certificate_Request { + return Security_Certificate_Request{Session: sess} +} + +func (r Security_Certificate_Request) Id(id int) Security_Certificate_Request { + r.Options.Id = &id + return r +} + +func (r Security_Certificate_Request) Mask(mask string) Security_Certificate_Request { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Security_Certificate_Request) Filter(filter string) Security_Certificate_Request { + r.Options.Filter = filter + return r +} + +func (r Security_Certificate_Request) Limit(limit int) Security_Certificate_Request { + r.Options.Limit = &limit + return r +} + +func (r Security_Certificate_Request) Offset(offset int) Security_Certificate_Request { + r.Options.Offset = &offset + return r +} + +// Cancels a pending SSL certificate order at the Certificate Authority +func (r Security_Certificate_Request) CancelSslOrder() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "cancelSslOrder", nil, &r.Options, &resp) + return +} + +// Retrieve The account to which a SSL certificate request belongs. +func (r Security_Certificate_Request) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getAccount", nil, &r.Options, &resp) + return +} + +// Gets the email domains that can be used to validate a certificate to a domain. 
+func (r Security_Certificate_Request) GetAdministratorEmailDomains(commonName *string) (resp []string, err error) { + params := []interface{}{ + commonName, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getAdministratorEmailDomains", params, &r.Options, &resp) + return +} + +// Gets the email accounts that can be used to validate a certificate to a domain. +func (r Security_Certificate_Request) GetAdministratorEmailPrefixes() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getAdministratorEmailPrefixes", nil, &r.Options, &resp) + return +} + +// Retrieve The Certificate Authority name +func (r Security_Certificate_Request) GetCertificateAuthorityName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getCertificateAuthorityName", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Security_Certificate_Request) GetObject() (resp datatypes.Security_Certificate_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The order contains the information related to a SSL certificate request. +func (r Security_Certificate_Request) GetOrder() (resp datatypes.Billing_Order, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getOrder", nil, &r.Options, &resp) + return +} + +// Retrieve The associated order item for this SSL certificate request. +func (r Security_Certificate_Request) GetOrderItem() (resp datatypes.Billing_Order_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getOrderItem", nil, &r.Options, &resp) + return +} + +// Returns previous SSL certificate order data. You can use this data for to place a renewal order for a completed SSL certificate. +func (r Security_Certificate_Request) GetPreviousOrderData() (resp datatypes.Container_Product_Order_Security_Certificate, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getPreviousOrderData", nil, &r.Options, &resp) + return +} + +// Returns all the SSL certificate requests. +func (r Security_Certificate_Request) GetSslCertificateRequests(accountId *int) (resp []datatypes.Security_Certificate_Request, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getSslCertificateRequests", params, &r.Options, &resp) + return +} + +// Retrieve The status of a SSL certificate request. +func (r Security_Certificate_Request) GetStatus() (resp datatypes.Security_Certificate_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "getStatus", nil, &r.Options, &resp) + return +} + +// A Certificate Authority sends out various emails to your domain administrator or your technical contact. Use this service to have these emails re-sent. +func (r Security_Certificate_Request) ResendEmail(emailType *string) (resp bool, err error) { + params := []interface{}{ + emailType, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "resendEmail", params, &r.Options, &resp) + return +} + +// Allows you to validate a Certificate Signing Request (CSR) required for an SSL certificate with the certificate authority (CA). This method sends the CSR, the length of the subscription in months, the certificate type, and the server type for validation against requirements of the CA. 
Returns true if valid. +// +// More information on CSR generation can be found at: [http://en.wikipedia.org/wiki/Certificate_signing_request Wikipedia] [https://knowledge.verisign.com/support/ssl-certificates-support/index?page=content&id=AR235&actp=LIST&viewlocale=en_US VeriSign] +func (r Security_Certificate_Request) ValidateCsr(csr *string, validityMonths *int, itemId *int, serverType *string) (resp bool, err error) { + params := []interface{}{ + csr, + validityMonths, + itemId, + serverType, + } + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request", "validateCsr", params, &r.Options, &resp) + return +} + +// Represents a server type that can be specified when ordering an SSL certificate. +type Security_Certificate_Request_ServerType struct { + Session *session.Session + Options sl.Options +} + +// GetSecurityCertificateRequestServerTypeService returns an instance of the Security_Certificate_Request_ServerType SoftLayer service +func GetSecurityCertificateRequestServerTypeService(sess *session.Session) Security_Certificate_Request_ServerType { + return Security_Certificate_Request_ServerType{Session: sess} +} + +func (r Security_Certificate_Request_ServerType) Id(id int) Security_Certificate_Request_ServerType { + r.Options.Id = &id + return r +} + +func (r Security_Certificate_Request_ServerType) Mask(mask string) Security_Certificate_Request_ServerType { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Security_Certificate_Request_ServerType) Filter(filter string) Security_Certificate_Request_ServerType { + r.Options.Filter = filter + return r +} + +func (r Security_Certificate_Request_ServerType) Limit(limit int) Security_Certificate_Request_ServerType { + r.Options.Limit = &limit + return r +} + +func (r Security_Certificate_Request_ServerType) Offset(offset int) Security_Certificate_Request_ServerType { + r.Options.Offset = &offset + return r +} + +// Returns all SSL certificate server types, which are passed in on a [[SoftLayer_Container_Product_Order_Security_Certificate|certificate order]]. +func (r Security_Certificate_Request_ServerType) GetAllObjects() (resp []datatypes.Security_Certificate_Request_ServerType, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request_ServerType", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Security_Certificate_Request_ServerType) GetObject() (resp datatypes.Security_Certificate_Request_ServerType, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request_ServerType", "getObject", nil, &r.Options, &resp) + return +} + +// Represents the status of an SSL certificate request. 
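validateCsr above takes four pointer parameters. A minimal sketch, in which the 12-month term, the item id of 0, and the "apache2" server type are placeholders that a real order would take from the product catalog:

```go
// CheckCsr pre-validates a CSR against the CA's requirements before ordering.
func CheckCsr(sess *session.Session, csrPEM string) (bool, error) {
	return services.GetSecurityCertificateRequestService(sess).
		ValidateCsr(sl.String(csrPEM), sl.Int(12), sl.Int(0), sl.String("apache2"))
}
```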
+type Security_Certificate_Request_Status struct { + Session *session.Session + Options sl.Options +} + +// GetSecurityCertificateRequestStatusService returns an instance of the Security_Certificate_Request_Status SoftLayer service +func GetSecurityCertificateRequestStatusService(sess *session.Session) Security_Certificate_Request_Status { + return Security_Certificate_Request_Status{Session: sess} +} + +func (r Security_Certificate_Request_Status) Id(id int) Security_Certificate_Request_Status { + r.Options.Id = &id + return r +} + +func (r Security_Certificate_Request_Status) Mask(mask string) Security_Certificate_Request_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Security_Certificate_Request_Status) Filter(filter string) Security_Certificate_Request_Status { + r.Options.Filter = filter + return r +} + +func (r Security_Certificate_Request_Status) Limit(limit int) Security_Certificate_Request_Status { + r.Options.Limit = &limit + return r +} + +func (r Security_Certificate_Request_Status) Offset(offset int) Security_Certificate_Request_Status { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Security_Certificate_Request_Status) GetObject() (resp datatypes.Security_Certificate_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request_Status", "getObject", nil, &r.Options, &resp) + return +} + +// Returns all SSL certificate request status objects +func (r Security_Certificate_Request_Status) GetSslRequestStatuses() (resp []datatypes.Security_Certificate_Request_Status, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Certificate_Request_Status", "getSslRequestStatuses", nil, &r.Options, &resp) + return +} + +// no documentation yet +type Security_Ssh_Key struct { + Session *session.Session + Options sl.Options +} + +// GetSecuritySshKeyService returns an instance of the Security_Ssh_Key SoftLayer service +func GetSecuritySshKeyService(sess *session.Session) Security_Ssh_Key { + return Security_Ssh_Key{Session: sess} +} + +func (r Security_Ssh_Key) Id(id int) Security_Ssh_Key { + r.Options.Id = &id + return r +} + +func (r Security_Ssh_Key) Mask(mask string) Security_Ssh_Key { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Security_Ssh_Key) Filter(filter string) Security_Ssh_Key { + r.Options.Filter = filter + return r +} + +func (r Security_Ssh_Key) Limit(limit int) Security_Ssh_Key { + r.Options.Limit = &limit + return r +} + +func (r Security_Ssh_Key) Offset(offset int) Security_Ssh_Key { + r.Options.Offset = &offset + return r +} + +// Add a ssh key to your account for use during server provisioning and os reloads. +func (r Security_Ssh_Key) CreateObject(templateObject *datatypes.Security_Ssh_Key) (resp datatypes.Security_Ssh_Key, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "createObject", params, &r.Options, &resp) + return +} + +// Remove a ssh key from your account. +func (r Security_Ssh_Key) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "deleteObject", nil, &r.Options, &resp) + return +} + +// Update a ssh key. 
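createObject above needs only a label and the public key material. A minimal sketch, assuming the Label and Key fields of datatypes.Security_Ssh_Key:

```go
// AddSshKey registers a public key for use during provisioning and OS reloads.
func AddSshKey(sess *session.Session, label, publicKey string) (datatypes.Security_Ssh_Key, error) {
	key := datatypes.Security_Ssh_Key{
		Label: sl.String(label),     // e.g. "deploy-key"
		Key:   sl.String(publicKey), // the "ssh-rsa AAAA..." line
	}
	return services.GetSecuritySshKeyService(sess).CreateObject(&key)
}
```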
+func (r Security_Ssh_Key) EditObject(templateObject *datatypes.Security_Ssh_Key) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r Security_Ssh_Key) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The image template groups that are linked to an SSH key. +func (r Security_Ssh_Key) GetBlockDeviceTemplateGroups() (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "getBlockDeviceTemplateGroups", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Security_Ssh_Key) GetObject() (resp datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The OS root users that are linked to an SSH key. +func (r Security_Ssh_Key) GetSoftwarePasswords() (resp []datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Security_Ssh_Key", "getSoftwarePasswords", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/software.go b/vendor/github.com/softlayer/softlayer-go/services/software.go new file mode 100644 index 0000000000..7dc00688f8 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/software.go @@ -0,0 +1,797 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// SoftLayer_Software_AccountLicense is a class that represents software licenses that are tied only to a customer's account and not to any particular hardware, IP address, etc. 
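Because account licenses (see the service declared just below) attach to the account rather than to a device, listing them needs no object id. A minimal sketch; the mask properties are assumptions about the datatype:

```go
// ListAccountLicenses fetches every account-wide license with a trimmed mask.
func ListAccountLicenses(sess *session.Session) ([]datatypes.Software_AccountLicense, error) {
	return services.GetSoftwareAccountLicenseService(sess).
		Mask("key,capacity,softwareDescription.name").
		GetAllObjects()
}
```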
+type Software_AccountLicense struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareAccountLicenseService returns an instance of the Software_AccountLicense SoftLayer service +func GetSoftwareAccountLicenseService(sess *session.Session) Software_AccountLicense { + return Software_AccountLicense{Session: sess} +} + +func (r Software_AccountLicense) Id(id int) Software_AccountLicense { + r.Options.Id = &id + return r +} + +func (r Software_AccountLicense) Mask(mask string) Software_AccountLicense { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_AccountLicense) Filter(filter string) Software_AccountLicense { + r.Options.Filter = filter + return r +} + +func (r Software_AccountLicense) Limit(limit int) Software_AccountLicense { + r.Options.Limit = &limit + return r +} + +func (r Software_AccountLicense) Offset(offset int) Software_AccountLicense { + r.Options.Offset = &offset + return r +} + +// Retrieve The customer account this Account License belongs to. +func (r Software_AccountLicense) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Software_AccountLicense", "getAccount", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_AccountLicense) GetAllObjects() (resp []datatypes.Software_AccountLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Software_AccountLicense", "getAllObjects", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a software account license. +func (r Software_AccountLicense) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_AccountLicense", "getBillingItem", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_AccountLicense) GetObject() (resp datatypes.Software_AccountLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Software_AccountLicense", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Software_Description that this account license is for. +func (r Software_AccountLicense) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_AccountLicense", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// A SoftLayer_Software_Component ties the installation of a specific piece of software onto a specific piece of hardware. +// +// SoftLayer_Software_Component works with SoftLayer_Software_License and SoftLayer_Software_Description to tie this all together. +// +//

+// • SoftLayer_Software_Component is the installation of a specific piece of software onto a specific piece of hardware in accordance with a software license.
+// • SoftLayer_Software_License dictates when and how a specific piece of software may be installed onto a piece of hardware.
+// • SoftLayer_Software_Description describes a specific piece of software which can be installed onto hardware in accordance with its license agreement.
    +type Software_Component struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareComponentService returns an instance of the Software_Component SoftLayer service +func GetSoftwareComponentService(sess *session.Session) Software_Component { + return Software_Component{Session: sess} +} + +func (r Software_Component) Id(id int) Software_Component { + r.Options.Id = &id + return r +} + +func (r Software_Component) Mask(mask string) Software_Component { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_Component) Filter(filter string) Software_Component { + r.Options.Filter = filter + return r +} + +func (r Software_Component) Limit(limit int) Software_Component { + r.Options.Limit = &limit + return r +} + +func (r Software_Component) Offset(offset int) Software_Component { + r.Options.Offset = &offset + return r +} + +// Retrieve The average amount of time that a software component takes to install. +func (r Software_Component) GetAverageInstallationDuration() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getAverageInstallationDuration", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a software component. +func (r Software_Component) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware this Software Component is installed upon. +func (r Software_Component) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getHardware", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with a software component. If the software component does not support downloading license files an exception will be thrown. +func (r Software_Component) GetLicenseFile() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getLicenseFile", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Software_Component object whose ID corresponds to the ID number of the init parameter passed to the SoftLayer_Software_Component service. +// +// The best way to get software components is through getSoftwareComponents from the Hardware service. +func (r Software_Component) GetObject() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve History Records for Software Passwords. +func (r Software_Component) GetPasswordHistory() (resp []datatypes.Software_Component_Password_History, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getPasswordHistory", nil, &r.Options, &resp) + return +} + +// Retrieve Username/Password pairs used for access to this Software Installation. +func (r Software_Component) GetPasswords() (resp []datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getPasswords", nil, &r.Options, &resp) + return +} + +// Retrieve The Software Description of this Software Component. 
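getObject's note above recommends reaching software components through the Hardware service instead of by component id. A sketch of that route, assuming the generated GetHardwareServerService and its GetSoftwareComponents call, with a placeholder server id:

```go
// ServerSoftware pulls a server's software components and their credentials.
func ServerSoftware(sess *session.Session, serverId int) ([]datatypes.Software_Component, error) {
	return services.GetHardwareServerService(sess).
		Id(serverId).
		Mask("id,passwords[username,password]").
		GetSoftwareComponents()
}
```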
+func (r Software_Component) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve The License this Software Component uses. +func (r Software_Component) GetSoftwareLicense() (resp datatypes.Software_License, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getSoftwareLicense", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component) GetVendorSetUpConfiguration() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getVendorSetUpConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual guest this software component is installed upon. +func (r Software_Component) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// This object specifies a specific type of Software Component: An Anti-virus/spyware instance. Anti-virus/spyware installations have specific properties and methods such as SoftLayer_Software_Component_AntivirusSpyware::updateAntivirusSpywarePolicy. Defaults are initiated by this object. +type Software_Component_AntivirusSpyware struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareComponentAntivirusSpywareService returns an instance of the Software_Component_AntivirusSpyware SoftLayer service +func GetSoftwareComponentAntivirusSpywareService(sess *session.Session) Software_Component_AntivirusSpyware { + return Software_Component_AntivirusSpyware{Session: sess} +} + +func (r Software_Component_AntivirusSpyware) Id(id int) Software_Component_AntivirusSpyware { + r.Options.Id = &id + return r +} + +func (r Software_Component_AntivirusSpyware) Mask(mask string) Software_Component_AntivirusSpyware { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_Component_AntivirusSpyware) Filter(filter string) Software_Component_AntivirusSpyware { + r.Options.Filter = filter + return r +} + +func (r Software_Component_AntivirusSpyware) Limit(limit int) Software_Component_AntivirusSpyware { + r.Options.Limit = &limit + return r +} + +func (r Software_Component_AntivirusSpyware) Offset(offset int) Software_Component_AntivirusSpyware { + r.Options.Offset = &offset + return r +} + +// Retrieve The average amount of time that a software component takes to install. +func (r Software_Component_AntivirusSpyware) GetAverageInstallationDuration() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getAverageInstallationDuration", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a software component. +func (r Software_Component_AntivirusSpyware) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware this Software Component is installed upon. 
+func (r Software_Component_AntivirusSpyware) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getHardware", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with a software component. If the software component does not support downloading license files an exception will be thrown. +func (r Software_Component_AntivirusSpyware) GetLicenseFile() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getLicenseFile", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component_AntivirusSpyware) GetObject() (resp datatypes.Software_Component_AntivirusSpyware, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve History Records for Software Passwords. +func (r Software_Component_AntivirusSpyware) GetPasswordHistory() (resp []datatypes.Software_Component_Password_History, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getPasswordHistory", nil, &r.Options, &resp) + return +} + +// Retrieve Username/Password pairs used for access to this Software Installation. +func (r Software_Component_AntivirusSpyware) GetPasswords() (resp []datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getPasswords", nil, &r.Options, &resp) + return +} + +// Retrieve The Software Description of this Software Component. +func (r Software_Component_AntivirusSpyware) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve The License this Software Component uses. +func (r Software_Component_AntivirusSpyware) GetSoftwareLicense() (resp datatypes.Software_License, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getSoftwareLicense", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component_AntivirusSpyware) GetVendorSetUpConfiguration() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getVendorSetUpConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual guest this software component is installed upon. +func (r Software_Component_AntivirusSpyware) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Update an anti-virus/spyware policy. The policy options that it accepts are the following: +// *1 - Minimal +// *2 - Relaxed +// *3 - Default +// *4 - High +// *5 - Ultimate +func (r Software_Component_AntivirusSpyware) UpdateAntivirusSpywarePolicy(newPolicy *string, enforce *bool) (resp bool, err error) { + params := []interface{}{ + newPolicy, + enforce, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_AntivirusSpyware", "updateAntivirusSpywarePolicy", params, &r.Options, &resp) + return +} + +// This object specifies a specific type of Software Component: A Host Intrusion Protection System instance. 
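updateAntivirusSpywarePolicy above takes the option number as a string, not an int. A minimal sketch that raises a component to policy 4 ("High") and enforces it:

```go
// RaiseAvPolicy moves an anti-virus component to the "High" policy (option 4)
// and enforces it; note the option number is passed as a string.
func RaiseAvPolicy(sess *session.Session, componentId int) (bool, error) {
	return services.GetSoftwareComponentAntivirusSpywareService(sess).
		Id(componentId).
		UpdateAntivirusSpywarePolicy(sl.String("4"), sl.Bool(true))
}
```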
+type Software_Component_HostIps struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareComponentHostIpsService returns an instance of the Software_Component_HostIps SoftLayer service +func GetSoftwareComponentHostIpsService(sess *session.Session) Software_Component_HostIps { + return Software_Component_HostIps{Session: sess} +} + +func (r Software_Component_HostIps) Id(id int) Software_Component_HostIps { + r.Options.Id = &id + return r +} + +func (r Software_Component_HostIps) Mask(mask string) Software_Component_HostIps { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_Component_HostIps) Filter(filter string) Software_Component_HostIps { + r.Options.Filter = filter + return r +} + +func (r Software_Component_HostIps) Limit(limit int) Software_Component_HostIps { + r.Options.Limit = &limit + return r +} + +func (r Software_Component_HostIps) Offset(offset int) Software_Component_HostIps { + r.Options.Offset = &offset + return r +} + +// Retrieve The average amount of time that a software component takes to install. +func (r Software_Component_HostIps) GetAverageInstallationDuration() (resp uint, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getAverageInstallationDuration", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a software component. +func (r Software_Component_HostIps) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Get the current Host IPS policies. +func (r Software_Component_HostIps) GetCurrentHostIpsPolicies() (resp []datatypes.Container_Software_Component_HostIps_Policy, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getCurrentHostIpsPolicies", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware this Software Component is installed upon. +func (r Software_Component_HostIps) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getHardware", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with a software component. If the software component does not support downloading license files an exception will be thrown. +func (r Software_Component_HostIps) GetLicenseFile() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getLicenseFile", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component_HostIps) GetObject() (resp datatypes.Software_Component_HostIps, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve History Records for Software Passwords. +func (r Software_Component_HostIps) GetPasswordHistory() (resp []datatypes.Software_Component_Password_History, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getPasswordHistory", nil, &r.Options, &resp) + return +} + +// Retrieve Username/Password pairs used for access to this Software Installation. 
+func (r Software_Component_HostIps) GetPasswords() (resp []datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getPasswords", nil, &r.Options, &resp) + return +} + +// Retrieve The Software Description of this Software Component. +func (r Software_Component_HostIps) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve The License this Software Component uses. +func (r Software_Component_HostIps) GetSoftwareLicense() (resp datatypes.Software_License, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getSoftwareLicense", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component_HostIps) GetVendorSetUpConfiguration() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getVendorSetUpConfiguration", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual guest this software component is installed upon. +func (r Software_Component_HostIps) GetVirtualGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "getVirtualGuest", nil, &r.Options, &resp) + return +} + +// Update the Host IPS policies. To retrieve valid policy options you must use the provided relationships. +func (r Software_Component_HostIps) UpdateHipsPolicies(newIpsMode *string, newIpsProtection *string, newFirewallMode *string, newFirewallRuleset *string, newApplicationMode *string, newApplicationRuleset *string, newEnforcementPolicy *string) (resp bool, err error) { + params := []interface{}{ + newIpsMode, + newIpsProtection, + newFirewallMode, + newFirewallRuleset, + newApplicationMode, + newApplicationRuleset, + newEnforcementPolicy, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_HostIps", "updateHipsPolicies", params, &r.Options, &resp) + return +} + +// This SoftLayer_Software_Component_Password data type contains a password for a specific software component instance. +type Software_Component_Password struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareComponentPasswordService returns an instance of the Software_Component_Password SoftLayer service +func GetSoftwareComponentPasswordService(sess *session.Session) Software_Component_Password { + return Software_Component_Password{Session: sess} +} + +func (r Software_Component_Password) Id(id int) Software_Component_Password { + r.Options.Id = &id + return r +} + +func (r Software_Component_Password) Mask(mask string) Software_Component_Password { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_Component_Password) Filter(filter string) Software_Component_Password { + r.Options.Filter = filter + return r +} + +func (r Software_Component_Password) Limit(limit int) Software_Component_Password { + r.Options.Limit = &limit + return r +} + +func (r Software_Component_Password) Offset(offset int) Software_Component_Password { + r.Options.Offset = &offset + return r +} + +// Create a password for a software component. 
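updateHipsPolicies above takes seven positional policy strings. A minimal sketch in which every value is a placeholder; real options should first be read from getCurrentHostIpsPolicies:

```go
// SetHipsPolicies pushes a full Host IPS policy set in one call.
func SetHipsPolicies(sess *session.Session, componentId int) (bool, error) {
	return services.GetSoftwareComponentHostIpsService(sess).
		Id(componentId).
		UpdateHipsPolicies(
			sl.String("on"), sl.String("high"),          // IPS mode, IPS protection (placeholders)
			sl.String("on"), sl.String("default"),       // firewall mode, firewall ruleset
			sl.String("adaptive"), sl.String("default"), // application mode, application ruleset
			sl.String("default"),                        // enforcement policy
		)
}
```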
+func (r Software_Component_Password) CreateObject(templateObject *datatypes.Software_Component_Password) (resp datatypes.Software_Component_Password, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "createObject", params, &r.Options, &resp) + return +} + +// Create more than one password for a software component. +func (r Software_Component_Password) CreateObjects(templateObjects []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "createObjects", params, &r.Options, &resp) + return +} + +// Delete a password from a software component. +func (r Software_Component_Password) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "deleteObject", nil, &r.Options, &resp) + return +} + +// Delete more than one passwords from a software component. +func (r Software_Component_Password) DeleteObjects(templateObjects []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "deleteObjects", params, &r.Options, &resp) + return +} + +// Edit the properties of a software component password such as the username, password, port, and notes. +func (r Software_Component_Password) EditObject(templateObject *datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "editObject", params, &r.Options, &resp) + return +} + +// Edit more than one password from a software component. +func (r Software_Component_Password) EditObjects(templateObjects []datatypes.Software_Component_Password) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "editObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Software_Component_Password) GetObject() (resp datatypes.Software_Component_Password, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Software_Component instance that this username/password pair is valid for. +func (r Software_Component_Password) GetSoftware() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "getSoftware", nil, &r.Options, &resp) + return +} + +// Retrieve SSH keys to be installed on the server during provisioning or an OS reload. +func (r Software_Component_Password) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) { + err = r.Session.DoRequest("SoftLayer_Software_Component_Password", "getSshKeys", nil, &r.Options, &resp) + return +} + +// This class holds a description for a specific installation of a Software Component. +// +// SoftLayer_Software_Licenses tie a Software Component (A specific installation on a piece of hardware) to it's description. +// +// The "Manufacturer" and "Name" properties of a SoftLayer_Software_Description are used by the framework to factory specific objects, objects that may have special methods for that specific piece of software, or objects that contain application specific data, such as default ports. 
For example, if you create a SoftLayer_Software_Component whose SoftLayer_Software_License points to the SoftLayer_Software_Description for "Swsoft" "Plesk", you'll actually get a SoftLayer_Software_Component_Swsoft_Plesk object.
+type Software_Description struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetSoftwareDescriptionService returns an instance of the Software_Description SoftLayer service
+func GetSoftwareDescriptionService(sess *session.Session) Software_Description {
+	return Software_Description{Session: sess}
+}
+
+func (r Software_Description) Id(id int) Software_Description {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Software_Description) Mask(mask string) Software_Description {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Software_Description) Filter(filter string) Software_Description {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Software_Description) Limit(limit int) Software_Description {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Software_Description) Offset(offset int) Software_Description {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Software_Description) GetAllObjects() (resp []datatypes.Software_Description, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Software_Description) GetAttributes() (resp []datatypes.Software_Description_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getAttributes", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The average amount of time that a software description takes to install.
+func (r Software_Description) GetAverageInstallationDuration() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getAverageInstallationDuration", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A list of the software descriptions that are compatible with this software description.
+func (r Software_Description) GetCompatibleSoftwareDescriptions() (resp []datatypes.Software_Description, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getCompatibleSoftwareDescriptions", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Software_Description) GetCustomerOwnedLicenseDescriptions() (resp []datatypes.Software_Description, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getCustomerOwnedLicenseDescriptions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The feature attributes of a software description.
+func (r Software_Description) GetFeatures() (resp []datatypes.Software_Description_Feature, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getFeatures", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The latest version of a software description.
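+//
+// A sketch, not generated output: the Mask helper above wraps a comma- or
+// bracket-containing mask in "mask[...]" automatically, so the two calls below
+// send the same object mask (assumes a configured *session.Session named sess
+// and a hypothetical id):
+//
+//	svc := services.GetSoftwareDescriptionService(sess)
+//	a, errA := svc.Id(1234).Mask("id,name,version").GetLatestVersion()
+//	b, errB := svc.Id(1234).Mask("mask[id,name,version]").GetLatestVersion()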
+func (r Software_Description) GetLatestVersion() (resp []datatypes.Software_Description, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getLatestVersion", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Software_Description) GetObject() (resp datatypes.Software_Description, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The various product items to which this software description is linked.
+func (r Software_Description) GetProductItems() (resp []datatypes.Product_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getProductItems", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve This details the provisioning transaction group for this software. This is only valid for Operating System software.
+func (r Software_Description) GetProvisionTransactionGroup() (resp datatypes.Provisioning_Version1_Transaction_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getProvisionTransactionGroup", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The transaction group that a software description belongs to. A transaction group is a sequence of transactions that must be performed in a specific order for the installation of software.
+func (r Software_Description) GetReloadTransactionGroup() (resp datatypes.Provisioning_Version1_Transaction_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getReloadTransactionGroup", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The default user created for a given software description.
+func (r Software_Description) GetRequiredUser() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getRequiredUser", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Software Licenses that govern this Software Description.
+func (r Software_Description) GetSoftwareLicenses() (resp []datatypes.Software_License, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getSoftwareLicenses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A suggestion for an upgrade path from this Software Description
+func (r Software_Description) GetUpgradeSoftwareDescription() (resp datatypes.Software_Description, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getUpgradeSoftwareDescription", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A suggestion for an upgrade path from this Software Description (Deprecated - Use upgradeSoftwareDescription)
+func (r Software_Description) GetUpgradeSwDesc() (resp datatypes.Software_Description, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getUpgradeSwDesc", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Software_Description) GetValidFilesystemTypes() (resp []datatypes.Configuration_Storage_Filesystem_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Software_Description", "getValidFilesystemTypes", nil, &r.Options, &resp)
+	return
+}
+
+// SoftLayer_Software_VirtualLicense is the application class that handles a special type of Software License. Most software licenses are licensed to a specific hardware ID; virtual licenses are designed for virtual machines and therefore are assigned to an IP Address. Not all software packages can be "virtual licensed".
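+//
+// Usage sketch (illustrative only; `sess` is an assumed, already-authenticated
+// *session.Session and 5678 a hypothetical virtual license id):
+//
+//	svc := services.GetSoftwareVirtualLicenseService(sess)
+//	licenseFile, err := svc.Id(5678).GetLicenseFile()
+//	// GetLicenseFile may return an error when no file exists for the license.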
+type Software_VirtualLicense struct { + Session *session.Session + Options sl.Options +} + +// GetSoftwareVirtualLicenseService returns an instance of the Software_VirtualLicense SoftLayer service +func GetSoftwareVirtualLicenseService(sess *session.Session) Software_VirtualLicense { + return Software_VirtualLicense{Session: sess} +} + +func (r Software_VirtualLicense) Id(id int) Software_VirtualLicense { + r.Options.Id = &id + return r +} + +func (r Software_VirtualLicense) Mask(mask string) Software_VirtualLicense { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Software_VirtualLicense) Filter(filter string) Software_VirtualLicense { + r.Options.Filter = filter + return r +} + +func (r Software_VirtualLicense) Limit(limit int) Software_VirtualLicense { + r.Options.Limit = &limit + return r +} + +func (r Software_VirtualLicense) Offset(offset int) Software_VirtualLicense { + r.Options.Offset = &offset + return r +} + +// Retrieve The customer account this Virtual License belongs to. +func (r Software_VirtualLicense) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a software virtual license. +func (r Software_VirtualLicense) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The hardware record to which the software virtual license is assigned. +func (r Software_VirtualLicense) GetHostHardware() (resp datatypes.Hardware_Server, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getHostHardware", nil, &r.Options, &resp) + return +} + +// Retrieve The IP Address record associated with a virtual license. +func (r Software_VirtualLicense) GetIpAddressRecord() (resp datatypes.Network_Subnet_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getIpAddressRecord", nil, &r.Options, &resp) + return +} + +// Attempt to retrieve the file associated with a virtual license, if such a file exists. If there is no file for this virtual license, calling this method will either throw an exception or return false. +func (r Software_VirtualLicense) GetLicenseFile() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getLicenseFile", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Software_VirtualLicense object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Software_VirtualLicense service. You can only retrieve Virtual Licenses assigned to your account number. +func (r Software_VirtualLicense) GetObject() (resp datatypes.Software_VirtualLicense, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer_Software_Description that this virtual license is for. +func (r Software_VirtualLicense) GetSoftwareDescription() (resp datatypes.Software_Description, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getSoftwareDescription", nil, &r.Options, &resp) + return +} + +// Retrieve The subnet this Virtual License's IP address belongs to. 
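+//
+// Sketch only: service structs use value receivers, so each chained option below
+// returns a copy and the base `svc` stays reusable (the id is hypothetical;
+// `sess` is an assumed configured *session.Session):
+//
+//	svc := services.GetSoftwareVirtualLicenseService(sess)
+//	subnet, err := svc.Id(5678).Mask("id,networkIdentifier,cidr").GetSubnet()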
+func (r Software_VirtualLicense) GetSubnet() (resp datatypes.Network_Subnet, err error) { + err = r.Session.DoRequest("SoftLayer_Software_VirtualLicense", "getSubnet", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/survey.go b/vendor/github.com/softlayer/softlayer-go/services/survey.go new file mode 100644 index 0000000000..aa956c8423 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/survey.go @@ -0,0 +1,112 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// The SoftLayer_Survey data type contains general information relating to a single SoftLayer survey. +type Survey struct { + Session *session.Session + Options sl.Options +} + +// GetSurveyService returns an instance of the Survey SoftLayer service +func GetSurveyService(sess *session.Session) Survey { + return Survey{Session: sess} +} + +func (r Survey) Id(id int) Survey { + r.Options.Id = &id + return r +} + +func (r Survey) Mask(mask string) Survey { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Survey) Filter(filter string) Survey { + r.Options.Filter = filter + return r +} + +func (r Survey) Limit(limit int) Survey { + r.Options.Limit = &limit + return r +} + +func (r Survey) Offset(offset int) Survey { + r.Options.Offset = &offset + return r +} + +// Provides survey details for the given type +func (r Survey) GetActiveSurveyByType(typ *string) (resp datatypes.Survey, err error) { + params := []interface{}{ + typ, + } + err = r.Session.DoRequest("SoftLayer_Survey", "getActiveSurveyByType", params, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Survey object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Survey service. You can only retrieve the survey that your portal user has taken. +func (r Survey) GetObject() (resp datatypes.Survey, err error) { + err = r.Session.DoRequest("SoftLayer_Survey", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The questions for a survey. 
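+//
+// Illustrative sketch (not generated): fetching a survey's questions and then
+// submitting responses via TakeSurvey below; the id is hypothetical and `sess`
+// is an assumed configured *session.Session:
+//
+//	svc := services.GetSurveyService(sess)
+//	questions, err := svc.Id(42).GetQuestions()
+//	// build a []datatypes.Survey_Response from the questions, then:
+//	ok, errTake := svc.Id(42).TakeSurvey(responses)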
+func (r Survey) GetQuestions() (resp []datatypes.Survey_Question, err error) {
+	err = r.Session.DoRequest("SoftLayer_Survey", "getQuestions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The status of the survey
+func (r Survey) GetStatus() (resp datatypes.Survey_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Survey", "getStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of survey
+func (r Survey) GetType() (resp datatypes.Survey_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Survey", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// Respond to a SoftLayer survey's questions.
+func (r Survey) TakeSurvey(responses []datatypes.Survey_Response) (resp bool, err error) {
+	params := []interface{}{
+		responses,
+	}
+	err = r.Session.DoRequest("SoftLayer_Survey", "takeSurvey", params, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/tag.go b/vendor/github.com/softlayer/softlayer-go/services/tag.go
new file mode 100644
index 0000000000..644ac1a775
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/tag.go
@@ -0,0 +1,123 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// The SoftLayer_Tag data type is an optional type associated with hardware. The account ID that the tag is tied to, and the tag itself are stored in this data type. There is also a flag to denote whether the tag is internal or not.
+type Tag struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTagService returns an instance of the Tag SoftLayer service
+func GetTagService(sess *session.Session) Tag {
+	return Tag{Session: sess}
+}
+
+func (r Tag) Id(id int) Tag {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Tag) Mask(mask string) Tag {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Tag) Filter(filter string) Tag {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Tag) Limit(limit int) Tag {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Tag) Offset(offset int) Tag {
+	r.Options.Offset = &offset
+	return r
+}
+
+// This function is responsible for setting the Tag's values. The internal flag is set to 0 if the user is a customer, and 1 otherwise. AccountId is set to the account bound to the user, and the tag's name is set to the clean version of the tag entered by the user.
+func (r Tag) AutoComplete(tag *string) (resp []datatypes.Tag, err error) {
+	params := []interface{}{
+		tag,
+	}
+	err = r.Session.DoRequest("SoftLayer_Tag", "autoComplete", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The account to which the tag is tied.
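+//
+// Sketch (tag id 321 is hypothetical; `sess` is an assumed configured
+// *session.Session): resolving the account that owns a tag:
+//
+//	account, err := services.GetTagService(sess).Id(321).GetAccount()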
+func (r Tag) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Tag", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// Returns all tags of a given object type.
+func (r Tag) GetAllTagTypes() (resp []datatypes.Tag_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Tag", "getAllTagTypes", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Tag) GetObject() (resp datatypes.Tag, err error) {
+	err = r.Session.DoRequest("SoftLayer_Tag", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve References that tie an object to the tag.
+func (r Tag) GetReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Tag", "getReferences", nil, &r.Options, &resp)
+	return
+}
+
+// Returns the Tag object with a given name. The user types in the tag name and this method returns the tag with that name.
+func (r Tag) GetTagByTagName(tagList *string) (resp []datatypes.Tag, err error) {
+	params := []interface{}{
+		tagList,
+	}
+	err = r.Session.DoRequest("SoftLayer_Tag", "getTagByTagName", params, &r.Options, &resp)
+	return
+}
+
+// Tag an object by passing in one or more tags separated by a comma. Tag references are cleared out every time this method is called. If your object is already tagged you will need to pass the current tags along with any new ones. To remove all tag references pass an empty string. To remove one or more tags omit them from the tag list. The characters permitted are A-Z, 0-9, whitespace, _ (underscore), - (hyphen), . (period), and : (colon). All other characters will be stripped away.
+func (r Tag) SetTags(tags *string, keyName *string, resourceTableId *int) (resp bool, err error) {
+	params := []interface{}{
+		tags,
+		keyName,
+		resourceTableId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Tag", "setTags", params, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/ticket.go b/vendor/github.com/softlayer/softlayer-go/services/ticket.go
new file mode 100644
index 0000000000..0d7a9d8003
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/ticket.go
@@ -0,0 +1,1024 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// The SoftLayer_Ticket data type models a single SoftLayer customer support or notification ticket. Each ticket object contains references to its updates, the user it's assigned to, the SoftLayer department and employee that it's assigned to, and any hardware objects or attached files associated with the ticket. Tickets are described in further detail on the [[SoftLayer_Ticket]] service page.
+//
+// To create a support ticket execute the [[SoftLayer_Ticket::createStandardTicket|createStandardTicket]] or [[SoftLayer_Ticket::createAdministrativeTicket|createAdministrativeTicket]] methods in the SoftLayer_Ticket service. To create an upgrade ticket for the SoftLayer sales group execute the [[SoftLayer_Ticket::createUpgradeTicket|createUpgradeTicket]].
+type Ticket struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTicketService returns an instance of the Ticket SoftLayer service
+func GetTicketService(sess *session.Session) Ticket {
+	return Ticket{Session: sess}
+}
+
+func (r Ticket) Id(id int) Ticket {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Ticket) Mask(mask string) Ticket {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Ticket) Filter(filter string) Ticket {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Ticket) Limit(limit int) Ticket {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Ticket) Offset(offset int) Ticket {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Ticket) AddAssignedAgent(agentId *int) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		agentId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "addAssignedAgent", params, &r.Options, &resp)
+	return
+}
+
+// Creates new additional emails for the assigned user if new emails are provided. Attaches any newly created additional emails to the ticket.
+func (r Ticket) AddAttachedAdditionalEmails(emails []string) (resp bool, err error) {
+	params := []interface{}{
+		emails,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "addAttachedAdditionalEmails", params, &r.Options, &resp)
+	return
+}
+
+// Attach the given file to a SoftLayer ticket. A file attachment is a convenient way to submit non-textual error reports to SoftLayer employees in a ticket. File attachments to tickets must have a unique name.
+func (r Ticket) AddAttachedFile(fileAttachment *datatypes.Container_Utility_File_Attachment) (resp datatypes.Ticket_Attachment_File, err error) {
+	params := []interface{}{
+		fileAttachment,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "addAttachedFile", params, &r.Options, &resp)
+	return
+}
+
+// Attach the given hardware to a SoftLayer ticket. A hardware attachment provides an easy way for SoftLayer's employees to quickly look up your hardware records in the case of hardware-specific issues.
+func (r Ticket) AddAttachedHardware(hardwareId *int) (resp datatypes.Ticket_Attachment_Hardware, err error) {
+	params := []interface{}{
+		hardwareId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "addAttachedHardware", params, &r.Options, &resp)
+	return
+}
+
+// Attach the given CloudLayer Computing Instance to a SoftLayer ticket. An attachment provides an easy way for SoftLayer's employees to quickly look up your records in the case of specific issues.
+func (r Ticket) AddAttachedVirtualGuest(guestId *int) (resp datatypes.Ticket_Attachment_Virtual_Guest, err error) {
+	params := []interface{}{
+		guestId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "addAttachedVirtualGuest", params, &r.Options, &resp)
+	return
+}
+
+// As part of the customer service process SoftLayer has provided a quick feedback mechanism for its customers to rate their overall experience with SoftLayer after a ticket is closed. addFinalComments() sets these comments for a ticket update made by a SoftLayer employee.
Final comments may only be set on closed tickets, can only be set once, and may not exceed 4000 characters in length. Once the comments are set ''addFinalComments()'' returns a boolean true.
+func (r Ticket) AddFinalComments(finalComments *string) (resp bool, err error) {
+	params := []interface{}{
+		finalComments,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "addFinalComments", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket) AddScheduledAlert(activationTime *string) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		activationTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "addScheduledAlert", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket) AddScheduledAutoClose(activationTime *string) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		activationTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "addScheduledAutoClose", params, &r.Options, &resp)
+	return
+}
+
+// Add an update to a ticket. A ticket update's entry has a maximum length of 4000 characters, so ''addUpdate()'' splits the ''entry'' property in the ''templateObject'' parameter into 3900 character blocks and creates one entry per 3900 character block. Once complete ''addUpdate()'' emails the ticket's owner and additional email addresses with an update message if the ticket's ''notifyUserOnUpdateFlag'' is set. If the ticket is a Legal or Abuse ticket, then the account's abuse emails are also notified when the updates are processed. Finally, ''addUpdate()'' returns an array of the newly created ticket updates.
+func (r Ticket) AddUpdate(templateObject *datatypes.Ticket_Update, attachedFiles []datatypes.Container_Utility_File_Attachment) (resp []datatypes.Ticket_Update, err error) {
+	params := []interface{}{
+		templateObject,
+		attachedFiles,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "addUpdate", params, &r.Options, &resp)
+	return
+}
+
+// Create an administrative support ticket. Use an administrative ticket if you require SoftLayer's assistance managing your server or content. If you are experiencing an issue with SoftLayer's hardware, network, or services then please open a standard support ticket.
+//
+// Support tickets may only be created in the open state. The SoftLayer API defaults new ticket properties ''userEditableFlag'' to true, ''accountId'' to the id of the account that your API user belongs to, and ''statusId'' to 1001 (or "open"). You may not assign your new ticket to users that your API user does not have access to.
+//
+// Once your ticket is created it is placed in a queue for SoftLayer employees to work. As they update the ticket new [[SoftLayer_Ticket_Update]] entries are added to the ticket object.
+//
+// Administrative support tickets add a one-time $3USD charge to your account.
+func (r Ticket) CreateAdministrativeTicket(templateObject *datatypes.Ticket, contents *string, attachmentId *int, rootPassword *string, controlPanelPassword *string, accessPort *string, attachedFiles []datatypes.Container_Utility_File_Attachment, attachmentType *string) (resp datatypes.Ticket, err error) {
+	params := []interface{}{
+		templateObject,
+		contents,
+		attachmentId,
+		rootPassword,
+		controlPanelPassword,
+		accessPort,
+		attachedFiles,
+		attachmentType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "createAdministrativeTicket", params, &r.Options, &resp)
+	return
+}
+
+// A cancel server request creates a ticket to cancel the resource on next bill date.
The hardware ID parameter is required to determine which server is to be cancelled. NOTE: Hourly bare metal servers will be cancelled on next bill date.
+//
+// The reason parameter could be from the list below:
+// * "No longer needed"
+// * "Business closing down"
+// * "Server / Upgrade Costs"
+// * "Migrating to larger server"
+// * "Migrating to smaller server"
+// * "Migrating to a different SoftLayer datacenter"
+// * "Network performance / latency"
+// * "Support response / timing"
+// * "Sales process / upgrades"
+// * "Moving to competitor"
+//
+//
+// The content parameter describes further the reason for cancelling the server.
+func (r Ticket) CreateCancelServerTicket(attachmentId *int, reason *string, content *string, cancelAssociatedItems *bool, attachmentType *string) (resp datatypes.Ticket, err error) {
+	params := []interface{}{
+		attachmentId,
+		reason,
+		content,
+		cancelAssociatedItems,
+		attachmentType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "createCancelServerTicket", params, &r.Options, &resp)
+	return
+}
+
+// A cancel service request creates a sales ticket. The hardware ID parameter is required to determine which server is to be cancelled.
+//
+// The reason parameter could be from the list below:
+// * "No longer needed"
+// * "Business closing down"
+// * "Server / Upgrade Costs"
+// * "Migrating to larger server"
+// * "Migrating to smaller server"
+// * "Migrating to a different SoftLayer datacenter"
+// * "Network performance / latency"
+// * "Support response / timing"
+// * "Sales process / upgrades"
+// * "Moving to competitor"
+//
+//
+// The content parameter describes further the reason for cancelling service.
+func (r Ticket) CreateCancelServiceTicket(attachmentId *int, reason *string, content *string, attachmentType *string) (resp datatypes.Ticket, err error) {
+	params := []interface{}{
+		attachmentId,
+		reason,
+		content,
+		attachmentType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "createCancelServiceTicket", params, &r.Options, &resp)
+	return
+}
+
+// Create a standard support ticket. Use a standard support ticket if you need to work out a problem related to SoftLayer's hardware, network, or services. If you require SoftLayer's assistance managing your server or content then please open an administrative ticket.
+//
+// Support tickets may only be created in the open state. The SoftLayer API defaults new ticket properties ''userEditableFlag'' to true, ''accountId'' to the id of the account that your API user belongs to, and ''statusId'' to 1001 (or "open"). You may not assign your new ticket to users that your API user does not have access to.
+//
+// Once your ticket is created it is placed in a queue for SoftLayer employees to work. As they update the ticket new [[SoftLayer_Ticket_Update]] entries are added to the ticket object.
+func (r Ticket) CreateStandardTicket(templateObject *datatypes.Ticket, contents *string, attachmentId *int, rootPassword *string, controlPanelPassword *string, accessPort *string, attachedFiles []datatypes.Container_Utility_File_Attachment, attachmentType *string) (resp datatypes.Ticket, err error) {
+	params := []interface{}{
+		templateObject,
+		contents,
+		attachmentId,
+		rootPassword,
+		controlPanelPassword,
+		accessPort,
+		attachedFiles,
+		attachmentType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "createStandardTicket", params, &r.Options, &resp)
+	return
+}
+
+// Create a ticket for the SoftLayer sales team to perform a hardware or service upgrade.
Our sales team will work with you on upgrade feasibility and pricing and then send the upgrade ticket to the proper department to perform the actual upgrade. Service affecting upgrades, such as server hardware or CloudLayer Computing Instance upgrades that require the server to be powered down, must have a two-hour maintenance window specified for our datacenter engineers to perform your upgrade. Account level upgrades, such as adding PPTP VPN users, CDNLayer accounts, and monitoring services, are processed much faster and do not require a maintenance window.
+func (r Ticket) CreateUpgradeTicket(attachmentId *int, genericUpgrade *string, upgradeMaintenanceWindow *string, details *string, attachmentType *string, title *string) (resp datatypes.Ticket, err error) {
+	params := []interface{}{
+		attachmentId,
+		genericUpgrade,
+		upgradeMaintenanceWindow,
+		details,
+		attachmentType,
+		title,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "createUpgradeTicket", params, &r.Options, &resp)
+	return
+}
+
+// Edit a SoftLayer ticket. The edit method is multi-purpose. You may either edit a ticket itself, add an update to a ticket, attach up to two files to a ticket, or perform all of these tasks. The SoftLayer API ignores changes made to the ''userEditableFlag'' and ''accountId'' properties. You may not assign a ticket to a user that your API account does not have access to. You may not enter a custom title for standard support tickets, but may do so when editing an administrative ticket. Finally, you may not close a ticket using this method. Please contact SoftLayer if you need a ticket closed.
+//
+// If you need to only add an update to a ticket then please use the [[SoftLayer_Ticket::addUpdate|addUpdate]] method in this service. Likewise if you need to only attach a file to a ticket then use the [[SoftLayer_Ticket::addAttachedFile|addAttachedFile]] method. The edit method exists as a convenience if you need to perform all these tasks at once.
+func (r Ticket) Edit(templateObject *datatypes.Ticket, contents *string, attachedFiles []datatypes.Container_Utility_File_Attachment) (resp datatypes.Ticket, err error) {
+	params := []interface{}{
+		templateObject,
+		contents,
+		attachedFiles,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "edit", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer customer account associated with a ticket.
+func (r Ticket) GetAccount() (resp datatypes.Account, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAccount", nil, &r.Options, &resp)
+	return
+}
+
+// getAllTicketGroups() retrieves a list of all groups that a ticket may be assigned to. Ticket groups represent the internal department at SoftLayer who a ticket is assigned to.
+//
+// Every SoftLayer ticket has groupId and ticketGroup properties that correspond to one of the groups returned by getAllTicketGroups().
+func (r Ticket) GetAllTicketGroups() (resp []datatypes.Ticket_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAllTicketGroups", nil, &r.Options, &resp)
+	return
+}
+
+// getAllTicketStatuses() retrieves a list of all statuses that a ticket may exist in. Ticket statuses represent the current state of a ticket, usually "open", "assigned", and "closed".
+//
+// Every SoftLayer ticket has statusId and status properties that correspond to one of the statuses returned by getAllTicketStatuses().
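+//
+// Sketch only (assumes a configured *session.Session named sess): listing the
+// statuses once and matching them against a ticket's statusId:
+//
+//	statuses, err := services.GetTicketService(sess).GetAllTicketStatuses()
+//	// each datatypes.Ticket_Status pairs an Id with a Name such as "open"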
+func (r Ticket) GetAllTicketStatuses() (resp []datatypes.Ticket_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAllTicketStatuses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket) GetAssignedAgents() (resp []datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAssignedAgents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The portal user that a ticket is assigned to.
+func (r Ticket) GetAssignedUser() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAssignedUser", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The list of additional emails to notify when a ticket update is made.
+func (r Ticket) GetAttachedAdditionalEmails() (resp []datatypes.User_Customer_AdditionalEmail, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedAdditionalEmails", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Dedicated Hosts associated with a ticket. This is used in cases where a ticket is directly associated with one or more Dedicated Hosts.
+func (r Ticket) GetAttachedDedicatedHosts() (resp []datatypes.Virtual_DedicatedHost, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedDedicatedHosts", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the file attached to a SoftLayer ticket by its given identifier. To retrieve a list of files attached to a ticket either call the SoftLayer_Ticket::getAttachedFiles method or call SoftLayer_Ticket::getObject with ''attachedFiles'' defined in an object mask.
+func (r Ticket) GetAttachedFile(attachmentId *int) (resp []byte, err error) {
+	params := []interface{}{
+		attachmentId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedFile", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The files attached to a ticket.
+func (r Ticket) GetAttachedFiles() (resp []datatypes.Ticket_Attachment_File, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedFiles", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The hardware associated with a ticket. This is used in cases where a ticket is directly associated with one or more pieces of hardware.
+func (r Ticket) GetAttachedHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket) GetAttachedHardwareCount() (resp uint, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedHardwareCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket) GetAttachedResources() (resp []datatypes.Ticket_Attachment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedResources", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The virtual guests associated with a ticket. This is used in cases where a ticket is directly associated with one or more virtualized guest installations or Virtual Servers.
+func (r Ticket) GetAttachedVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAttachedVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The flag indicating that a ticket is waiting on a response from a customer.
+func (r Ticket) GetAwaitingUserResponseFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getAwaitingUserResponseFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A service cancellation request.
+func (r Ticket) GetCancellationRequest() (resp datatypes.Billing_Item_Cancellation_Request, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getCancellationRequest", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetEmployeeAttachments() (resp []datatypes.User_Employee, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getEmployeeAttachments", nil, &r.Options, &resp) + return +} + +// Retrieve The first physical or virtual server attached to a ticket. +func (r Ticket) GetFirstAttachedResource() (resp datatypes.Ticket_Attachment, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getFirstAttachedResource", nil, &r.Options, &resp) + return +} + +// Retrieve The first update made to a ticket. This is typically the contents of a ticket when it's created. +func (r Ticket) GetFirstUpdate() (resp datatypes.Ticket_Update, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getFirstUpdate", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer department that a ticket is assigned to. +func (r Ticket) GetGroup() (resp datatypes.Ticket_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getGroup", nil, &r.Options, &resp) + return +} + +// Retrieve The invoice items associated with a ticket. Ticket based invoice items only exist when a ticket incurs a fee that has been invoiced. +func (r Ticket) GetInvoiceItems() (resp []datatypes.Billing_Invoice_Item, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getInvoiceItems", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetLastActivity() (resp datatypes.Ticket_Activity, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getLastActivity", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Ticket) GetLastEditor() (resp datatypes.User_Interface, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getLastEditor", nil, &r.Options, &resp) + return +} + +// Retrieve The last update made to a ticket. +func (r Ticket) GetLastUpdate() (resp datatypes.Ticket_Update, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getLastUpdate", nil, &r.Options, &resp) + return +} + +// Retrieve A timestamp of the last time the Ticket was viewed by the active user. +func (r Ticket) GetLastViewedDate() (resp datatypes.Time, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getLastViewedDate", nil, &r.Options, &resp) + return +} + +// Retrieve A ticket's associated location within the SoftLayer location hierarchy. +func (r Ticket) GetLocation() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getLocation", nil, &r.Options, &resp) + return +} + +// Retrieve True if there are new, unread updates to this ticket for the current user, False otherwise. +func (r Ticket) GetNewUpdatesFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Ticket", "getNewUpdatesFlag", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_Ticket object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Ticket service. You can only retrieve tickets that are associated with your SoftLayer customer account. 
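+//
+// Illustrative sketch: fetching one ticket by its id with an object mask (the id
+// and mask are hypothetical; `sess` is an assumed configured *session.Session):
+//
+//	ticket, err := services.GetTicketService(sess).
+//		Id(123456).
+//		Mask("id,title,status").
+//		GetObject()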
+func (r Ticket) GetObject() (resp datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket) GetScheduledActions() (resp []datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getScheduledActions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The invoice associated with a ticket. Only tickets with an associated administrative charge have an invoice.
+func (r Ticket) GetServerAdministrationBillingInvoice() (resp datatypes.Billing_Invoice, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getServerAdministrationBillingInvoice", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The refund invoice associated with a ticket. Only tickets with a refund applied to them have an associated refund invoice.
+func (r Ticket) GetServerAdministrationRefundInvoice() (resp datatypes.Billing_Invoice, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getServerAdministrationRefundInvoice", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket) GetServiceProvider() (resp datatypes.Service_Provider, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getServiceProvider", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket) GetState() (resp []datatypes.Ticket_State, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getState", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A ticket's status.
+func (r Ticket) GetStatus() (resp datatypes.Ticket_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A ticket's subject. Only standard support tickets have an associated subject. A standard support ticket's title corresponds with its subject's name.
+func (r Ticket) GetSubject() (resp datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getSubject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket) GetTagReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getTagReferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve all tickets closed since a given date.
+func (r Ticket) GetTicketsClosedSinceDate(closeDate *datatypes.Time) (resp []datatypes.Ticket, err error) {
+	params := []interface{}{
+		closeDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getTicketsClosedSinceDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A ticket's updates.
+func (r Ticket) GetUpdates() (resp []datatypes.Ticket_Update, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "getUpdates", nil, &r.Options, &resp)
+	return
+}
+
+// Mark a ticket as viewed. All currently posted updates will be marked as viewed. The lastViewedDate property will be updated to the current time.
+func (r Ticket) MarkAsViewed() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Ticket", "markAsViewed", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket) RemoveAssignedAgent(agentId *int) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		agentId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeAssignedAgent", params, &r.Options, &resp)
+	return
+}
+
+// removeAttachedAdditionalEmails() removes the specified email addresses from a ticket's notification list.
If one of the provided email addresses is not attached to the ticket then ''removeAttachedAdditionalEmails()'' ignores it and continues to the next one. Once the email addresses are removed ''removeAttachedAdditionalEmails()'' returns a boolean true.
+func (r Ticket) RemoveAttachedAdditionalEmails(emails []string) (resp bool, err error) {
+	params := []interface{}{
+		emails,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeAttachedAdditionalEmails", params, &r.Options, &resp)
+	return
+}
+
+// Detach the given hardware from a SoftLayer ticket. Removing a hardware attachment may delay ticket processing time if the hardware removed is relevant to the ticket's issue. Returns a boolean true upon successful hardware detachment.
+func (r Ticket) RemoveAttachedHardware(hardwareId *int) (resp bool, err error) {
+	params := []interface{}{
+		hardwareId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeAttachedHardware", params, &r.Options, &resp)
+	return
+}
+
+// Detach the given CloudLayer Computing Instance from a SoftLayer ticket. Removing an attachment may delay ticket processing time if the instance removed is relevant to the ticket's issue. Returns a boolean true upon successful detachment.
+func (r Ticket) RemoveAttachedVirtualGuest(guestId *int) (resp bool, err error) {
+	params := []interface{}{
+		guestId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeAttachedVirtualGuest", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket) RemoveScheduledAlert() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeScheduledAlert", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket) RemoveScheduledAutoClose() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Ticket", "removeScheduledAutoClose", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket) SetTags(tags *string) (resp bool, err error) {
+	params := []interface{}{
+		tags,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "setTags", params, &r.Options, &resp)
+	return
+}
+
+// (DEPRECATED) Use [[SoftLayer_Ticket_Survey::getPreference]] method.
+func (r Ticket) SurveyEligible() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket", "surveyEligible", nil, &r.Options, &resp)
+	return
+}
+
+// Creates new additional emails for the assigned user if new emails are provided. Attaches any newly created additional emails to the ticket. Removes any additional emails from a ticket that are not provided as part of $emails.
+func (r Ticket) UpdateAttachedAdditionalEmails(emails []string) (resp bool, err error) {
+	params := []interface{}{
+		emails,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket", "updateAttachedAdditionalEmails", params, &r.Options, &resp)
+	return
+}
+
+// SoftLayer tickets have the ability to be associated with specific dedicated hosts in a customer's inventory. Attaching a dedicated host to a ticket can greatly increase response time from SoftLayer for issues that are related to one or more specific servers on a customer's account. The SoftLayer_Ticket_Attachment_Dedicated_Host data type models the relationship between a dedicated host and a ticket. Only one attachment record may exist per dedicated host item per ticket.
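+//
+// Sketch only (attachment id 99 is hypothetical; `sess` is an assumed configured
+// *session.Session): resolving the dedicated host behind an attachment record:
+//
+//	svc := services.GetTicketAttachmentDedicatedHostService(sess)
+//	host, err := svc.Id(99).GetDedicatedHost()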
+type Ticket_Attachment_Dedicated_Host struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTicketAttachmentDedicatedHostService returns an instance of the Ticket_Attachment_Dedicated_Host SoftLayer service
+func GetTicketAttachmentDedicatedHostService(sess *session.Session) Ticket_Attachment_Dedicated_Host {
+	return Ticket_Attachment_Dedicated_Host{Session: sess}
+}
+
+func (r Ticket_Attachment_Dedicated_Host) Id(id int) Ticket_Attachment_Dedicated_Host {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Ticket_Attachment_Dedicated_Host) Mask(mask string) Ticket_Attachment_Dedicated_Host {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Ticket_Attachment_Dedicated_Host) Filter(filter string) Ticket_Attachment_Dedicated_Host {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Ticket_Attachment_Dedicated_Host) Limit(limit int) Ticket_Attachment_Dedicated_Host {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Ticket_Attachment_Dedicated_Host) Offset(offset int) Ticket_Attachment_Dedicated_Host {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve The Dedicated Host that is attached to a ticket.
+func (r Ticket_Attachment_Dedicated_Host) GetDedicatedHost() (resp datatypes.Virtual_DedicatedHost, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_Dedicated_Host", "getDedicatedHost", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket_Attachment_Dedicated_Host) GetObject() (resp datatypes.Ticket_Attachment_Dedicated_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_Dedicated_Host", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The Dedicated Host that is attached to a ticket.
+func (r Ticket_Attachment_Dedicated_Host) GetResource() (resp datatypes.Virtual_DedicatedHost, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_Dedicated_Host", "getResource", nil, &r.Options, &resp)
+	return
+}
+
+// SoftLayer tickets can have files attached to them. Attaching a file to a ticket is a good way to report issues, provide documentation, and give examples of an issue. Both SoftLayer customers and employees have the ability to attach files to a ticket. The SoftLayer_Ticket_Attachment_File data type models a single file attached to a ticket.
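+//
+// Sketch (ids hypothetical; `sess` assumed configured): listing a ticket's file
+// attachments through the Ticket service, then downloading one by attachment id:
+//
+//	tsvc := services.GetTicketService(sess)
+//	files, err := tsvc.Id(123456).GetAttachedFiles()
+//	// pick an attachment from `files`, then:
+//	data, errFile := tsvc.Id(123456).GetAttachedFile(files[0].Id)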
+type Ticket_Attachment_File struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTicketAttachmentFileService returns an instance of the Ticket_Attachment_File SoftLayer service
+func GetTicketAttachmentFileService(sess *session.Session) Ticket_Attachment_File {
+	return Ticket_Attachment_File{Session: sess}
+}
+
+func (r Ticket_Attachment_File) Id(id int) Ticket_Attachment_File {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Ticket_Attachment_File) Mask(mask string) Ticket_Attachment_File {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Ticket_Attachment_File) Filter(filter string) Ticket_Attachment_File {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Ticket_Attachment_File) Limit(limit int) Ticket_Attachment_File {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Ticket_Attachment_File) Offset(offset int) Ticket_Attachment_File {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Ticket_Attachment_File) GetExtensionWhitelist() (resp []string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_File", "getExtensionWhitelist", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket_Attachment_File) GetObject() (resp datatypes.Ticket_Attachment_File, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_File", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The ticket that a file is attached to.
+func (r Ticket_Attachment_File) GetTicket() (resp datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_File", "getTicket", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The ticket update that a file is attached to.
+func (r Ticket_Attachment_File) GetUpdate() (resp datatypes.Ticket_Update, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Attachment_File", "getUpdate", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Ticket_Priority struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTicketPriorityService returns an instance of the Ticket_Priority SoftLayer service
+func GetTicketPriorityService(sess *session.Session) Ticket_Priority {
+	return Ticket_Priority{Session: sess}
+}
+
+func (r Ticket_Priority) Id(id int) Ticket_Priority {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Ticket_Priority) Mask(mask string) Ticket_Priority {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Ticket_Priority) Filter(filter string) Ticket_Priority {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Ticket_Priority) Limit(limit int) Ticket_Priority {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Ticket_Priority) Offset(offset int) Ticket_Priority {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r Ticket_Priority) GetPriorities() (resp []datatypes.Container_Ticket_Priority, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Priority", "getPriorities", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Ticket_Subject data type models one of the possible subjects that a standard support ticket may belong to. A basic support ticket's title matches its corresponding subject's name.
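+//
+// Sketch (assumes a configured *session.Session named sess): the portal's
+// standard ticket form is backed by the full subject list, per GetAllObjects
+// below:
+//
+//	subjects, err := services.GetTicketSubjectService(sess).GetAllObjects()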
+type Ticket_Subject struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTicketSubjectService returns an instance of the Ticket_Subject SoftLayer service
+func GetTicketSubjectService(sess *session.Session) Ticket_Subject {
+	return Ticket_Subject{Session: sess}
+}
+
+func (r Ticket_Subject) Id(id int) Ticket_Subject {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Ticket_Subject) Mask(mask string) Ticket_Subject {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Ticket_Subject) Filter(filter string) Ticket_Subject {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Ticket_Subject) Limit(limit int) Ticket_Subject {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Ticket_Subject) Offset(offset int) Ticket_Subject {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve all possible ticket subjects. The SoftLayer customer portal uses this method in the add standard support ticket form.
+func (r Ticket_Subject) GetAllObjects() (resp []datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket_Subject) GetCategory() (resp datatypes.Ticket_Subject_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getCategory", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A child subject
+func (r Ticket_Subject) GetChildren() (resp []datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getChildren", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket_Subject) GetGroup() (resp datatypes.Ticket_Group, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getGroup", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Ticket_Subject object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Ticket_Subject service.
+func (r Ticket_Subject) GetObject() (resp datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A parent subject
+func (r Ticket_Subject) GetParent() (resp datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getParent", nil, &r.Options, &resp)
+	return
+}
+
+// SoftLayer maintains relationships between the generic subjects for standard administration and the top five commonly asked questions about these subjects. getTopFiveKnowledgeLayerQuestions() retrieves the top five questions and answers from the SoftLayer KnowledgeLayer related to the given ticket subject.
+func (r Ticket_Subject) GetTopFiveKnowledgeLayerQuestions() (resp []datatypes.Container_KnowledgeLayer_QuestionAnswer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject", "getTopFiveKnowledgeLayerQuestions", nil, &r.Options, &resp)
+	return
+}
+
+// SoftLayer_Ticket_Subject_Category groups ticket subjects into logical groups.
+type Ticket_Subject_Category struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTicketSubjectCategoryService returns an instance of the Ticket_Subject_Category SoftLayer service
+func GetTicketSubjectCategoryService(sess *session.Session) Ticket_Subject_Category {
+	return Ticket_Subject_Category{Session: sess}
+}
+
+func (r Ticket_Subject_Category) Id(id int) Ticket_Subject_Category {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Ticket_Subject_Category) Mask(mask string) Ticket_Subject_Category {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Ticket_Subject_Category) Filter(filter string) Ticket_Subject_Category {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Ticket_Subject_Category) Limit(limit int) Ticket_Subject_Category {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Ticket_Subject_Category) Offset(offset int) Ticket_Subject_Category {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Retrieve all ticket subject categories.
+func (r Ticket_Subject_Category) GetAllObjects() (resp []datatypes.Ticket_Subject_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject_Category", "getAllObjects", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Ticket_Subject_Category) GetObject() (resp datatypes.Ticket_Subject_Category, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject_Category", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Ticket_Subject_Category) GetSubjects() (resp []datatypes.Ticket_Subject, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Subject_Category", "getSubjects", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+type Ticket_Survey struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTicketSurveyService returns an instance of the Ticket_Survey SoftLayer service
+func GetTicketSurveyService(sess *session.Session) Ticket_Survey {
+	return Ticket_Survey{Session: sess}
+}
+
+func (r Ticket_Survey) Id(id int) Ticket_Survey {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Ticket_Survey) Mask(mask string) Ticket_Survey {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Ticket_Survey) Filter(filter string) Ticket_Survey {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Ticket_Survey) Limit(limit int) Ticket_Survey {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Ticket_Survey) Offset(offset int) Ticket_Survey {
+	r.Options.Offset = &offset
+	return r
+}
+
+// Use this method to retrieve the ticket survey preferences. It will return your [[SoftLayer_Container_Ticket_Survey_Preference|survey preference]] which indicates if your account is applicable to receive a survey and if you're opted in. You can control the survey opt-in status via the [[SoftLayer_Ticket_Survey::optIn|opt-in]] or [[SoftLayer_Ticket_Survey::optOut|opt-out]] method.
+func (r Ticket_Survey) GetPreference() (resp datatypes.Container_Ticket_Survey_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Survey", "getPreference", nil, &r.Options, &resp)
+	return
+}
+
+// You will not receive a ticket survey if you are opted out. Use this method to opt back in if you wish to provide feedback to our support team.
You may use the [[SoftLayer_Ticket_Survey::getPreference|getPreference]] method to check your current opt status.
+//
+// This method is deprecated. Use [[SoftLayer_User_Customer::changePreference]] instead.
+func (r Ticket_Survey) OptIn() (resp datatypes.Container_Ticket_Survey_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Survey", "optIn", nil, &r.Options, &resp)
+	return
+}
+
+// By default, customers will occasionally receive a ticket survey upon closing of a ticket. Use this method to opt out of it for the next 90 days. Ticket surveys may not be applicable for some customers. Use the [[SoftLayer_Ticket_Survey::getPreference|getPreference]] method to retrieve your survey preference. The "applicable" property of the [[SoftLayer_Container_Ticket_Survey_Preference|survey preference]] indicates if the survey is relevant to your account or not.
+//
+// This method is deprecated. Use [[SoftLayer_User_Customer::changePreference]] instead.
+func (r Ticket_Survey) OptOut() (resp datatypes.Container_Ticket_Survey_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Survey", "optOut", nil, &r.Options, &resp)
+	return
+}
+
+// The SoftLayer_Ticket_Update_Employee data type models an update to a ticket made by a SoftLayer employee.
+type Ticket_Update_Employee struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetTicketUpdateEmployeeService returns an instance of the Ticket_Update_Employee SoftLayer service
+func GetTicketUpdateEmployeeService(sess *session.Session) Ticket_Update_Employee {
+	return Ticket_Update_Employee{Session: sess}
+}
+
+func (r Ticket_Update_Employee) Id(id int) Ticket_Update_Employee {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Ticket_Update_Employee) Mask(mask string) Ticket_Update_Employee {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Ticket_Update_Employee) Filter(filter string) Ticket_Update_Employee {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Ticket_Update_Employee) Limit(limit int) Ticket_Update_Employee {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Ticket_Update_Employee) Offset(offset int) Ticket_Update_Employee {
+	r.Options.Offset = &offset
+	return r
+}
+
+// As part of the customer service process SoftLayer has provided a quick feedback mechanism for its customers to rate the responses that its employees give on tickets. addResponseRating() sets the rating for a single ticket update made by a SoftLayer employee. Ticket ratings have the integer values 1 through 5, with 1 being the worst and 5 being the best. Once the rating is set ''addResponseRating()'' returns a boolean true.
+func (r Ticket_Update_Employee) AddResponseRating(responseRating *int) (resp bool, err error) {
+	params := []interface{}{
+		responseRating,
+	}
+	err = r.Session.DoRequest("SoftLayer_Ticket_Update_Employee", "addResponseRating", params, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_Ticket_Update_Employee object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_Ticket_Update_Employee service. You can only retrieve employee updates to tickets that your API account has access to. 
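+//
+// For illustration only: a minimal sketch of fetching one employee update,
+// assuming sess is a *session.Session and 12345 is a hypothetical update id:
+//
+//	update, err := services.GetTicketUpdateEmployeeService(sess).
+//		Id(12345).
+//		GetObject()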
+func (r Ticket_Update_Employee) GetObject() (resp datatypes.Ticket_Update_Employee, err error) {
+	err = r.Session.DoRequest("SoftLayer_Ticket_Update_Employee", "getObject", nil, &r.Options, &resp)
+	return
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/services/user.go b/vendor/github.com/softlayer/softlayer-go/services/user.go
new file mode 100644
index 0000000000..ff7eb11a78
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/services/user.go
@@ -0,0 +1,4396 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY
+ */
+
+package services
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+	"github.com/softlayer/softlayer-go/session"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// The SoftLayer_User_Customer data type contains general information relating to a single SoftLayer customer portal user. Personal information in this type such as names, addresses, and phone numbers is not necessarily associated with the customer account the user is assigned to.
+type User_Customer struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetUserCustomerService returns an instance of the User_Customer SoftLayer service
+func GetUserCustomerService(sess *session.Session) User_Customer {
+	return User_Customer{Session: sess}
+}
+
+func (r User_Customer) Id(id int) User_Customer {
+	r.Options.Id = &id
+	return r
+}
+
+func (r User_Customer) Mask(mask string) User_Customer {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r User_Customer) Filter(filter string) User_Customer {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r User_Customer) Limit(limit int) User_Customer {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r User_Customer) Offset(offset int) User_Customer {
+	r.Options.Offset = &offset
+	return r
+}
+
+// no documentation yet
+func (r User_Customer) AcknowledgeSupportPolicy() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "acknowledgeSupportPolicy", nil, &r.Options, &resp)
+	return
+}
+
+// Create a user's API authentication key, allowing that user access to query the SoftLayer API. addApiAuthenticationKey() returns the user's new API key. Each portal user is allowed a maximum of two API keys.
+func (r User_Customer) AddApiAuthenticationKey() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addApiAuthenticationKey", nil, &r.Options, &resp)
+	return
+}
+
+// Add multiple hardware to a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware does not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. 
addBulkHardwareAccess() does not attempt to add hardware access if the given user already has access to that hardware object.
+//
+// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account.
+func (r User_Customer) AddBulkHardwareAccess(hardwareIds []int) (resp bool, err error) {
+	params := []interface{}{
+		hardwareIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addBulkHardwareAccess", params, &r.Options, &resp)
+	return
+}
+
+// Add multiple permissions to a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. addBulkPortalPermission() does not attempt to add permissions already assigned to the user.
+//
+// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account.
+//
+// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are added based on the keyName property of the permission objects within the permissions parameter.
+func (r User_Customer) AddBulkPortalPermission(permissions []datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) {
+	params := []interface{}{
+		permissions,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addBulkPortalPermission", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) AddBulkRoles(roles []datatypes.User_Permission_Role) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		roles,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addBulkRoles", params, &r.Options, &resp)
+	return
+}
+
+// Add multiple CloudLayer Computing Instances to a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances do not exist in the SoftLayer portal and return "not found" exceptions in the API if the user doesn't have access to them. addBulkVirtualGuestAccess() does not attempt to add CloudLayer Computing Instance access if the given user already has access to that CloudLayer Computing Instance object.
+//
+// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set CloudLayer Computing Instance access for any of the other users on their account.
+func (r User_Customer) AddBulkVirtualGuestAccess(virtualGuestIds []int) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addBulkVirtualGuestAccess", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) AddExternalBinding(externalBinding *datatypes.User_External_Binding) (resp datatypes.User_Customer_External_Binding, err error) {
+	params := []interface{}{
+		externalBinding,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addExternalBinding", params, &r.Options, &resp)
+	return
+}
+
+// Add hardware to a portal user's hardware access list. 
A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware does not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. If a user already has access to the hardware you're attempting to add then addHardwareAccess() returns true.
+//
+// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account.
+func (r User_Customer) AddHardwareAccess(hardwareId *int) (resp bool, err error) {
+	params := []interface{}{
+		hardwareId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addHardwareAccess", params, &r.Options, &resp)
+	return
+}
+
+// Create a notification subscription record for the user. If a subscription record exists for the notification, the record will be set to active, if currently inactive.
+func (r User_Customer) AddNotificationSubscriber(notificationKeyName *string) (resp bool, err error) {
+	params := []interface{}{
+		notificationKeyName,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addNotificationSubscriber", params, &r.Options, &resp)
+	return
+}
+
+// Add a permission to a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. If the user already has the permission you're attempting to add then addPortalPermission() returns true.
+//
+// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account.
+//
+// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are added based on the keyName property of the permission parameter.
+func (r User_Customer) AddPortalPermission(permission *datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) {
+	params := []interface{}{
+		permission,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addPortalPermission", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) AddRole(role *datatypes.User_Permission_Role) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		role,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addRole", params, &r.Options, &resp)
+	return
+}
+
+// Add a CloudLayer Computing Instance to a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances do not exist in the SoftLayer portal and return "not found" exceptions in the API if the user doesn't have access to them. If a user already has access to the CloudLayer Computing Instance you're attempting to add then addVirtualGuestAccess() returns true.
+//
+// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set CloudLayer Computing Instance access for any of the other users on their account. 
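+//
+// For illustration only: a minimal sketch of granting a child user access to
+// one instance, assuming sess is a *session.Session and both ids are
+// hypothetical placeholders:
+//
+//	guestId := 224466
+//	ok, err := services.GetUserCustomerService(sess).
+//		Id(112233). // the child user's id
+//		AddVirtualGuestAccess(&guestId)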
+func (r User_Customer) AddVirtualGuestAccess(virtualGuestId *int) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "addVirtualGuestAccess", params, &r.Options, &resp)
+	return
+}
+
+// Select a type of preference you would like to modify using [[SoftLayer_User_Customer::getPreferenceTypes|getPreferenceTypes]] and invoke this method using that preference type key name.
+func (r User_Customer) ChangePreference(preferenceTypeKeyName *string, value *string) (resp []datatypes.User_Preference, err error) {
+	params := []interface{}{
+		preferenceTypeKeyName,
+		value,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "changePreference", params, &r.Options, &resp)
+	return
+}
+
+// This service checks the result of a previously requested external authentication. The [[SoftLayer_Container_User_Customer_External_Binding_Phone|Phone external binding]] container can be used for this service. Make sure to set the [[SoftLayer_Container_User_Customer_External_Binding_Phone::authenticationToken|authenticationToken]] that is generated by the [[SoftLayer_User_Customer|initiateExternalAuthentication]] service.
+func (r User_Customer) CheckExternalAuthenticationStatus(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp datatypes.Container_User_Customer_Portal_Token, err error) {
+	params := []interface{}{
+		authenticationContainer,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "checkExternalAuthenticationStatus", params, &r.Options, &resp)
+	return
+}
+
+// Add a description here
+//
+//
+func (r User_Customer) CheckPhoneFactorAuthenticationForPasswordSet(passwordSet *datatypes.Container_User_Customer_PasswordSet, authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp bool, err error) {
+	params := []interface{}{
+		passwordSet,
+		authenticationContainer,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "checkPhoneFactorAuthenticationForPasswordSet", params, &r.Options, &resp)
+	return
+}
+
+// Create a new subscriber for a given resource.
+func (r User_Customer) CreateNotificationSubscriber(keyName *string, resourceTableId *int) (resp bool, err error) {
+	params := []interface{}{
+		keyName,
+		resourceTableId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "createNotificationSubscriber", params, &r.Options, &resp)
+	return
+}
+
+// Create a new user in the SoftLayer customer portal. createObject() creates a user's portal record and adds them into the SoftLayer community forums. It is no longer possible to set up the SSL or PPTP enable flag in this call since the manage permissions have not yet been set. You will need to make a subsequent call to edit object in order to enable VPN access. An account's master user and sub-users who have the User Manage permission can add new users. createObject() creates users with a default permission set. After adding a user it may be helpful to set their permissions and hardware access.
+//
+// Note: neither the password nor vpnPassword parameters are required.
+//
+// Password: When a new user is created, an email will be sent to the new user's email address with a link to a url that will allow the new user to create or change their password for the SoftLayer customer portal.
+//
+// If the password parameter is provided and is not null, then that value will be validated. If it is a valid password, then the user will be created with this password. This user will still receive a portal password email. 
It can be used within 24 hours to change their password, or it can be allowed to expire, and the password provided during user creation will remain as the user's password.
+//
+// If the password parameter is not provided or the value is null, the user must set their portal password using the link sent in email within 24 hours. If the user fails to set their password within 24 hours, then a non-master user can use the "Reset Password" link on the login page of the portal to request a new email. A master user can use the link to retrieve a phone number to call to assist in resetting their password.
+//
+// The password parameter is ignored for VPN_ONLY users or for IBMid authenticated users.
+//
+// vpnPassword: If the vpnPassword is provided, then the user's vpnPassword will be set to the provided password. When creating a vpn only user, the vpnPassword MUST be supplied. If the vpnPassword is not provided, then the user will need to use the portal to edit their profile and set the vpnPassword.
+//
+//
+func (r User_Customer) CreateObject(templateObject *datatypes.User_Customer, password *string, vpnPassword *string) (resp datatypes.User_Customer, err error) {
+	params := []interface{}{
+		templateObject,
+		password,
+		vpnPassword,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "createObject", params, &r.Options, &resp)
+	return
+}
+
+// Create delivery methods for a notification that the user is subscribed to. Multiple delivery method keyNames can be supplied to create multiple delivery methods for the specified notification. Available delivery methods - 'EMAIL'. Available notifications - 'PLANNED_MAINTENANCE', 'UNPLANNED_INCIDENT'.
+func (r User_Customer) CreateSubscriberDeliveryMethods(notificationKeyName *string, deliveryMethodKeyNames []string) (resp bool, err error) {
+	params := []interface{}{
+		notificationKeyName,
+		deliveryMethodKeyNames,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "createSubscriberDeliveryMethods", params, &r.Options, &resp)
+	return
+}
+
+// Deactivate an existing notification subscriber for a given resource.
+func (r User_Customer) DeactivateNotificationSubscriber(keyName *string, resourceTableId *int) (resp bool, err error) {
+	params := []interface{}{
+		keyName,
+		resourceTableId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "deactivateNotificationSubscriber", params, &r.Options, &resp)
+	return
+}
+
+// Account master users and sub-users who have the User Manage permission in the SoftLayer customer portal can update other users' information. Use editObject() if you wish to edit a single user account. Users who do not have the User Manage permission can only update their own information.
+func (r User_Customer) EditObject(templateObject *datatypes.User_Customer) (resp bool, err error) {
+	params := []interface{}{
+		templateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "editObject", params, &r.Options, &resp)
+	return
+}
+
+// Account master users and sub-users who have the User Manage permission in the SoftLayer customer portal can update other users' information. Use editObjects() if you wish to edit multiple users at once. Users who do not have the User Manage permission can only update their own information. 
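+//
+// For illustration only: a minimal sketch of editing two users at once,
+// assuming sess is a *session.Session and the ids and emails are hypothetical:
+//
+//	users := []datatypes.User_Customer{
+//		{Id: sl.Int(111), Email: sl.String("first@example.com")},
+//		{Id: sl.Int(222), Email: sl.String("second@example.com")},
+//	}
+//	ok, err := services.GetUserCustomerService(sess).EditObjects(users)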
+func (r User_Customer) EditObjects(templateObjects []datatypes.User_Customer) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "editObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) FindUserPreference(profileName *string, containerKeyname *string, preferenceKeyname *string) (resp []datatypes.Layout_Profile, err error) { + params := []interface{}{ + profileName, + containerKeyname, + preferenceKeyname, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "findUserPreference", params, &r.Options, &resp) + return +} + +// Retrieve The customer account that a user belongs to. +func (r User_Customer) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer) GetActions() (resp []datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getActions", nil, &r.Options, &resp) + return +} + +// The getActiveExternalAuthenticationVendors method will return a list of available external vendors that a SoftLayer user can authenticate against. The list will only contain vendors for which the user has at least one active external binding. +func (r User_Customer) GetActiveExternalAuthenticationVendors() (resp []datatypes.Container_User_Customer_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getActiveExternalAuthenticationVendors", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's additional email addresses. These email addresses are contacted when updates are made to support tickets. +func (r User_Customer) GetAdditionalEmails() (resp []datatypes.User_Customer_AdditionalEmail, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAdditionalEmails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetAllowedHardwareIds() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAllowedHardwareIds", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetAllowedVirtualGuestIds() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAllowedVirtualGuestIds", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's API Authentication keys. There is a max limit of two API keys per user. +func (r User_Customer) GetApiAuthenticationKeys() (resp []datatypes.User_Customer_ApiAuthentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getApiAuthenticationKeys", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) GetAuthenticationToken(token *datatypes.Container_User_Authentication_Token) (resp datatypes.Container_User_Authentication_Token, err error) { + params := []interface{}{ + token, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "getAuthenticationToken", params, &r.Options, &resp) + return +} + +// Retrieve The CDN accounts associated with a portal user. +func (r User_Customer) GetCdnAccounts() (resp []datatypes.Network_ContentDelivery_Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "getCdnAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's child users. Some portal users may not have child users. 
+func (r User_Customer) GetChildUsers() (resp []datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getChildUsers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's associated closed tickets.
+func (r User_Customer) GetClosedTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getClosedTickets", nil, &r.Options, &resp)
+	return
+}
+
+// This API gets the default account for the OpenIdConnect identity that is linked to the current SoftLayer user identity. If there is no default present, the API returns null, except in the special case where we find one active user linked to the IBMid. In that case, we will set the link from the IBMid to that user as default, and return the account of which that user is a member. Invoke this only on IBMid-authenticated users.
+func (r User_Customer) GetDefaultAccount(providerType *string) (resp datatypes.Account, err error) {
+	params := []interface{}{
+		providerType,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getDefaultAccount", params, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated. Please see the documentation for initiatePortalPasswordChange.
+func (r User_Customer) GetDefaultSecurityQuestions(key *string) (resp []datatypes.User_Security_Question, err error) {
+	params := []interface{}{
+		key,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getDefaultSecurityQuestions", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The external authentication bindings that link an external identifier to a SoftLayer user.
+func (r User_Customer) GetExternalBindings() (resp []datatypes.User_External_Binding, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getExternalBindings", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's accessible hardware. These permissions control which hardware a user has access to in the SoftLayer customer portal.
+func (r User_Customer) GetHardware() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getHardware", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the number of servers that a portal user has access to. Portal users can have restrictions set to limit services for and to perform actions on hardware. You can set these permissions in the portal by clicking the "administrative" then "user admin" links.
+func (r User_Customer) GetHardwareCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getHardwareCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Hardware notifications associated with this user. A hardware notification links a user to a piece of hardware, and that user will be notified if any monitors on that hardware fail, if the monitors have a status of 'Notify User'.
+func (r User_Customer) GetHardwareNotifications() (resp []datatypes.User_Customer_Notification_Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getHardwareNotifications", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether or not a user has acknowledged the support policy.
+func (r User_Customer) GetHasAcknowledgedSupportPolicyFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getHasAcknowledgedSupportPolicyFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether or not a portal user has access to all hardware on their account. 
+func (r User_Customer) GetHasFullHardwareAccessFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getHasFullHardwareAccessFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether or not a portal user has access to all virtual guests on their account.
+func (r User_Customer) GetHasFullVirtualGuestAccessFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getHasFullVirtualGuestAccessFlag", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) GetImpersonationToken() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getImpersonationToken", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r User_Customer) GetLayoutProfiles() (resp []datatypes.Layout_Profile, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getLayoutProfiles", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's locale. Locale holds a user's language and region information.
+func (r User_Customer) GetLocale() (resp datatypes.Locale, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getLocale", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's attempts to log into the SoftLayer customer portal.
+func (r User_Customer) GetLoginAttempts() (resp []datatypes.User_Customer_Access_Authentication, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getLoginAttempts", nil, &r.Options, &resp)
+	return
+}
+
+// Attempt to authenticate a user to the SoftLayer customer portal using the provided authentication container. Depending on the specific type of authentication container that is used, this API will leverage the appropriate authentication protocol. If authentication is successful then the API returns a list of linked accounts for the user, a token containing the ID of the authenticated user and a hash key used by the SoftLayer customer portal to maintain authentication.
+func (r User_Customer) GetLoginToken(request *datatypes.Container_Authentication_Request_Contract) (resp datatypes.Container_Authentication_Response_Common, err error) {
+	params := []interface{}{
+		request,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getLoginToken", params, &r.Options, &resp)
+	return
+}
+
+// An OpenIdConnect identity, for example an IBMid, can be linked or mapped to one or more individual SoftLayer users, but no more than one SoftLayer user per account. This effectively links the OpenIdConnect identity to those accounts. This API returns a list of all the accounts for which there is a link between the OpenIdConnect identity and a SoftLayer user. Invoke this only on IBMid-authenticated users.
+func (r User_Customer) GetMappedAccounts(providerType *string) (resp []datatypes.Account, err error) {
+	params := []interface{}{
+		providerType,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getMappedAccounts", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's associated mobile device profiles.
+func (r User_Customer) GetMobileDevices() (resp []datatypes.User_Customer_MobileDevice, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getMobileDevices", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Notification subscription records for the user. 
+func (r User_Customer) GetNotificationSubscribers() (resp []datatypes.Notification_Subscriber, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getNotificationSubscribers", nil, &r.Options, &resp)
+	return
+}
+
+// getObject retrieves the SoftLayer_User_Customer object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer service. You can only retrieve users that are assigned to the customer account belonging to the user making the API call.
+func (r User_Customer) GetObject() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// This API returns a SoftLayer_Container_User_Customer_OpenIdConnect_MigrationState object containing the necessary information to determine what migration state the user is in. If the account is not OpenIdConnect authenticated, then an exception is thrown.
+func (r User_Customer) GetOpenIdConnectMigrationState() (resp datatypes.Container_User_Customer_OpenIdConnect_MigrationState, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getOpenIdConnectMigrationState", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's associated open tickets.
+func (r User_Customer) GetOpenTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getOpenTickets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's vpn accessible subnets.
+func (r User_Customer) GetOverrides() (resp []datatypes.Network_Service_Vpn_Overrides, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getOverrides", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's parent user. If a SoftLayer_User_Customer has a null parentId property then it doesn't have a parent user.
+func (r User_Customer) GetParent() (resp datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getParent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's permissions. These permissions control that user's access to functions within the SoftLayer customer portal and API.
+func (r User_Customer) GetPermissions() (resp []datatypes.User_Customer_CustomerPermission_Permission, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getPermissions", nil, &r.Options, &resp)
+	return
+}
+
+// Attempt to authenticate a username and password to the SoftLayer customer portal. Many portal user accounts are configured to require answering a security question on login. In this case getPortalLoginToken() also verifies the given security question ID and answer. If authentication is successful then the API returns a token containing the ID of the authenticated user and a hash key used by the SoftLayer customer portal to maintain authentication.
+func (r User_Customer) GetPortalLoginToken(username *string, password *string, securityQuestionId *int, securityQuestionAnswer *string) (resp datatypes.Container_User_Customer_Portal_Token, err error) {
+	params := []interface{}{
+		username,
+		password,
+		securityQuestionId,
+		securityQuestionAnswer,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getPortalLoginToken", params, &r.Options, &resp)
+	return
+}
+
+// Select a type of preference you would like to get using [[SoftLayer_User_Customer::getPreferenceTypes|getPreferenceTypes]] and invoke this method using that preference type key name. 
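+//
+// For illustration only: a minimal sketch of looking up a preference type and
+// then reading it, assuming sess is a *session.Session; the key name comes
+// from whatever getPreferenceTypes returned, not from a documented constant:
+//
+//	svc := services.GetUserCustomerService(sess)
+//	types, err := svc.GetPreferenceTypes()
+//	if err == nil && len(types) > 0 {
+//		pref, err := svc.GetPreference(types[0].KeyName)
+//		_, _ = pref, err
+//	}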
+func (r User_Customer) GetPreference(preferenceTypeKeyName *string) (resp datatypes.User_Preference, err error) {
+	params := []interface{}{
+		preferenceTypeKeyName,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getPreference", params, &r.Options, &resp)
+	return
+}
+
+// Use any of the preference types to fetch or modify user preferences using [[SoftLayer_User_Customer::getPreference|getPreference]] or [[SoftLayer_User_Customer::changePreference|changePreference]], respectively.
+func (r User_Customer) GetPreferenceTypes() (resp []datatypes.User_Preference_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getPreferenceTypes", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r User_Customer) GetPreferences() (resp []datatypes.User_Preference, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getPreferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the authentication requirements for an outstanding password set/reset request. The password key is provided to the user in an email generated by the [[SoftLayer_User_Customer::newUserPassword|newUserPassword]] method. Password recovery keys are valid for 24 hours after they're generated.
+func (r User_Customer) GetRequirementsForPasswordSet(passwordSet *datatypes.Container_User_Customer_PasswordSet) (resp datatypes.Container_User_Customer_PasswordSet, err error) {
+	params := []interface{}{
+		passwordSet,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getRequirementsForPasswordSet", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r User_Customer) GetRoles() (resp []datatypes.User_Permission_Role, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getRoles", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r User_Customer) GetSalesforceUserLink() (resp datatypes.User_Customer_Link, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getSalesforceUserLink", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's security question answers. Some portal users may not have security answers or may not be configured to require answering a security question on login.
+func (r User_Customer) GetSecurityAnswers() (resp []datatypes.User_Customer_Security_Answer, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getSecurityAnswers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's notification subscription records.
+func (r User_Customer) GetSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getSubscribers", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's successful attempts to log into the SoftLayer customer portal.
+func (r User_Customer) GetSuccessfulLogins() (resp []datatypes.User_Customer_Access_Authentication, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getSuccessfulLogins", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether or not a user is required to acknowledge the support policy for portal access. 
+func (r User_Customer) GetSupportPolicyAcknowledgementRequiredFlag() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getSupportPolicyAcknowledgementRequiredFlag", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) GetSupportPolicyDocument() (resp []byte, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getSupportPolicyDocument", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) GetSupportPolicyName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getSupportPolicyName", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) GetSupportedLocales() (resp []datatypes.Locale, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getSupportedLocales", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether or not a user must take a brief survey the next time they log into the SoftLayer customer portal.
+func (r User_Customer) GetSurveyRequiredFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getSurveyRequiredFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The surveys that a user has taken in the SoftLayer customer portal.
+func (r User_Customer) GetSurveys() (resp []datatypes.Survey, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getSurveys", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's associated tickets.
+func (r User_Customer) GetTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getTickets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's time zone.
+func (r User_Customer) GetTimezone() (resp datatypes.Locale_Timezone, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getTimezone", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A user's unsuccessful attempts to log into the SoftLayer customer portal.
+func (r User_Customer) GetUnsuccessfulLogins() (resp []datatypes.User_Customer_Access_Authentication, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getUnsuccessfulLogins", nil, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated. Please see the documentation for initiatePortalPasswordChange. Retrieve a user object using a password recovery key received in an email generated by the [[SoftLayer_User_Customer::lostPassword|lostPassword]] method. The SoftLayer customer portal uses getUserFromLostPasswordRequest() to retrieve user security questions. Password recovery keys are valid for 24 hours after they're generated.
+func (r User_Customer) GetUserFromLostPasswordRequest(key *string) (resp []datatypes.User_Security_Question, err error) {
+	params := []interface{}{
+		key,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getUserFromLostPasswordRequest", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a user object using a password token. When a new user is created or when a user has requested a password change using initiatePortalPasswordChange, they will have received an email that contains a url with a token. That token is used as the parameter for getUserIdForPasswordSet. 
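+//
+// For illustration only: a minimal sketch of resolving a user id from an
+// emailed token, assuming sess is a *session.Session and tokenFromEmail holds
+// the token extracted from the url in that email:
+//
+//	userId, err := services.GetUserCustomerService(sess).
+//		GetUserIdForPasswordSet(sl.String(tokenFromEmail))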
+func (r User_Customer) GetUserIdForPasswordSet(key *string) (resp int, err error) {
+	params := []interface{}{
+		key,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getUserIdForPasswordSet", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r User_Customer) GetUserLinks() (resp []datatypes.User_Customer_Link, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getUserLinks", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) GetUserPreferences(profileName *string, containerKeyname *string) (resp []datatypes.Layout_Profile, err error) {
+	params := []interface{}{
+		profileName,
+		containerKeyname,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getUserPreferences", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's status, which controls overall access to the SoftLayer customer portal and VPN access to the private network.
+func (r User_Customer) GetUserStatus() (resp datatypes.User_Customer_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getUserStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the number of CloudLayer Computing Instances that a portal user has access to. Portal users can have restrictions set to limit services for and to perform actions on CloudLayer Computing Instances. You can set these permissions in the portal by clicking the "administrative" then "user admin" links.
+func (r User_Customer) GetVirtualGuestCount() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getVirtualGuestCount", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A portal user's accessible CloudLayer Computing Instances. These permissions control which CloudLayer Computing Instances a user has access to in the SoftLayer customer portal.
+func (r User_Customer) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "getVirtualGuests", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) InTerminalStatus() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "inTerminalStatus", nil, &r.Options, &resp)
+	return
+}
+
+// The service initiates an external authentication with the given external authentication vendor. The authentication container and its content will be verified before an attempt is made to initiate an external authentication. The [[SoftLayer_Container_User_Customer_External_Binding_Phone|Phone external binding]] container can be used for this service.
+//
+// This service returns a unique authentication request token. You can use the [[SoftLayer_User_Customer::checkExternalAuthenticationStatus|checkExternalAuthenticationStatus]] service to check if the authentication request is complete or not.
+func (r User_Customer) InitiateExternalAuthentication(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp string, err error) {
+	params := []interface{}{
+		authenticationContainer,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "initiateExternalAuthentication", params, &r.Options, &resp)
+	return
+}
+
+// Sends a password change email to the user containing a url that allows the user to change their password. This is the first step when a user wishes to change their password. The url that is generated contains a one-time use token that is valid for only 24 hours. 
+//
+// If this is a new master user who has never logged into the portal, then password reset will be initiated. Once a master user has logged into the portal, they must set up their security questions prior to logging out because master users are required to answer a security question during the password reset process. Should a master user not have security questions defined and not remember their password in order to define the security questions, then they will need to contact support at live chat or Revenue Services for assistance.
+//
+// For security reasons, the number of reset requests per username is limited within an undisclosed timeframe.
+func (r User_Customer) InitiatePortalPasswordChange(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "initiatePortalPasswordChange", params, &r.Options, &resp)
+	return
+}
+
+// A Brand Agent that has permissions to Add Customer Accounts will be able to request the password email be sent to the Master User of a Customer Account created by the same Brand as the agent making the request. For security reasons, the number of reset requests is limited within an undisclosed timeframe.
+func (r User_Customer) InitiatePortalPasswordChangeByBrandAgent(username *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "initiatePortalPasswordChangeByBrandAgent", params, &r.Options, &resp)
+	return
+}
+
+// Send email invitation to a user to join a SoftLayer account and authenticate with OpenIdConnect. Throws an exception on error.
+func (r User_Customer) InviteUserToLinkOpenIdConnect(providerType *string) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		providerType,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "inviteUserToLinkOpenIdConnect", params, &r.Options, &resp)
+	return
+}
+
+// Portal users are considered master users if they don't have an associated parent user. The only users who don't have parent users are users whose username matches their SoftLayer account name. Master users have special permissions throughout the SoftLayer customer portal.
+func (r User_Customer) IsMasterUser() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "isMasterUser", nil, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated! SoftLayer Community Forums no longer exist; therefore, any password verified by this method will return false.
+//
+// Determine if a string is the given user's login password to the SoftLayer community forums.
+func (r User_Customer) IsValidForumPassword(password *string) (resp bool, err error) {
+	params := []interface{}{
+		password,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "isValidForumPassword", params, &r.Options, &resp)
+	return
+}
+
+// Determine if a string is the given user's login password to the SoftLayer customer portal.
+func (r User_Customer) IsValidPortalPassword(password *string) (resp bool, err error) {
+	params := []interface{}{
+		password,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "isValidPortalPassword", params, &r.Options, &resp)
+	return
+}
+
+// This method is deprecated. Please see the documentation for initiatePortalPasswordChange. SoftLayer provides a way for users of its customer portal to recover lost passwords. The lostPassword() method is the first step in this process. 
Given a valid username and email address, the SoftLayer API will email the address provided with a URL to visit to begin the password recovery process. The last part of this URL is a hash key that's used as an identifier throughout this process. Use this hash key in the [[SoftLayer_User_Customer::setPasswordFromLostPasswordRequest|setPasswordFromLostPasswordRequest]] method to reset a user's password. Password recovery hash keys are valid for 24 hours after they're generated.
+func (r User_Customer) LostPassword(username *string, email *string) (resp bool, err error) {
+	params := []interface{}{
+		username,
+		email,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "lostPassword", params, &r.Options, &resp)
+	return
+}
+
+// The perform external authentication method will authenticate the given external authentication container with an external vendor. The authentication container and its contents will be verified before an attempt is made to authenticate the contents of the container with an external vendor.
+func (r User_Customer) PerformExternalAuthentication(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp datatypes.Container_User_Customer_Portal_Token, err error) {
+	params := []interface{}{
+		authenticationContainer,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "performExternalAuthentication", params, &r.Options, &resp)
+	return
+}
+
+// Set the password for a user who has an outstanding password request. A user with an outstanding password request will have an unused and unexpired password key. The password key is part of the url provided to the user in the email sent to the user with information on how to set their password. The email was generated by the [[SoftLayer_User_Customer::processPasswordSetRequest|processPasswordSetRequest]] method. Password recovery keys are valid for 24 hours after they're generated.
+//
+// User portal passwords must match the following restrictions. Portal passwords must...
+// * ...be over eight characters long.
+// * ...be under twenty characters long.
+// * ...contain at least one uppercase letter
+// * ...contain at least one lowercase letter
+// * ...contain at least one number
+// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + =
+// * ...not match your username
+// * ...not match your forum password
+func (r User_Customer) ProcessPasswordSetRequest(passwordSet *datatypes.Container_User_Customer_PasswordSet, authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp bool, err error) {
+	params := []interface{}{
+		passwordSet,
+		authenticationContainer,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "processPasswordSetRequest", params, &r.Options, &resp)
+	return
+}
+
+// Remove all hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. If the current user does not have administrative privileges over this user, an inadequate permissions exception will get thrown.
+//
+// Users can call this function on child users, but not on themselves. An account's master has access to all users' permissions on their account. 
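+//
+// For illustration only: a minimal sketch of clearing a child user's hardware
+// access list, assuming sess is a *session.Session and 112233 is a
+// hypothetical child user id:
+//
+//	ok, err := services.GetUserCustomerService(sess).
+//		Id(112233).
+//		RemoveAllHardwareAccessForThisUser()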
+func (r User_Customer) RemoveAllHardwareAccessForThisUser() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeAllHardwareAccessForThisUser", nil, &r.Options, &resp)
+	return
+}
+
+// Remove all cloud computing instances from a portal user's instance access list. A user's instance access list controls which of an account's computing instance objects a user has access to in the SoftLayer customer portal and API. If the current user does not have administrative privileges over this user, an inadequate permissions exception will get thrown.
+//
+// Users can call this function on child users, but not on themselves. An account's master has access to all users' permissions on their account.
+func (r User_Customer) RemoveAllVirtualAccessForThisUser() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeAllVirtualAccessForThisUser", nil, &r.Options, &resp)
+	return
+}
+
+// Remove a user's API authentication key, removing that user's access to query the SoftLayer API.
+func (r User_Customer) RemoveApiAuthenticationKey(keyId *int) (resp bool, err error) {
+	params := []interface{}{
+		keyId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeApiAuthenticationKey", params, &r.Options, &resp)
+	return
+}
+
+// Remove multiple hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware does not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. If a user does not have access to the hardware you're attempting to remove, then removeBulkHardwareAccess() returns true.
+//
+// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account.
+//
+// If the user has full hardware access, then it will provide access to "ALL but passed in" hardware ids.
+func (r User_Customer) RemoveBulkHardwareAccess(hardwareIds []int) (resp bool, err error) {
+	params := []interface{}{
+		hardwareIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeBulkHardwareAccess", params, &r.Options, &resp)
+	return
+}
+
+// Remove multiple permissions from a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. Removing a user's permission will affect that user's portal and API access. removePortalPermission() does not attempt to remove permissions that are not assigned to the user.
+//
+// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account.
+//
+// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are removed based on the keyName property of the permission objects within the permissions parameter. 
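+//
+// For illustration only: a minimal sketch of revoking permissions in bulk,
+// assuming sess is a *session.Session; the user id and the "TICKET_VIEW"
+// keyName are hypothetical placeholders, not verified permission names:
+//
+//	perms := []datatypes.User_Customer_CustomerPermission_Permission{
+//		{KeyName: sl.String("TICKET_VIEW")},
+//	}
+//	ok, err := services.GetUserCustomerService(sess).
+//		Id(112233).
+//		RemoveBulkPortalPermission(perms)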
+func (r User_Customer) RemoveBulkPortalPermission(permissions []datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) {
+	params := []interface{}{
+		permissions,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeBulkPortalPermission", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) RemoveBulkRoles(roles []datatypes.User_Permission_Role) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		roles,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeBulkRoles", params, &r.Options, &resp)
+	return
+}
+
+// Remove multiple CloudLayer Computing Instances from a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances do not exist in the SoftLayer portal and return "not found" exceptions in the API if the user doesn't have access to them. If a user does not have access to the CloudLayer Computing Instance you're attempting to remove, then removeBulkVirtualGuestAccess() returns true.
+//
+// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set CloudLayer Computing Instance access for any of the other users on their account.
+func (r User_Customer) RemoveBulkVirtualGuestAccess(virtualGuestIds []int) (resp bool, err error) {
+	params := []interface{}{
+		virtualGuestIds,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeBulkVirtualGuestAccess", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r User_Customer) RemoveExternalBinding(externalBinding *datatypes.User_External_Binding) (resp bool, err error) {
+	params := []interface{}{
+		externalBinding,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeExternalBinding", params, &r.Options, &resp)
+	return
+}
+
+// Remove hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware does not exist in the SoftLayer portal and returns "not found" exceptions in the API if the user doesn't have access to it. If a user does not have access to the hardware you're attempting to remove, then removeHardwareAccess() returns true.
+//
+// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account.
+func (r User_Customer) RemoveHardwareAccess(hardwareId *int) (resp bool, err error) {
+	params := []interface{}{
+		hardwareId,
+	}
+	err = r.Session.DoRequest("SoftLayer_User_Customer", "removeHardwareAccess", params, &r.Options, &resp)
+	return
+}
+
+// Remove a permission from a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. Removing a user's permission will affect that user's portal and API access. If the user does not have the permission you're attempting to remove then removePortalPermission() returns true.
+//
+// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account. 
+// +// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are removed based on the keyName property of the permission parameter. +func (r User_Customer) RemovePortalPermission(permission *datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) { + params := []interface{}{ + permission, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "removePortalPermission", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) RemoveRole(role *datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + role, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "removeRole", params, &r.Options, &resp) + return +} + +// Remove a CloudLayer Computing Instance from a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's computing instances a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances that a user does not have access to appear to not exist in the SoftLayer portal and return "not found" exceptions in the API. If a user does not have access to the CloudLayer Computing Instance you're attempting to remove, then removeVirtualGuestAccess() returns true. +// +// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master user has access to all CloudLayer Computing Instances on their customer account and can set instance access for any of the other users on their account. +func (r User_Customer) RemoveVirtualGuestAccess(virtualGuestId *int) (resp bool, err error) { + params := []interface{}{ + virtualGuestId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "removeVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// This method is deprecated. Please see the documentation for initiatePortalPasswordChange. This method attempts to authenticate a username and password to the SoftLayer customer portal and reset the user's password. If authentication and the password reset are successful then the API returns true.
+func (r User_Customer) ResetExpiredPassword(username *string, password *string, newPassword *string, securityQuestionId *int, securityQuestionAnswer *string) (resp bool, err error) { + params := []interface{}{ + username, + password, + newPassword, + securityQuestionId, + securityQuestionAnswer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "resetExpiredPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) SamlAuthenticate(accountId *string, samlResponse *string) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + accountId, + samlResponse, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "samlAuthenticate", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) SamlBeginAuthentication(accountId *int) (resp string, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "samlBeginAuthentication", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) SamlBeginLogout() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "samlBeginLogout", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) SamlLogout(samlResponse *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + samlResponse, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "samlLogout", params, &r.Options, &resp) + return +} + +// An OpenIdConnect identity, for example an IBMid, can be linked or mapped to one or more individual SoftLayer users, but no more than one per account. If an OpenIdConnect identity is mapped to multiple accounts in this manner, one such account should be identified as the default account for that identity. Invoke this only on IBMid-authenticated users. +func (r User_Customer) SetDefaultAccount(providerType *string, accountId *int) (resp datatypes.Account, err error) { + params := []interface{}{ + providerType, + accountId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "setDefaultAccount", params, &r.Options, &resp) + return +} + +// Set a user's password via the lost password recovery system, using a password recovery key received in an email generated by the [[SoftLayer_User_Customer::lostPassword|lostPassword]] method. Password recovery keys are valid for 24 hours after they're generated. +// +// User portal passwords must match the following restrictions. Portal passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your forum password +func (r User_Customer) SetPasswordFromLostPasswordRequest(key *string, password *string, securityAnswers []datatypes.User_Customer_Security_Answer) (resp bool, err error) { + params := []interface{}{ + key, + password, + securityAnswers, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "setPasswordFromLostPasswordRequest", params, &r.Options, &resp) + return +} + +// When the master user calls this API for the IBMid provider type and there is an existing IBMid for this user's email on the SoftLayer account, the call will silently (without sending an invitation email) create a link for the IBMid.
NOTE: If the SoftLayer user is already linked to IBMid, this call will fail. If the IBMid specified by the email of this user is already used in a link to another user in this account, this call will fail. If there is already an open invitation from this SoftLayer user to this or any IBMid, this call will fail. If there is already an open invitation from some other SoftLayer user in this account to this IBMid, then this call will fail. +func (r User_Customer) SilentlyMigrateUserOpenIdConnect(providerType *string) (resp bool, err error) { + params := []interface{}{ + providerType, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "silentlyMigrateUserOpenIdConnect", params, &r.Options, &resp) + return +} + +// This method is deprecated! The SoftLayer Community Forums no longer exist; therefore, this method will return false. +// +// Update a user's password on the SoftLayer community forums. As with portal passwords, user forum passwords must match the following restrictions. Forum passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your portal password +// Finally, users can only update their own password. +func (r User_Customer) UpdateForumPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateForumPassword", params, &r.Options, &resp) + return +} + +// Update the active status for a notification that the user is subscribed to. A notification along with an active flag can be supplied to update the active status for a particular notification subscription. +func (r User_Customer) UpdateNotificationSubscriber(notificationKeyName *string, active *int) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + active, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateNotificationSubscriber", params, &r.Options, &resp) + return +} + +// This method is deprecated. Please see the documentation for initiatePortalPasswordChange. Update a user's password on the SoftLayer customer portal. As with forum passwords, user portal passwords must match the following restrictions. Portal passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your forum password +// Finally, users can only update their own password. An account's master user can update any of their account users' passwords. +func (r User_Customer) UpdatePassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updatePassword", params, &r.Options, &resp) + return +} + +// Update a user's login security questions and answers on the SoftLayer customer portal. These questions and answers are used to optionally log into the SoftLayer customer portal using two-factor authentication.
Each user must have three distinct questions set with a unique answer for each question, and each answer may only contain alphanumeric or the . , - _ ( ) [ ] : ; > < characters. Existing user security questions and answers are deleted before new ones are set, and users may only update their own security questions and answers. +func (r User_Customer) UpdateSecurityAnswers(questions []datatypes.User_Security_Question, answers []string) (resp bool, err error) { + params := []interface{}{ + questions, + answers, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateSecurityAnswers", params, &r.Options, &resp) + return +} + +// Update a delivery method for a notification that the user is subscribed to. A delivery method keyName along with an active flag can be supplied to update the active status of the delivery methods for the specified notification. Available delivery methods - 'EMAIL'. Available notifications - 'PLANNED_MAINTENANCE', 'UNPLANNED_INCIDENT'. +func (r User_Customer) UpdateSubscriberDeliveryMethod(notificationKeyName *string, deliveryMethodKeyNames []string, active *int) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + deliveryMethodKeyNames, + active, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateSubscriberDeliveryMethod", params, &r.Options, &resp) + return +} + +// Update a user's VPN password on the SoftLayer customer portal. As with portal passwords, VPN passwords must match the following restrictions. VPN passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ = +// * ...not match your username +// * ...not match your forum password +// Finally, users can only update their own VPN password. An account's master user can update any of their account users' VPN passwords. +func (r User_Customer) UpdateVpnPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateVpnPassword", params, &r.Options, &resp) + return +} + +// Always call this function to enable changes when manually configuring VPN subnet access. +func (r User_Customer) UpdateVpnUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer", "updateVpnUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer) ValidateAuthenticationToken(authenticationToken *datatypes.Container_User_Authentication_Token) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + authenticationToken, + } + err = r.Session.DoRequest("SoftLayer_User_Customer", "validateAuthenticationToken", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_ApiAuthentication type contains a user's authentication key(s).
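+//
+// A usage sketch, assuming an authenticated *session.Session named sess and a
+// hypothetical key record id: scope the service with Id(), fetch the record,
+// then follow the relation to its owner:
+//
+//	authSvc := GetUserCustomerApiAuthenticationService(sess).Id(98765)
+//	key, err := authSvc.GetObject()
+//	if err == nil {
+//		owner, _ := authSvc.GetUser()
+//		_, _ = key, owner
+//	}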
+type User_Customer_ApiAuthentication struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerApiAuthenticationService returns an instance of the User_Customer_ApiAuthentication SoftLayer service +func GetUserCustomerApiAuthenticationService(sess *session.Session) User_Customer_ApiAuthentication { + return User_Customer_ApiAuthentication{Session: sess} +} + +func (r User_Customer_ApiAuthentication) Id(id int) User_Customer_ApiAuthentication { + r.Options.Id = &id + return r +} + +func (r User_Customer_ApiAuthentication) Mask(mask string) User_Customer_ApiAuthentication { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_ApiAuthentication) Filter(filter string) User_Customer_ApiAuthentication { + r.Options.Filter = filter + return r +} + +func (r User_Customer_ApiAuthentication) Limit(limit int) User_Customer_ApiAuthentication { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_ApiAuthentication) Offset(offset int) User_Customer_ApiAuthentication { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_ApiAuthentication) EditObject(templateObject *datatypes.User_Customer_ApiAuthentication) (resp datatypes.User_Customer_ApiAuthentication, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_ApiAuthentication", "editObject", params, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer_ApiAuthentication object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_ApiAuthentication service. +func (r User_Customer_ApiAuthentication) GetObject() (resp datatypes.User_Customer_ApiAuthentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_ApiAuthentication", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The user who owns the API authentication key. +func (r User_Customer_ApiAuthentication) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_ApiAuthentication", "getUser", nil, &r.Options, &resp) + return +} + +// Each SoftLayer portal account is assigned a series of permissions that determine what access the user has to functions within the SoftLayer customer portal. This status is reflected in the SoftLayer_User_Customer_Status data type. Permissions differ from user status in that user status applies globally to the portal while user permissions are applied to specific portal functions.
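+//
+// Sketch: list every available permission, using an object mask to trim the
+// response to the keyName and name properties (sess is an authenticated
+// session):
+//
+//	permSvc := GetUserCustomerCustomerPermissionPermissionService(sess)
+//	all, err := permSvc.Mask("keyName;name").GetAllObjects()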
+type User_Customer_CustomerPermission_Permission struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerCustomerPermissionPermissionService returns an instance of the User_Customer_CustomerPermission_Permission SoftLayer service +func GetUserCustomerCustomerPermissionPermissionService(sess *session.Session) User_Customer_CustomerPermission_Permission { + return User_Customer_CustomerPermission_Permission{Session: sess} +} + +func (r User_Customer_CustomerPermission_Permission) Id(id int) User_Customer_CustomerPermission_Permission { + r.Options.Id = &id + return r +} + +func (r User_Customer_CustomerPermission_Permission) Mask(mask string) User_Customer_CustomerPermission_Permission { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_CustomerPermission_Permission) Filter(filter string) User_Customer_CustomerPermission_Permission { + r.Options.Filter = filter + return r +} + +func (r User_Customer_CustomerPermission_Permission) Limit(limit int) User_Customer_CustomerPermission_Permission { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_CustomerPermission_Permission) Offset(offset int) User_Customer_CustomerPermission_Permission { + r.Options.Offset = &offset + return r +} + +// Retrieve all available permissions. +func (r User_Customer_CustomerPermission_Permission) GetAllObjects() (resp []datatypes.User_Customer_CustomerPermission_Permission, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_CustomerPermission_Permission", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer_CustomerPermission_Permission object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_CustomerPermission_Permission service. +func (r User_Customer_CustomerPermission_Permission) GetObject() (resp datatypes.User_Customer_CustomerPermission_Permission, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_CustomerPermission_Permission", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_External_Binding data type contains general information for a single external binding. This includes the 3rd party vendor, type of binding, and a unique identifier and password that is used to authenticate against the 3rd party service. 
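+//
+// Sketch: temporarily disable a binding (the binding id is hypothetical; see
+// Disable below for the accepted reason strings):
+//
+//	bindSvc := GetUserCustomerExternalBindingService(sess).Id(555)
+//	ok, err := bindSvc.Disable(sl.String("TemporarilyUnavailable"))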
+type User_Customer_External_Binding struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerExternalBindingService returns an instance of the User_Customer_External_Binding SoftLayer service +func GetUserCustomerExternalBindingService(sess *session.Session) User_Customer_External_Binding { + return User_Customer_External_Binding{Session: sess} +} + +func (r User_Customer_External_Binding) Id(id int) User_Customer_External_Binding { + r.Options.Id = &id + return r +} + +func (r User_Customer_External_Binding) Mask(mask string) User_Customer_External_Binding { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_External_Binding) Filter(filter string) User_Customer_External_Binding { + r.Options.Filter = filter + return r +} + +func (r User_Customer_External_Binding) Limit(limit int) User_Customer_External_Binding { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_External_Binding) Offset(offset int) User_Customer_External_Binding { + r.Options.Offset = &offset + return r +} + +// Delete an external authentication binding. If the external binding currently has an active billing item associated you will be prevented from deleting the binding. The alternative method to remove an external authentication binding is to use the service cancellation form. +func (r User_Customer_External_Binding) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "deleteObject", nil, &r.Options, &resp) + return +} + +// Disabling an external binding will allow you to keep the external binding on your SoftLayer account, but will not require you to authenticate with our trusted two-factor authentication vendor when logging into the SoftLayer customer portal. +// +// You may supply one of the following reasons when you disable an external binding: +// *Unspecified +// *TemporarilyUnavailable +// *Lost +// *Stolen +func (r User_Customer_External_Binding) Disable(reason *string) (resp bool, err error) { + params := []interface{}{ + reason, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "disable", params, &r.Options, &resp) + return +} + +// Enabling an external binding will activate the binding on your account and require you to authenticate with our trusted 3rd party two-factor authentication vendor when logging into the SoftLayer customer portal. +// +// Please note that API access will be disabled for users that have an active external binding. +func (r User_Customer_External_Binding) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "enable", nil, &r.Options, &resp) + return +} + +// Retrieve Attributes of an external authentication binding. +func (r User_Customer_External_Binding) GetAttributes() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for external authentication. +func (r User_Customer_External_Binding) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve An optional note for identifying the external binding.
+func (r User_Customer_External_Binding) GetNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getNote", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding) GetObject() (resp datatypes.User_Customer_External_Binding, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of external authentication binding. +func (r User_Customer_External_Binding) GetType() (resp datatypes.User_External_Binding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer user that the external authentication binding belongs to. +func (r User_Customer_External_Binding) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getUser", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor of an external authentication binding. +func (r User_Customer_External_Binding) GetVendor() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "getVendor", nil, &r.Options, &resp) + return +} + +// Update the note of an external binding. The note is an optional property that is used to store information about a binding. +func (r User_Customer_External_Binding) UpdateNote(text *string) (resp bool, err error) { + params := []interface{}{ + text, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding", "updateNote", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_External_Binding_Phone data type contains information about an external binding that uses a phone call, SMS or mobile app for two-factor authentication. The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal or VPN to authenticate them against a trusted 3rd party, in this case using a mobile phone, mobile phone application or land-line phone. +// +// SoftLayer users with an active external binding will be prohibited from using the API for security reasons.
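+//
+// Sketch of the phone validation flow on this service: request validation for
+// a number, then check the result with the returned token. The container
+// field names are assumed from the updatePhone parameter documentation below,
+// and the id and phone values are illustrative:
+//
+//	phoneSvc := GetUserCustomerExternalBindingPhoneService(sess).Id(777)
+//	token, err := phoneSvc.RequestPhoneValidation(&datatypes.Container_User_Data_Phone{
+//		CountryCode: sl.Int(1),
+//		Phone:       sl.String("5551234567"),
+//		PhoneType:   sl.String("PRIMARY"),
+//	})
+//	if err == nil {
+//		validated, _ := phoneSvc.CheckPhoneValidationResult(&token)
+//		_ = validated
+//	}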
+type User_Customer_External_Binding_Phone struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerExternalBindingPhoneService returns an instance of the User_Customer_External_Binding_Phone SoftLayer service +func GetUserCustomerExternalBindingPhoneService(sess *session.Session) User_Customer_External_Binding_Phone { + return User_Customer_External_Binding_Phone{Session: sess} +} + +func (r User_Customer_External_Binding_Phone) Id(id int) User_Customer_External_Binding_Phone { + r.Options.Id = &id + return r +} + +func (r User_Customer_External_Binding_Phone) Mask(mask string) User_Customer_External_Binding_Phone { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_External_Binding_Phone) Filter(filter string) User_Customer_External_Binding_Phone { + r.Options.Filter = filter + return r +} + +func (r User_Customer_External_Binding_Phone) Limit(limit int) User_Customer_External_Binding_Phone { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_External_Binding_Phone) Offset(offset int) User_Customer_External_Binding_Phone { + r.Options.Offset = &offset + return r +} + +// Return a phone validation result. +func (r User_Customer_External_Binding_Phone) CheckPhoneValidationResult(token *string) (resp bool, err error) { + params := []interface{}{ + token, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "checkPhoneValidationResult", params, &r.Options, &resp) + return +} + +// Delete an external authentication binding. If the external binding currently has an active billing item associated you will be prevented from deleting the binding. The alternative method to remove an external authentication binding is to use the service cancellation form. +func (r User_Customer_External_Binding_Phone) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "deleteObject", nil, &r.Options, &resp) + return +} + +// Disabling an external binding will allow you to keep the external binding on your SoftLayer account, but will not require you to authenticate with our trusted two-factor authentication vendor when logging into the SoftLayer customer portal. +// +// You may supply one of the following reasons when you disable an external binding: +// *Unspecified +// *TemporarilyUnavailable +// *Lost +// *Stolen +func (r User_Customer_External_Binding_Phone) Disable(reason *string) (resp bool, err error) { + params := []interface{}{ + reason, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "disable", params, &r.Options, &resp) + return +} + +// Enabling an external binding will activate the binding on your account and require you to authenticate with our trusted 3rd party two-factor authentication vendor when logging into the SoftLayer customer portal. +// +// Please note that API access will be disabled for users that have an active external binding. +func (r User_Customer_External_Binding_Phone) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "enable", nil, &r.Options, &resp) + return +} + +// This service returns key names of all available authentication modes. See [[SoftLayer_Container_User_Customer_External_Binding_Phone_Mode|authentication mode]] container for details.
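+//
+// For example (sketch, reusing phoneSvc from the sketch on the type above):
+//
+//	modes, err := phoneSvc.GetAllAuthenticationModes()
+//	if err == nil && len(modes) > 0 {
+//		pinModes, _ := phoneSvc.GetAllAuthenticationPinModes(&modes[0])
+//		_ = pinModes
+//	}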
+func (r User_Customer_External_Binding_Phone) GetAllAuthenticationModes() (resp []string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getAllAuthenticationModes", nil, &r.Options, &resp) + return +} + +// This service returns key names of all available authentication pin modes. Refer to [[SoftLayer_User_Customer_External_Binding_Phone::getAllAuthenticationModes|getAllAuthenticationModes]] to retrieve authentication mode key names. +func (r User_Customer_External_Binding_Phone) GetAllAuthenticationPinModes(authenticationModeKeyName *string) (resp []string, err error) { + params := []interface{}{ + authenticationModeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getAllAuthenticationPinModes", params, &r.Options, &resp) + return +} + +// Retrieve Attributes of an external authentication binding. +func (r User_Customer_External_Binding_Phone) GetAttributes() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getAttributes", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Phone) GetAuthenticationMode() (resp datatypes.Container_User_Customer_External_Binding_Phone_Mode, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getAuthenticationMode", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for external authentication. +func (r User_Customer_External_Binding_Phone) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The current external binding status. It can be "ACTIVE" or "BLOCKED". +func (r User_Customer_External_Binding_Phone) GetBindingStatus() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getBindingStatus", nil, &r.Options, &resp) + return +} + +// Retrieve An optional note for identifying the external binding. +func (r User_Customer_External_Binding_Phone) GetNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getNote", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Phone) GetObject() (resp datatypes.User_Customer_External_Binding_Phone, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getObject", nil, &r.Options, &resp) + return +} + +// Some vendors' mobile apps require an activation code. Use this method to get the activation data.
+func (r User_Customer_External_Binding_Phone) GetPhoneAppActivationCode() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getPhoneAppActivationCode", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Phone) GetPhoneData() (resp []datatypes.Container_User_Data_Phone, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getPhoneData", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_External_Binding_Phone) GetPinLength() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getPinLength", nil, &r.Options, &resp) + return +} + +// Retrieve The type of external authentication binding. +func (r User_Customer_External_Binding_Phone) GetType() (resp datatypes.User_External_Binding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer user that the external authentication binding belongs to. +func (r User_Customer_External_Binding_Phone) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getUser", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor of an external authentication binding. +func (r User_Customer_External_Binding_Phone) GetVendor() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "getVendor", nil, &r.Options, &resp) + return +} + +// Initiates a phone validation request and returns a unique token. Use [[SoftLayer_User_Customer_External_Binding_Phone::checkPhoneValidationResult|checkPhoneValidationResult]] to find the phone validation result. +func (r User_Customer_External_Binding_Phone) RequestPhoneValidation(phoneData *datatypes.Container_User_Data_Phone) (resp string, err error) { + params := []interface{}{ + phoneData, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "requestPhoneValidation", params, &r.Options, &resp) + return +} + +// This service allows you to change your phone authentication mode. See [[SoftLayer_Container_User_Customer_External_Binding_Phone_Mode|authentication mode]] container for available modes. +func (r User_Customer_External_Binding_Phone) UpdateAuthenticationMode(mode *datatypes.Container_User_Customer_External_Binding_Phone_Mode) (resp bool, err error) { + params := []interface{}{ + mode, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "updateAuthenticationMode", params, &r.Options, &resp) + return +} + +// Update the note of an external binding. The note is an optional property that is used to store information about a binding. +func (r User_Customer_External_Binding_Phone) UpdateNote(text *string) (resp bool, err error) { + params := []interface{}{ + text, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "updateNote", params, &r.Options, &resp) + return +} + +// The phone external binding supports a primary and a backup phone number. You can use this method to update the phone numbers used for phone authentication. You can provide an array of [[SoftLayer_Container_User_Data_Phone|User Phone]] objects. You have to mark one as the primary phone number by setting "phoneType" to "PRIMARY".
+// +// +// *countryCode: Country code number for the phone number. Default: 1 (United States & Canada +1) +// *phone: Phone number that the two-factor authentication system will call or text for user authentication. The phone number format must match the format selected in the Country Code. +// *extension: Specify the extension that will be dialed after the call is answered. Digits, commas, *, and # are allowed. Commas can be used for a one second pause to navigate phone system menus. +// *phoneType: Specify the primary and backup phone number by setting this value to "PRIMARY" or "BACKUP". If omitted, it will be considered to be the primary phone number. If you are passing two Phone objects, you must specify the phone type of each phone number. +// +// +func (r User_Customer_External_Binding_Phone) UpdatePhone(phoneData []datatypes.Container_User_Data_Phone) (resp bool, err error) { + params := []interface{}{ + phoneData, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Phone", "updatePhone", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_External_Binding_Totp data type contains information about a single time-based one-time password external binding. The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal to authenticate them. +// +// The information provided by this external binding data type includes: +// * The type of credential +// * The current state of the credential +// ** Active +// ** Inactive +// +// +// SoftLayer users with an active external binding will be prohibited from using the API for security reasons. +type User_Customer_External_Binding_Totp struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerExternalBindingTotpService returns an instance of the User_Customer_External_Binding_Totp SoftLayer service +func GetUserCustomerExternalBindingTotpService(sess *session.Session) User_Customer_External_Binding_Totp { + return User_Customer_External_Binding_Totp{Session: sess} +} + +func (r User_Customer_External_Binding_Totp) Id(id int) User_Customer_External_Binding_Totp { + r.Options.Id = &id + return r +} + +func (r User_Customer_External_Binding_Totp) Mask(mask string) User_Customer_External_Binding_Totp { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_External_Binding_Totp) Filter(filter string) User_Customer_External_Binding_Totp { + r.Options.Filter = filter + return r +} + +func (r User_Customer_External_Binding_Totp) Limit(limit int) User_Customer_External_Binding_Totp { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_External_Binding_Totp) Offset(offset int) User_Customer_External_Binding_Totp { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_External_Binding_Totp) Activate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "activate", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Totp) Deactivate() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "deactivate", nil, &r.Options, &resp) + return +} + +// Delete an external authentication binding. If the external binding currently has an active billing item associated you will be prevented from deleting the binding.
The alternative method to remove an external authentication binding is to use the service cancellation form. +func (r User_Customer_External_Binding_Totp) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "deleteObject", nil, &r.Options, &resp) + return +} + +// Disabling an external binding will allow you to keep the external binding on your SoftLayer account, but will not require you to authenticate with our trusted two-factor authentication vendor when logging into the SoftLayer customer portal. +// +// You may supply one of the following reasons when you disable an external binding: +// *Unspecified +// *TemporarilyUnavailable +// *Lost +// *Stolen +func (r User_Customer_External_Binding_Totp) Disable(reason *string) (resp bool, err error) { + params := []interface{}{ + reason, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "disable", params, &r.Options, &resp) + return +} + +// Enabling an external binding will activate the binding on your account and require you to authenticate with our trusted 3rd party two-factor authentication vendor when logging into the SoftLayer customer portal. +// +// Please note that API access will be disabled for users that have an active external binding. +func (r User_Customer_External_Binding_Totp) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "enable", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Totp) GenerateSecretKey() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "generateSecretKey", nil, &r.Options, &resp) + return +} + +// Retrieve Attributes of an external authentication binding. +func (r User_Customer_External_Binding_Totp) GetAttributes() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for external authentication. +func (r User_Customer_External_Binding_Totp) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve An optional note for identifying the external binding. +func (r User_Customer_External_Binding_Totp) GetNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getNote", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Totp) GetObject() (resp datatypes.User_Customer_External_Binding_Totp, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of external authentication binding. +func (r User_Customer_External_Binding_Totp) GetType() (resp datatypes.User_External_Binding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer user that the external authentication binding belongs to.
+func (r User_Customer_External_Binding_Totp) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getUser", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor of an external authentication binding. +func (r User_Customer_External_Binding_Totp) GetVendor() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "getVendor", nil, &r.Options, &resp) + return +} + +// Update the note of an external binding. The note is an optional property that is used to store information about a binding. +func (r User_Customer_External_Binding_Totp) UpdateNote(text *string) (resp bool, err error) { + params := []interface{}{ + text, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Totp", "updateNote", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_External_Binding_Vendor data type contains information for a single external binding vendor. This information includes a user friendly vendor name, a unique version of the vendor name, and a unique internal identifier that can be used when creating a new external binding. +type User_Customer_External_Binding_Vendor struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerExternalBindingVendorService returns an instance of the User_Customer_External_Binding_Vendor SoftLayer service +func GetUserCustomerExternalBindingVendorService(sess *session.Session) User_Customer_External_Binding_Vendor { + return User_Customer_External_Binding_Vendor{Session: sess} +} + +func (r User_Customer_External_Binding_Vendor) Id(id int) User_Customer_External_Binding_Vendor { + r.Options.Id = &id + return r +} + +func (r User_Customer_External_Binding_Vendor) Mask(mask string) User_Customer_External_Binding_Vendor { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_External_Binding_Vendor) Filter(filter string) User_Customer_External_Binding_Vendor { + r.Options.Filter = filter + return r +} + +func (r User_Customer_External_Binding_Vendor) Limit(limit int) User_Customer_External_Binding_Vendor { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_External_Binding_Vendor) Offset(offset int) User_Customer_External_Binding_Vendor { + r.Options.Offset = &offset + return r +} + +// getAllObjects() will return a list of the available external binding vendors that SoftLayer supports. Use this list to select the appropriate vendor when creating a new external binding. +func (r User_Customer_External_Binding_Vendor) GetAllObjects() (resp []datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Vendor", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Vendor) GetObject() (resp datatypes.User_Customer_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Vendor", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_External_Binding_Verisign data type contains information about a single VeriSign external binding. 
The external binding information is used when a SoftLayer customer logs into the SoftLayer customer portal to authenticate them against a 3rd party, in this case VeriSign. +// +// The information provided by the VeriSign external binding data type includes: +// * The type of credential +// * The current state of the credential +// ** Enabled +// ** Disabled +// ** Locked +// * The credential's expiration date +// * The last time the credential was updated +// +// +// SoftLayer users with an active external binding will be prohibited from using the API for security reasons. +type User_Customer_External_Binding_Verisign struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerExternalBindingVerisignService returns an instance of the User_Customer_External_Binding_Verisign SoftLayer service +func GetUserCustomerExternalBindingVerisignService(sess *session.Session) User_Customer_External_Binding_Verisign { + return User_Customer_External_Binding_Verisign{Session: sess} +} + +func (r User_Customer_External_Binding_Verisign) Id(id int) User_Customer_External_Binding_Verisign { + r.Options.Id = &id + return r +} + +func (r User_Customer_External_Binding_Verisign) Mask(mask string) User_Customer_External_Binding_Verisign { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_External_Binding_Verisign) Filter(filter string) User_Customer_External_Binding_Verisign { + r.Options.Filter = filter + return r +} + +func (r User_Customer_External_Binding_Verisign) Limit(limit int) User_Customer_External_Binding_Verisign { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_External_Binding_Verisign) Offset(offset int) User_Customer_External_Binding_Verisign { + r.Options.Offset = &offset + return r +} + +// Delete a VeriSign external binding. The only VeriSign external binding that can be deleted through this method is the free VeriSign external binding for the master user of a SoftLayer account. All other external bindings must be canceled using the SoftLayer service cancellation form. +// +// When a VeriSign external binding is deleted, the credential is deactivated in VeriSign's system for use on the SoftLayer site, and the $0 billing item associated with the free VeriSign external binding is cancelled. +func (r User_Customer_External_Binding_Verisign) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "deleteObject", nil, &r.Options, &resp) + return +} + +// Disabling an external binding will allow you to keep the external binding on your SoftLayer account, but will not require you to authenticate with our trusted two-factor authentication vendor when logging into the SoftLayer customer portal. +// +// You may supply one of the following reasons when you disable an external binding: +// *Unspecified +// *TemporarilyUnavailable +// *Lost +// *Stolen +func (r User_Customer_External_Binding_Verisign) Disable(reason *string) (resp bool, err error) { + params := []interface{}{ + reason, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "disable", params, &r.Options, &resp) + return +} + +// Enabling an external binding will activate the binding on your account and require you to authenticate with our trusted 3rd party two-factor authentication vendor when logging into the SoftLayer customer portal.
+// +// Please note that API access will be disabled for users that have an active external binding. +func (r User_Customer_External_Binding_Verisign) Enable() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "enable", nil, &r.Options, &resp) + return +} + +// An activation code is required when provisioning a new mobile credential from Verisign. This method will return the required activation code. +func (r User_Customer_External_Binding_Verisign) GetActivationCodeForMobileClient() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getActivationCodeForMobileClient", nil, &r.Options, &resp) + return +} + +// Retrieve Attributes of an external authentication binding. +func (r User_Customer_External_Binding_Verisign) GetAttributes() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for external authentication. +func (r User_Customer_External_Binding_Verisign) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The date that a VeriSign credential expires. +func (r User_Customer_External_Binding_Verisign) GetCredentialExpirationDate() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getCredentialExpirationDate", nil, &r.Options, &resp) + return +} + +// Retrieve The last time a VeriSign credential was updated. +func (r User_Customer_External_Binding_Verisign) GetCredentialLastUpdateDate() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getCredentialLastUpdateDate", nil, &r.Options, &resp) + return +} + +// Retrieve The current state of a VeriSign credential. This can be 'Enabled', 'Disabled', or 'Locked'. +func (r User_Customer_External_Binding_Verisign) GetCredentialState() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getCredentialState", nil, &r.Options, &resp) + return +} + +// Retrieve The type of VeriSign credential. This can be either 'Hardware' or 'Software'. +func (r User_Customer_External_Binding_Verisign) GetCredentialType() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getCredentialType", nil, &r.Options, &resp) + return +} + +// Retrieve An optional note for identifying the external binding. +func (r User_Customer_External_Binding_Verisign) GetNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getNote", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_External_Binding_Verisign) GetObject() (resp datatypes.User_Customer_External_Binding_Verisign, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of external authentication binding. 
+func (r User_Customer_External_Binding_Verisign) GetType() (resp datatypes.User_External_Binding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The SoftLayer user that the external authentication binding belongs to. +func (r User_Customer_External_Binding_Verisign) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getUser", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor of an external authentication binding. +func (r User_Customer_External_Binding_Verisign) GetVendor() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "getVendor", nil, &r.Options, &resp) + return +} + +// If a VeriSign credential becomes locked because of too many failed login attempts, the unlock method can be used to unlock a VeriSign credential. As a security precaution, a valid security code generated by the credential will be required before the credential is unlocked. +func (r User_Customer_External_Binding_Verisign) Unlock(securityCode *string) (resp bool, err error) { + params := []interface{}{ + securityCode, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "unlock", params, &r.Options, &resp) + return +} + +// Update the note of an external binding. The note is an optional property that is used to store information about a binding. +func (r User_Customer_External_Binding_Verisign) UpdateNote(text *string) (resp bool, err error) { + params := []interface{}{ + text, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "updateNote", params, &r.Options, &resp) + return +} + +// Validate the user id and VeriSign credential id used to create an external authentication binding.
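+//
+// A sketch (the user id and credential id are hypothetical):
+//
+//	vsSvc := GetUserCustomerExternalBindingVerisignService(sess)
+//	valid, err := vsSvc.ValidateCredentialId(sl.Int(123456), sl.String("VSMB12345678"))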
+func (r User_Customer_External_Binding_Verisign) ValidateCredentialId(userId *int, externalId *string) (resp bool, err error) { + params := []interface{}{ + userId, + externalId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_External_Binding_Verisign", "validateCredentialId", params, &r.Options, &resp) + return +} + +// no documentation yet +type User_Customer_Invitation struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerInvitationService returns an instance of the User_Customer_Invitation SoftLayer service +func GetUserCustomerInvitationService(sess *session.Session) User_Customer_Invitation { + return User_Customer_Invitation{Session: sess} +} + +func (r User_Customer_Invitation) Id(id int) User_Customer_Invitation { + r.Options.Id = &id + return r +} + +func (r User_Customer_Invitation) Mask(mask string) User_Customer_Invitation { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Invitation) Filter(filter string) User_Customer_Invitation { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Invitation) Limit(limit int) User_Customer_Invitation { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Invitation) Offset(offset int) User_Customer_Invitation { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_Invitation) GetObject() (resp datatypes.User_Customer_Invitation, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Invitation", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_Invitation) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Invitation", "getUser", nil, &r.Options, &resp) + return +} + +// This class represents a mobile device belonging to a user. The device can be a phone, a tablet, or possibly even an Android-based netbook. The purpose is to tie just enough info with the device and the user to enable push notifications through non-SoftLayer entities (Google, Apple, RIM). +type User_Customer_MobileDevice struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerMobileDeviceService returns an instance of the User_Customer_MobileDevice SoftLayer service +func GetUserCustomerMobileDeviceService(sess *session.Session) User_Customer_MobileDevice { + return User_Customer_MobileDevice{Session: sess} +} + +func (r User_Customer_MobileDevice) Id(id int) User_Customer_MobileDevice { + r.Options.Id = &id + return r +} + +func (r User_Customer_MobileDevice) Mask(mask string) User_Customer_MobileDevice { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_MobileDevice) Filter(filter string) User_Customer_MobileDevice { + r.Options.Filter = filter + return r +} + +func (r User_Customer_MobileDevice) Limit(limit int) User_Customer_MobileDevice { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_MobileDevice) Offset(offset int) User_Customer_MobileDevice { + r.Options.Offset = &offset + return r +} + +// Create a new mobile device association for a user.
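+//
+// Sketch of the create/delete lifecycle (template fields are omitted here;
+// see the SoftLayer_User_Customer_MobileDevice data type for what to
+// populate):
+//
+//	mdSvc := GetUserCustomerMobileDeviceService(sess)
+//	created, err := mdSvc.CreateObject(&datatypes.User_Customer_MobileDevice{ /* ... */ })
+//	if err == nil && created.Id != nil {
+//		deleted, _ := mdSvc.Id(*created.Id).DeleteObject()
+//		_ = deleted
+//	}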
+func (r User_Customer_MobileDevice) CreateObject(templateObject *datatypes.User_Customer_MobileDevice) (resp datatypes.User_Customer_MobileDevice, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "createObject", params, &r.Options, &resp) + return +} + +// Delete a mobile device association for a user. +func (r User_Customer_MobileDevice) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit the object by passing in a modified instance of the object +func (r User_Customer_MobileDevice) EditObject(templateObject *datatypes.User_Customer_MobileDevice) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve Notification subscriptions available to a mobile device. +func (r User_Customer_MobileDevice) GetAvailablePushNotificationSubscriptions() (resp []datatypes.Notification, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getAvailablePushNotificationSubscriptions", nil, &r.Options, &resp) + return +} + +// Retrieve The user this mobile device belongs to. +func (r User_Customer_MobileDevice) GetCustomer() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getCustomer", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_MobileDevice) GetObject() (resp datatypes.User_Customer_MobileDevice, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The operating system this device is using +func (r User_Customer_MobileDevice) GetOperatingSystem() (resp datatypes.User_Customer_MobileDevice_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve Notification subscriptions attached to a mobile device. +func (r User_Customer_MobileDevice) GetPushNotificationSubscriptions() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getPushNotificationSubscriptions", nil, &r.Options, &resp) + return +} + +// Retrieve The type of device this user is using +func (r User_Customer_MobileDevice) GetType() (resp datatypes.User_Customer_MobileDevice_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice", "getType", nil, &r.Options, &resp) + return +} + +// This class represents the mobile operating system installed on a user's registered mobile device. It assists us in determining how to get a push notification to the user.
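+//
+// Sketch: list the mobile operating systems SoftLayer recognizes (sess is an
+// authenticated session):
+//
+//	osSvc := GetUserCustomerMobileDeviceOperatingSystemService(sess)
+//	systems, err := osSvc.GetAllObjects()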
+type User_Customer_MobileDevice_OperatingSystem struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerMobileDeviceOperatingSystemService returns an instance of the User_Customer_MobileDevice_OperatingSystem SoftLayer service +func GetUserCustomerMobileDeviceOperatingSystemService(sess *session.Session) User_Customer_MobileDevice_OperatingSystem { + return User_Customer_MobileDevice_OperatingSystem{Session: sess} +} + +func (r User_Customer_MobileDevice_OperatingSystem) Id(id int) User_Customer_MobileDevice_OperatingSystem { + r.Options.Id = &id + return r +} + +func (r User_Customer_MobileDevice_OperatingSystem) Mask(mask string) User_Customer_MobileDevice_OperatingSystem { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_MobileDevice_OperatingSystem) Filter(filter string) User_Customer_MobileDevice_OperatingSystem { + r.Options.Filter = filter + return r +} + +func (r User_Customer_MobileDevice_OperatingSystem) Limit(limit int) User_Customer_MobileDevice_OperatingSystem { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_MobileDevice_OperatingSystem) Offset(offset int) User_Customer_MobileDevice_OperatingSystem { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_MobileDevice_OperatingSystem) GetAllObjects() (resp []datatypes.User_Customer_MobileDevice_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice_OperatingSystem", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_MobileDevice_OperatingSystem) GetObject() (resp datatypes.User_Customer_MobileDevice_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice_OperatingSystem", "getObject", nil, &r.Options, &resp) + return +} + +// Describes a supported class of mobile device. Here, the word class is used in the context of classes of consumer electronic devices, the two most prominent examples being mobile phones and tablets.
+type User_Customer_MobileDevice_Type struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerMobileDeviceTypeService returns an instance of the User_Customer_MobileDevice_Type SoftLayer service +func GetUserCustomerMobileDeviceTypeService(sess *session.Session) User_Customer_MobileDevice_Type { + return User_Customer_MobileDevice_Type{Session: sess} +} + +func (r User_Customer_MobileDevice_Type) Id(id int) User_Customer_MobileDevice_Type { + r.Options.Id = &id + return r +} + +func (r User_Customer_MobileDevice_Type) Mask(mask string) User_Customer_MobileDevice_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_MobileDevice_Type) Filter(filter string) User_Customer_MobileDevice_Type { + r.Options.Filter = filter + return r +} + +func (r User_Customer_MobileDevice_Type) Limit(limit int) User_Customer_MobileDevice_Type { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_MobileDevice_Type) Offset(offset int) User_Customer_MobileDevice_Type { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_MobileDevice_Type) GetAllObjects() (resp []datatypes.User_Customer_MobileDevice_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice_Type", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_MobileDevice_Type) GetObject() (resp datatypes.User_Customer_MobileDevice_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_MobileDevice_Type", "getObject", nil, &r.Options, &resp) + return +} + +// The Customer_Notification_Hardware object stores links between customers and the hardware devices they wish to monitor. This link alone is not enough; the user must also create a SoftLayer_Network_Monitor_Version1_Query_Host instance with the response action set to "notify users" in order for the users linked to that hardware object to be notified on failure. +type User_Customer_Notification_Hardware struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerNotificationHardwareService returns an instance of the User_Customer_Notification_Hardware SoftLayer service +func GetUserCustomerNotificationHardwareService(sess *session.Session) User_Customer_Notification_Hardware { + return User_Customer_Notification_Hardware{Session: sess} +} + +func (r User_Customer_Notification_Hardware) Id(id int) User_Customer_Notification_Hardware { + r.Options.Id = &id + return r +} + +func (r User_Customer_Notification_Hardware) Mask(mask string) User_Customer_Notification_Hardware { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Notification_Hardware) Filter(filter string) User_Customer_Notification_Hardware { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Notification_Hardware) Limit(limit int) User_Customer_Notification_Hardware { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Notification_Hardware) Offset(offset int) User_Customer_Notification_Hardware { + r.Options.Offset = &offset + return r +} + +// Passing in an unsaved instance of a Customer_Notification_Hardware object into this function will create the object and return the results to the user.
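For illustration, a hedged sketch of driving the createObject call that follows; the UserId and HardwareId field names are assumed from the SoftLayer_User_Customer_Notification_Hardware datatype, and all IDs are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("myuser", "myapikey") // placeholder credentials

	// Link a user to a hardware object so that monitoring failures notify
	// them. UserId/HardwareId are assumed field names; both IDs are made up.
	template := datatypes.User_Customer_Notification_Hardware{
		UserId:     sl.Int(6786566),
		HardwareId: sl.Int(1234567),
	}

	link, err := services.GetUserCustomerNotificationHardwareService(sess).
		CreateObject(&template)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created notification link: %+v\n", link)
}
```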
+func (r User_Customer_Notification_Hardware) CreateObject(templateObject *datatypes.User_Customer_Notification_Hardware) (resp datatypes.User_Customer_Notification_Hardware, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "createObject", params, &r.Options, &resp) + return +} + +// Passing in a collection of unsaved instances of Customer_Notification_Hardware objects into this function will create all objects and return the results to the user. +func (r User_Customer_Notification_Hardware) CreateObjects(templateObjects []datatypes.User_Customer_Notification_Hardware) (resp []datatypes.Dns_Domain, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "createObjects", params, &r.Options, &resp) + return +} + +// Like any other API object, the customer notification objects can be deleted by passing an instance of them into this function. The ID on the object must be set. +func (r User_Customer_Notification_Hardware) DeleteObjects(templateObjects []datatypes.User_Customer_Notification_Hardware) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "deleteObjects", params, &r.Options, &resp) + return +} + +// This method returns all Customer_Notification_Hardware objects associated with the passed in hardware ID as long as that hardware ID is owned by the current user's account. +// +// This behavior can also be accomplished by simply tapping monitoringUserNotification on the Hardware_Server object. +func (r User_Customer_Notification_Hardware) FindByHardwareId(hardwareId *int) (resp []datatypes.User_Customer_Notification_Hardware, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "findByHardwareId", params, &r.Options, &resp) + return +} + +// Retrieve The hardware object that will be monitored. +func (r User_Customer_Notification_Hardware) GetHardware() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "getHardware", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer_Notification_Hardware object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_Notification_Hardware service. You can only retrieve hardware notifications attached to hardware and users that belong to your account +func (r User_Customer_Notification_Hardware) GetObject() (resp datatypes.User_Customer_Notification_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The user that will be notified when the associated hardware object fails a monitoring instance. +func (r User_Customer_Notification_Hardware) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Hardware", "getUser", nil, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_Notification_Virtual_Guest object stores links between customers and the virtual guests they wish to monitor. 
This link alone is not enough; the user must also create a SoftLayer_Network_Monitor_Version1_Query_Host instance with the response action set to "notify users" in order for the users linked to that virtual guest to be notified on failure. +type User_Customer_Notification_Virtual_Guest struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerNotificationVirtualGuestService returns an instance of the User_Customer_Notification_Virtual_Guest SoftLayer service +func GetUserCustomerNotificationVirtualGuestService(sess *session.Session) User_Customer_Notification_Virtual_Guest { + return User_Customer_Notification_Virtual_Guest{Session: sess} +} + +func (r User_Customer_Notification_Virtual_Guest) Id(id int) User_Customer_Notification_Virtual_Guest { + r.Options.Id = &id + return r +} + +func (r User_Customer_Notification_Virtual_Guest) Mask(mask string) User_Customer_Notification_Virtual_Guest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Notification_Virtual_Guest) Filter(filter string) User_Customer_Notification_Virtual_Guest { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Notification_Virtual_Guest) Limit(limit int) User_Customer_Notification_Virtual_Guest { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Notification_Virtual_Guest) Offset(offset int) User_Customer_Notification_Virtual_Guest { + r.Options.Offset = &offset + return r +} + +// Passing in an unsaved instance of a SoftLayer_Customer_Notification_Virtual_Guest object into this function will create the object and return the results to the user. +func (r User_Customer_Notification_Virtual_Guest) CreateObject(templateObject *datatypes.User_Customer_Notification_Virtual_Guest) (resp datatypes.User_Customer_Notification_Virtual_Guest, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "createObject", params, &r.Options, &resp) + return +} + +// Passing in a collection of unsaved instances of SoftLayer_Customer_Notification_Virtual_Guest objects into this function will create all objects and return the results to the user. +func (r User_Customer_Notification_Virtual_Guest) CreateObjects(templateObjects []datatypes.User_Customer_Notification_Virtual_Guest) (resp []datatypes.User_Customer_Notification_Virtual_Guest, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "createObjects", params, &r.Options, &resp) + return +} + +// Like any other API object, the customer notification objects can be deleted by passing an instance of them into this function. The ID on the object must be set. +func (r User_Customer_Notification_Virtual_Guest) DeleteObjects(templateObjects []datatypes.User_Customer_Notification_Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "deleteObjects", params, &r.Options, &resp) + return +} + +// This method returns all SoftLayer_User_Customer_Notification_Virtual_Guest objects associated with the passed in ID as long as that guest ID is owned by the current user's account.
+// +// This behavior can also be accomplished by simply tapping monitoringUserNotification on the Virtual_Guest object. +func (r User_Customer_Notification_Virtual_Guest) FindByGuestId(id *int) (resp []datatypes.User_Customer_Notification_Virtual_Guest, err error) { + params := []interface{}{ + id, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "findByGuestId", params, &r.Options, &resp) + return +} + +// Retrieve The virtual guest object that will be monitored. +func (r User_Customer_Notification_Virtual_Guest) GetGuest() (resp datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "getGuest", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer_Notification_Virtual_Guest object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_Notification_Virtual_Guest service. You can only retrieve guest notifications attached to virtual guests and users that belong to your account +func (r User_Customer_Notification_Virtual_Guest) GetObject() (resp datatypes.User_Customer_Notification_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The user that will be notified when the associated virtual guest object fails a monitoring instance. +func (r User_Customer_Notification_Virtual_Guest) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Notification_Virtual_Guest", "getUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +type User_Customer_OpenIdConnect struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerOpenIdConnectService returns an instance of the User_Customer_OpenIdConnect SoftLayer service +func GetUserCustomerOpenIdConnectService(sess *session.Session) User_Customer_OpenIdConnect { + return User_Customer_OpenIdConnect{Session: sess} +} + +func (r User_Customer_OpenIdConnect) Id(id int) User_Customer_OpenIdConnect { + r.Options.Id = &id + return r +} + +func (r User_Customer_OpenIdConnect) Mask(mask string) User_Customer_OpenIdConnect { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_OpenIdConnect) Filter(filter string) User_Customer_OpenIdConnect { + r.Options.Filter = filter + return r +} + +func (r User_Customer_OpenIdConnect) Limit(limit int) User_Customer_OpenIdConnect { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_OpenIdConnect) Offset(offset int) User_Customer_OpenIdConnect { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) AcknowledgeSupportPolicy() (err error) { + var resp datatypes.Void + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "acknowledgeSupportPolicy", nil, &r.Options, &resp) + return +} + +// Completes invitation process for an OpenIdConnect user created by Bluemix Unified User Console. 
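Returning to the virtual guest notification service defined above, a short sketch of findByGuestId; the guest ID is hypothetical and must belong to the calling account:

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("myuser", "myapikey") // placeholder credentials

	// List every notification link for one (hypothetical) virtual guest;
	// the guest must be owned by the calling account.
	links, err := services.GetUserCustomerNotificationVirtualGuestService(sess).
		FindByGuestId(sl.Int(7654321))
	if err != nil {
		log.Fatal(err)
	}
	for _, link := range links {
		fmt.Printf("notification link: %+v\n", link)
	}
}
```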
+func (r User_Customer_OpenIdConnect) ActivateOpenIdConnectUser(verificationCode *string, userInfo *datatypes.User_Customer) (err error) { + var resp datatypes.Void + params := []interface{}{ + verificationCode, + userInfo, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "activateOpenIdConnectUser", params, &r.Options, &resp) + return +} + +// Create a user's API authentication key, allowing that user access to query the SoftLayer API. addApiAuthenticationKey() returns the user's new API key. Each portal user is allowed a maximum of two API keys. +func (r User_Customer_OpenIdConnect) AddApiAuthenticationKey() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addApiAuthenticationKey", nil, &r.Options, &resp) + return +} + +// Add multiple hardware to a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware that a user does not have access to does not appear in the SoftLayer portal, and API calls against it return "not found" exceptions. addBulkHardwareAccess() does not attempt to add hardware access if the given user already has access to that hardware object. +// +// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) AddBulkHardwareAccess(hardwareIds []int) (resp bool, err error) { + params := []interface{}{ + hardwareIds, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addBulkHardwareAccess", params, &r.Options, &resp) + return +} + +// Add multiple permissions to a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. addBulkPortalPermission() does not attempt to add permissions already assigned to the user. +// +// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account. +// +// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are added based on the keyName property of the permission objects within the permissions parameter. +func (r User_Customer_OpenIdConnect) AddBulkPortalPermission(permissions []datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) { + params := []interface{}{ + permissions, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addBulkPortalPermission", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) AddBulkRoles(roles []datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + roles, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addBulkRoles", params, &r.Options, &resp) + return +} + +// Add multiple CloudLayer Computing Instances to a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API.
CloudLayer Computing Instances that a user does not have access to do not appear in the SoftLayer portal, and API calls against them return "not found" exceptions. addBulkVirtualGuestAccess() does not attempt to add CloudLayer Computing Instance access if the given user already has access to that CloudLayer Computing Instance object. +// +// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set CloudLayer Computing Instance access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) AddBulkVirtualGuestAccess(virtualGuestIds []int) (resp bool, err error) { + params := []interface{}{ + virtualGuestIds, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addBulkVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) AddExternalBinding(externalBinding *datatypes.User_External_Binding) (resp datatypes.User_Customer_External_Binding, err error) { + params := []interface{}{ + externalBinding, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addExternalBinding", params, &r.Options, &resp) + return +} + +// Add hardware to a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware that a user does not have access to does not appear in the SoftLayer portal, and API calls against it return "not found" exceptions. If a user already has access to the hardware you're attempting to add then addHardwareAccess() returns true. +// +// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) AddHardwareAccess(hardwareId *int) (resp bool, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addHardwareAccess", params, &r.Options, &resp) + return +} + +// Create a notification subscription record for the user. If a subscription record exists for the notification, the record will be set to active, if currently inactive. +func (r User_Customer_OpenIdConnect) AddNotificationSubscriber(notificationKeyName *string) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Add a permission to a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. If the user already has the permission you're attempting to add then addPortalPermission() returns true. +// +// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account. +// +// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are added based on the keyName property of the permission parameter.
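A hedged sketch of the addPortalPermission call described above; the keyName value is illustrative and would normally come from [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]]:

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("myuser", "myapikey") // placeholder credentials

	// Permissions are matched on keyName; "TICKET_VIEW" is illustrative.
	perm := datatypes.User_Customer_CustomerPermission_Permission{
		KeyName: sl.String("TICKET_VIEW"),
	}

	ok, err := services.GetUserCustomerOpenIdConnectService(sess).
		Id(6786566). // hypothetical child user id
		AddPortalPermission(&perm)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("permission present:", ok)
}
```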
+func (r User_Customer_OpenIdConnect) AddPortalPermission(permission *datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) { + params := []interface{}{ + permission, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addPortalPermission", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) AddRole(role *datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + role, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addRole", params, &r.Options, &resp) + return +} + +// Add a CloudLayer Computing Instance to a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances that a user does not have access to do not appear in the SoftLayer portal, and API calls against them return "not found" exceptions. If a user already has access to the CloudLayer Computing Instance you're attempting to add then addVirtualGuestAccess() returns true. +// +// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set CloudLayer Computing Instance access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) AddVirtualGuestAccess(virtualGuestId *int) (resp bool, err error) { + params := []interface{}{ + virtualGuestId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "addVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// Select a type of preference you would like to modify using [[SoftLayer_User_Customer::getPreferenceTypes|getPreferenceTypes]] and invoke this method using that preference type key name. +func (r User_Customer_OpenIdConnect) ChangePreference(preferenceTypeKeyName *string, value *string) (resp []datatypes.User_Preference, err error) { + params := []interface{}{ + preferenceTypeKeyName, + value, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "changePreference", params, &r.Options, &resp) + return +} + +// This service checks the result of a previously requested external authentication. [[SoftLayer_Container_User_Customer_External_Binding_Phone|Phone external binding]] container can be used for this service. Make sure to set the [[SoftLayer_Container_User_Customer_External_Binding_Phone::authenticationToken|authenticationToken]] that is generated by [[SoftLayer_User_Customer|initiateExternalAuthentication]] service.
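Before moving on to external authentication, the access-list methods just defined can be exercised as follows; both IDs are hypothetical, and a master user (or a user with the right permissions) is assumed:

```go
package main

import (
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("myuser", "myapikey") // placeholder credentials

	// Target a (hypothetical) child user, then grant access to one piece
	// of hardware and one virtual guest; both calls return true even when
	// the user already has access.
	svc := services.GetUserCustomerOpenIdConnectService(sess).Id(6786566)

	if _, err := svc.AddHardwareAccess(sl.Int(1234567)); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.AddVirtualGuestAccess(sl.Int(7654321)); err != nil {
		log.Fatal(err)
	}
}
```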
+func (r User_Customer_OpenIdConnect) CheckExternalAuthenticationStatus(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "checkExternalAuthenticationStatus", params, &r.Options, &resp) + return +} + +// Add a description here +// +// +func (r User_Customer_OpenIdConnect) CheckPhoneFactorAuthenticationForPasswordSet(passwordSet *datatypes.Container_User_Customer_PasswordSet, authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp bool, err error) { + params := []interface{}{ + passwordSet, + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "checkPhoneFactorAuthenticationForPasswordSet", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) CompleteInvitationAfterLogin(providerType *string, accessToken *string, emailRegistrationCode *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + providerType, + accessToken, + emailRegistrationCode, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "completeInvitationAfterLogin", params, &r.Options, &resp) + return +} + +// Create a new subscriber for a given resource. +func (r User_Customer_OpenIdConnect) CreateNotificationSubscriber(keyName *string, resourceTableId *int) (resp bool, err error) { + params := []interface{}{ + keyName, + resourceTableId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "createNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Create a new user in the SoftLayer customer portal. createObject() creates a user's portal record and adds them into the SoftLayer community forums. It is no longer possible to set up the SSL or PPTP enable flag in this call since the manage permissions have not yet been set. You will need to make a subsequent call to edit object in order to enable VPN access. An account's master user and sub-users who have the User Manage permission can add new users. createObject() creates users with a default permission set. After adding a user it may be helpful to set their permissions and hardware access. +// +// Note, neither password nor vpnPassword parameters are required. +// +// Password When a new user is created, an email will be sent to the new user's email address with a link to a url that will allow the new user to create or change their password for the SoftLayer customer portal. +// +// If the password parameter is provided and is not null, then that value will be validated. If it is a valid password, then the user will be created with this password. This user will still receive a portal password email. It can be used within 24 hours to change their password, or it can be allowed to expire, and the password provided during user creation will remain as the user's password. +// +// If the password parameter is not provided or the value is null, the user must set their portal password using the link sent in email within 24 hours.  If the user fails to set their password within 24 hours, then a non-master user can use the "Reset Password" link on the login page of the portal to request a new email. A master user can use the link to retrieve a phone number to call to assist in resetting their password. 
+// +// The password parameter is ignored for VPN_ONLY users or for IBMid authenticated users. +// +// vpnPassword If the vpnPassword is provided, then the user's vpnPassword will be set to the provided password.  When creating a vpn only user, the vpnPassword MUST be supplied.  If the vpnPassword is not provided, then the user will need to use the portal to edit their profile and set the vpnPassword. +// +// +func (r User_Customer_OpenIdConnect) CreateObject(templateObject *datatypes.User_Customer, password *string, vpnPassword *string) (resp datatypes.User_Customer, err error) { + params := []interface{}{ + templateObject, + password, + vpnPassword, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) CreateOpenIdConnectUserAndCompleteInvitation(providerType *string, user *datatypes.User_Customer, password *string, registrationCode *string) (resp string, err error) { + params := []interface{}{ + providerType, + user, + password, + registrationCode, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "createOpenIdConnectUserAndCompleteInvitation", params, &r.Options, &resp) + return +} + +// Create delivery methods for a notification that the user is subscribed to. Multiple delivery method keyNames can be supplied to create multiple delivery methods for the specified notification. Available delivery methods - 'EMAIL'. Available notifications - 'PLANNED_MAINTENANCE', 'UNPLANNED_INCIDENT'. +func (r User_Customer_OpenIdConnect) CreateSubscriberDeliveryMethods(notificationKeyName *string, deliveryMethodKeyNames []string) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + deliveryMethodKeyNames, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "createSubscriberDeliveryMethods", params, &r.Options, &resp) + return +} + +// Deactivate a notification subscriber for a given resource. +func (r User_Customer_OpenIdConnect) DeactivateNotificationSubscriber(keyName *string, resourceTableId *int) (resp bool, err error) { + params := []interface{}{ + keyName, + resourceTableId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "deactivateNotificationSubscriber", params, &r.Options, &resp) + return +} + +// Declines an invitation to link an OpenIdConnect identity to a SoftLayer (Atlas) identity and account. Note that this uses a registration code that is likely a one-time-use-only token, so if an invitation has already been processed (accepted or previously declined) it will not be possible to process it a second time. +func (r User_Customer_OpenIdConnect) DeclineInvitation(providerType *string, registrationCode *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + providerType, + registrationCode, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "declineInvitation", params, &r.Options, &resp) + return +} + +// Account master users and sub-users who have the User Manage permission in the SoftLayer customer portal can update other users' information. Use editObject() if you wish to edit a single user account. Users who do not have the User Manage permission can only update their own information.
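Putting the createObject contract described above into code: a hedged sketch that creates a sub-user with no initial password, so the emailed 24-hour link drives the password set. All field values are hypothetical, and the live API may require additional fields (for example, a timezone):

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New("myuser", "myapikey") // placeholder credentials

	template := datatypes.User_Customer{
		Username:  sl.String("jdoe"), // all values hypothetical
		FirstName: sl.String("Jane"),
		LastName:  sl.String("Doe"),
		Email:     sl.String("jdoe@example.com"),
	}

	// nil password and vpnPassword: the new user sets their portal
	// password through the emailed link, which expires after 24 hours.
	user, err := services.GetUserCustomerOpenIdConnectService(sess).
		CreateObject(&template, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created user: %+v\n", user)
}
```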
+func (r User_Customer_OpenIdConnect) EditObject(templateObject *datatypes.User_Customer) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "editObject", params, &r.Options, &resp) + return +} + +// Account master users and sub-users who have the User Manage permission in the SoftLayer customer portal can update other users' information. Use editObjects() if you wish to edit multiple users at once. Users who do not have the User Manage permission can only update their own information. +func (r User_Customer_OpenIdConnect) EditObjects(templateObjects []datatypes.User_Customer) (resp bool, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "editObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) FindUserPreference(profileName *string, containerKeyname *string, preferenceKeyname *string) (resp []datatypes.Layout_Profile, err error) { + params := []interface{}{ + profileName, + containerKeyname, + preferenceKeyname, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "findUserPreference", params, &r.Options, &resp) + return +} + +// Retrieve The customer account that a user belongs to. +func (r User_Customer_OpenIdConnect) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetActions() (resp []datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getActions", nil, &r.Options, &resp) + return +} + +// The getActiveExternalAuthenticationVendors method will return a list of available external vendors that a SoftLayer user can authenticate against. The list will only contain vendors for which the user has at least one active external binding. +func (r User_Customer_OpenIdConnect) GetActiveExternalAuthenticationVendors() (resp []datatypes.Container_User_Customer_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getActiveExternalAuthenticationVendors", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's additional email addresses. These email addresses are contacted when updates are made to support tickets. +func (r User_Customer_OpenIdConnect) GetAdditionalEmails() (resp []datatypes.User_Customer_AdditionalEmail, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAdditionalEmails", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetAllowedHardwareIds() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAllowedHardwareIds", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetAllowedVirtualGuestIds() (resp []int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAllowedVirtualGuestIds", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's API Authentication keys. There is a max limit of two API keys per user.
+func (r User_Customer_OpenIdConnect) GetApiAuthenticationKeys() (resp []datatypes.User_Customer_ApiAuthentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getApiAuthenticationKeys", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetAuthenticationToken(token *datatypes.Container_User_Authentication_Token) (resp datatypes.Container_User_Authentication_Token, err error) { + params := []interface{}{ + token, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getAuthenticationToken", params, &r.Options, &resp) + return +} + +// Retrieve The CDN accounts associated with a portal user. +func (r User_Customer_OpenIdConnect) GetCdnAccounts() (resp []datatypes.Network_ContentDelivery_Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getCdnAccounts", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's child users. Some portal users may not have child users. +func (r User_Customer_OpenIdConnect) GetChildUsers() (resp []datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getChildUsers", nil, &r.Options, &resp) + return +} + +// Retrieve A user's associated closed tickets. +func (r User_Customer_OpenIdConnect) GetClosedTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getClosedTickets", nil, &r.Options, &resp) + return +} + +// This API gets the default account for the OpenIdConnect identity that is linked to the current SoftLayer user identity. If there is no default present, the API returns null, except in the special case where we find one active user linked to the IBMid. In that case, we will set the link from the IBMid to that user as default, and return the account of which that user is a member. Invoke this only on IBMid-authenticated users. +func (r User_Customer_OpenIdConnect) GetDefaultAccount(providerType *string) (resp datatypes.Account, err error) { + params := []interface{}{ + providerType, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getDefaultAccount", params, &r.Options, &resp) + return +} + +// This method is deprecated. Please see documentation for initiatePortalPasswordChange. +func (r User_Customer_OpenIdConnect) GetDefaultSecurityQuestions(key *string) (resp []datatypes.User_Security_Question, err error) { + params := []interface{}{ + key, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getDefaultSecurityQuestions", params, &r.Options, &resp) + return +} + +// Retrieve The external authentication bindings that link an external identifier to a SoftLayer user. +func (r User_Customer_OpenIdConnect) GetExternalBindings() (resp []datatypes.User_External_Binding, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getExternalBindings", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's accessible hardware. These permissions control which hardware a user has access to in the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) GetHardware() (resp []datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHardware", nil, &r.Options, &resp) + return +} + +// Retrieve the number of servers that a portal user has access to. Portal users can have restrictions set that limit which services they can use and which actions they can perform on hardware.
You can set these permissions in the portal by clicking the "administrative" then "user admin" links. +func (r User_Customer_OpenIdConnect) GetHardwareCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHardwareCount", nil, &r.Options, &resp) + return +} + +// Retrieve Hardware notifications associated with this user. A hardware notification links a user to a piece of hardware, and that user will be notified if any monitors on that hardware fail, if the monitors have a status of 'Notify User'. +func (r User_Customer_OpenIdConnect) GetHardwareNotifications() (resp []datatypes.User_Customer_Notification_Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHardwareNotifications", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a user has acknowledged the support policy. +func (r User_Customer_OpenIdConnect) GetHasAcknowledgedSupportPolicyFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHasAcknowledgedSupportPolicyFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a portal user has access to all hardware on their account. +func (r User_Customer_OpenIdConnect) GetHasFullHardwareAccessFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHasFullHardwareAccessFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a portal user has access to all virtual guests on their account. +func (r User_Customer_OpenIdConnect) GetHasFullVirtualGuestAccessFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getHasFullVirtualGuestAccessFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetImpersonationToken() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getImpersonationToken", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetLayoutProfiles() (resp []datatypes.Layout_Profile, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getLayoutProfiles", nil, &r.Options, &resp) + return +} + +// Retrieve A user's locale. Locale holds a user's language and region information. +func (r User_Customer_OpenIdConnect) GetLocale() (resp datatypes.Locale, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getLocale", nil, &r.Options, &resp) + return +} + +// Validates a supplied OpenIdConnect access token to the SoftLayer customer portal and returns the default account name and id for the active user. An exception will be thrown if no matching customer is found. +func (r User_Customer_OpenIdConnect) GetLoginAccountInfoOpenIdConnect(providerType *string, accessToken *string) (resp datatypes.Container_User_Customer_OpenIdConnect_LoginAccountInfo, err error) { + params := []interface{}{ + providerType, + accessToken, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getLoginAccountInfoOpenIdConnect", params, &r.Options, &resp) + return +} + +// Retrieve A user's attempts to log into the SoftLayer customer portal.
+func (r User_Customer_OpenIdConnect) GetLoginAttempts() (resp []datatypes.User_Customer_Access_Authentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getLoginAttempts", nil, &r.Options, &resp) + return +} + +// Attempt to authenticate a user to the SoftLayer customer portal using the provided authentication container. Depending on the specific type of authentication container that is used, this API will leverage the appropriate authentication protocol. If authentication is successful then the API returns a list of linked accounts for the user, a token containing the ID of the authenticated user and a hash key used by the SoftLayer customer portal to maintain authentication. +func (r User_Customer_OpenIdConnect) GetLoginToken(request *datatypes.Container_Authentication_Request_Contract) (resp datatypes.Container_Authentication_Response_Common, err error) { + params := []interface{}{ + request, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getLoginToken", params, &r.Options, &resp) + return +} + +// An OpenIdConnect identity, for example an IBMid, can be linked or mapped to one or more individual SoftLayer users, but no more than one SoftLayer user per account. This effectively links the OpenIdConnect identity to those accounts. This API returns a list of all the accounts for which there is a link between the OpenIdConnect identity and a SoftLayer user. Invoke this only on IBMid-authenticated users. +func (r User_Customer_OpenIdConnect) GetMappedAccounts(providerType *string) (resp []datatypes.Account, err error) { + params := []interface{}{ + providerType, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getMappedAccounts", params, &r.Options, &resp) + return +} + +// Retrieve A portal user's associated mobile device profiles. +func (r User_Customer_OpenIdConnect) GetMobileDevices() (resp []datatypes.User_Customer_MobileDevice, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getMobileDevices", nil, &r.Options, &resp) + return +} + +// Retrieve Notification subscription records for the user. +func (r User_Customer_OpenIdConnect) GetNotificationSubscribers() (resp []datatypes.Notification_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getNotificationSubscribers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetObject() (resp datatypes.User_Customer_OpenIdConnect, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getObject", nil, &r.Options, &resp) + return +} + +// This API returns a SoftLayer_Container_User_Customer_OpenIdConnect_MigrationState object containing the necessary information to determine what migration state the user is in. If the account is not OpenIdConnect authenticated, then an exception is thrown. 
+func (r User_Customer_OpenIdConnect) GetOpenIdConnectMigrationState() (resp datatypes.Container_User_Customer_OpenIdConnect_MigrationState, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getOpenIdConnectMigrationState", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetOpenIdRegistrationInfoFromCode(providerType *string, registrationCode *string) (resp datatypes.Account_Authentication_OpenIdConnect_RegistrationInformation, err error) { + params := []interface{}{ + providerType, + registrationCode, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getOpenIdRegistrationInfoFromCode", params, &r.Options, &resp) + return +} + +// Retrieve A user's associated open tickets. +func (r User_Customer_OpenIdConnect) GetOpenTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getOpenTickets", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's VPN-accessible subnets. +func (r User_Customer_OpenIdConnect) GetOverrides() (resp []datatypes.Network_Service_Vpn_Overrides, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getOverrides", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's parent user. If a SoftLayer_User_Customer has a null parentId property then it doesn't have a parent user. +func (r User_Customer_OpenIdConnect) GetParent() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getParent", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's permissions. These permissions control that user's access to functions within the SoftLayer customer portal and API. +func (r User_Customer_OpenIdConnect) GetPermissions() (resp []datatypes.User_Customer_CustomerPermission_Permission, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPermissions", nil, &r.Options, &resp) + return +} + +// Attempt to authenticate a username and password to the SoftLayer customer portal. Many portal user accounts are configured to require answering a security question on login. In this case getPortalLoginToken() also verifies the given security question ID and answer. If authentication is successful then the API returns a token containing the ID of the authenticated user and a hash key used by the SoftLayer customer portal to maintain authentication. +func (r User_Customer_OpenIdConnect) GetPortalLoginToken(username *string, password *string, securityQuestionId *int, securityQuestionAnswer *string) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + username, + password, + securityQuestionId, + securityQuestionAnswer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPortalLoginToken", params, &r.Options, &resp) + return +} + +// Attempt to authenticate a supplied OpenIdConnect access token to the SoftLayer customer portal. If authentication is successful then the API returns a token containing the ID of the authenticated user and a hash key used by the SoftLayer customer portal to maintain authentication.
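For the username/password variant just defined, a minimal sketch; the security question arguments may be nil when the account does not require one, and the credentials are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	// The call itself performs the authentication, so the session does
	// not need to be pre-authenticated here.
	sess := session.New()

	// Username and password are placeholders; the two nils stand in for
	// the security question id and answer.
	token, err := services.GetUserCustomerOpenIdConnectService(sess).
		GetPortalLoginToken(sl.String("jdoe"), sl.String("secret"), nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	// The returned container carries the user id and a portal hash key.
	fmt.Printf("%+v\n", token)
}
```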
+func (r User_Customer_OpenIdConnect) GetPortalLoginTokenOpenIdConnect(providerType *string, accessToken *string, accountId *int, securityQuestionId *int, securityQuestionAnswer *string) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + providerType, + accessToken, + accountId, + securityQuestionId, + securityQuestionAnswer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPortalLoginTokenOpenIdConnect", params, &r.Options, &resp) + return +} + +// Select a type of preference you would like to get using [[SoftLayer_User_Customer::getPreferenceTypes|getPreferenceTypes]] and invoke this method using that preference type key name. +func (r User_Customer_OpenIdConnect) GetPreference(preferenceTypeKeyName *string) (resp datatypes.User_Preference, err error) { + params := []interface{}{ + preferenceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPreference", params, &r.Options, &resp) + return +} + +// Use any of the preference types to fetch or modify user preferences using [[SoftLayer_User_Customer::getPreference|getPreference]] or [[SoftLayer_User_Customer::changePreference|changePreference]], respectively. +func (r User_Customer_OpenIdConnect) GetPreferenceTypes() (resp []datatypes.User_Preference_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPreferenceTypes", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetPreferences() (resp []datatypes.User_Preference, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getPreferences", nil, &r.Options, &resp) + return +} + +// Retrieve the authentication requirements for an outstanding password set/reset request. The password key is provided to the user in an email generated by the [[SoftLayer_User_Customer::newUserPassword|newUserPassword]] method. Password recovery keys are valid for 24 hours after they're generated. +func (r User_Customer_OpenIdConnect) GetRequirementsForPasswordSet(passwordSet *datatypes.Container_User_Customer_PasswordSet) (resp datatypes.Container_User_Customer_PasswordSet, err error) { + params := []interface{}{ + passwordSet, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getRequirementsForPasswordSet", params, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetRoles() (resp []datatypes.User_Permission_Role, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getRoles", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetSalesforceUserLink() (resp datatypes.User_Customer_Link, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSalesforceUserLink", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's security question answers. Some portal users may not have security answers or may not be configured to require answering a security question on login. +func (r User_Customer_OpenIdConnect) GetSecurityAnswers() (resp []datatypes.User_Customer_Security_Answer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSecurityAnswers", nil, &r.Options, &resp) + return +} + +// Retrieve A user's notification subscription records.
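The preference calls above pair naturally: enumerate the available preference type key names, then fetch each preference by key name. A hedged sketch with a hypothetical user ID:

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	sess := session.New("myuser", "myapikey")                            // placeholder credentials
	svc := services.GetUserCustomerOpenIdConnectService(sess).Id(6786566) // hypothetical user id

	// Enumerate the preference type key names, then fetch each preference.
	types, err := svc.GetPreferenceTypes()
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range types {
		if t.KeyName == nil {
			continue
		}
		pref, err := svc.GetPreference(t.KeyName)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s => %+v\n", *t.KeyName, pref)
	}
}
```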
+func (r User_Customer_OpenIdConnect) GetSubscribers() (resp []datatypes.Notification_User_Subscriber, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSubscribers", nil, &r.Options, &resp) + return +} + +// Retrieve A user's successful attempts to log into the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) GetSuccessfulLogins() (resp []datatypes.User_Customer_Access_Authentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSuccessfulLogins", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a user is required to acknowledge the support policy for portal access. +func (r User_Customer_OpenIdConnect) GetSupportPolicyAcknowledgementRequiredFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSupportPolicyAcknowledgementRequiredFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetSupportPolicyDocument() (resp []byte, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSupportPolicyDocument", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetSupportPolicyName() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSupportPolicyName", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetSupportedLocales() (resp []datatypes.Locale, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSupportedLocales", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a user must take a brief survey the next time they log into the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) GetSurveyRequiredFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSurveyRequiredFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The surveys that a user has taken in the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) GetSurveys() (resp []datatypes.Survey, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getSurveys", nil, &r.Options, &resp) + return +} + +// Retrieve A user's associated tickets. +func (r User_Customer_OpenIdConnect) GetTickets() (resp []datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getTickets", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's time zone. +func (r User_Customer_OpenIdConnect) GetTimezone() (resp datatypes.Locale_Timezone, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getTimezone", nil, &r.Options, &resp) + return +} + +// Retrieve A user's unsuccessful attempts to log into the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) GetUnsuccessfulLogins() (resp []datatypes.User_Customer_Access_Authentication, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUnsuccessfulLogins", nil, &r.Options, &resp) + return +} + +// Returns an IMS User Object from the provided OpenIdConnect User ID for the Account of the active user. Enforces the User Management permissions for the Active User. An exception will be thrown if no matching IMS User is found.
+func (r User_Customer_OpenIdConnect) GetUserForUnifiedInvitation(openIdConnectUserId *string) (resp datatypes.User_Customer_OpenIdConnect, err error) { + params := []interface{}{ + openIdConnectUserId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserForUnifiedInvitation", params, &r.Options, &resp) + return +} + +// This method is deprecated. Please see documentation for initiatePortalPasswordChange. Retrieve a user object using a password recovery key received in an email generated by the [[SoftLayer_User_Customer::lostPassword|lostPassword]] method. The SoftLayer customer portal uses getUserFromLostPasswordRequest() to retrieve user security questions. Password recovery keys are valid for 24 hours after they're generated. +func (r User_Customer_OpenIdConnect) GetUserFromLostPasswordRequest(key *string) (resp []datatypes.User_Security_Question, err error) { + params := []interface{}{ + key, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserFromLostPasswordRequest", params, &r.Options, &resp) + return +} + +// Retrieve a user object using a password token. When a new user is created or when a user has requested a password change using initiatePortalPasswordChange, they will have received an email that contains a url with a token. That token is used as the parameter for getUserIdForPasswordSet. +func (r User_Customer_OpenIdConnect) GetUserIdForPasswordSet(key *string) (resp int, err error) { + params := []interface{}{ + key, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserIdForPasswordSet", params, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Customer_OpenIdConnect) GetUserLinks() (resp []datatypes.User_Customer_Link, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserLinks", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) GetUserPreferences(profileName *string, containerKeyname *string) (resp []datatypes.Layout_Profile, err error) { + params := []interface{}{ + profileName, + containerKeyname, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserPreferences", params, &r.Options, &resp) + return +} + +// Retrieve A portal user's status, which controls overall access to the SoftLayer customer portal and VPN access to the private network. +func (r User_Customer_OpenIdConnect) GetUserStatus() (resp datatypes.User_Customer_Status, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getUserStatus", nil, &r.Options, &resp) + return +} + +// Retrieve the number of CloudLayer Computing Instances that a portal user has access to. Portal users can have restrictions set that limit which services they can use and which actions they can perform on CloudLayer Computing Instances. You can set these permissions in the portal by clicking the "administrative" then "user admin" links. +func (r User_Customer_OpenIdConnect) GetVirtualGuestCount() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getVirtualGuestCount", nil, &r.Options, &resp) + return +} + +// Retrieve A portal user's accessible CloudLayer Computing Instances. These permissions control which CloudLayer Computing Instances a user has access to in the SoftLayer customer portal.
+func (r User_Customer_OpenIdConnect) GetVirtualGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "getVirtualGuests", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) InTerminalStatus() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "inTerminalStatus", nil, &r.Options, &resp) + return +} + +// The service initiates an external authentication with the given external authentication vendor. The authentication container and its content will be verified before an attempt is made to initiate an external authentication. A [[SoftLayer_Container_User_Customer_External_Binding_Phone|Phone external binding]] container can be used for this service. +// +// This service returns a unique authentication request token. You can use the [[SoftLayer_User_Customer::checkExternalAuthenticationStatus|checkExternalAuthenticationStatus]] service to check whether the authentication request is complete. +func (r User_Customer_OpenIdConnect) InitiateExternalAuthentication(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp string, err error) { + params := []interface{}{ + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "initiateExternalAuthentication", params, &r.Options, &resp) + return +} + +// Sends a password change email to the user containing a URL that allows the user to change their password. This is the first step when a user wishes to change their password. The URL that is generated contains a one-time use token that is valid for only 24 hours. +// +// If this is a new master user who has never logged into the portal, then a password reset will be initiated. Once a master user has logged into the portal, they must set up their security questions prior to logging out because master users are required to answer a security question during the password reset process. Should a master user not have security questions defined and not remember their password in order to define the security questions, then they will need to contact support at live chat or Revenue Services for assistance. +// +// For security reasons, the number of reset requests per username is limited within an undisclosed timeframe. +func (r User_Customer_OpenIdConnect) InitiatePortalPasswordChange(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "initiatePortalPasswordChange", params, &r.Options, &resp) + return +} + +// A Brand Agent that has permissions to Add Customer Accounts will be able to request the password email be sent to the Master User of a Customer Account created by the same Brand as the agent making the request. For security reasons, the number of reset requests is limited within an undisclosed timeframe. +func (r User_Customer_OpenIdConnect) InitiatePortalPasswordChangeByBrandAgent(username *string) (resp bool, err error) { + params := []interface{}{ + username, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "initiatePortalPasswordChangeByBrandAgent", params, &r.Options, &resp) + return +} + +// Send an email invitation to a user to join a SoftLayer account and authenticate with OpenIdConnect. Throws an exception on error.
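+//
+// A minimal usage sketch (assumptions: an authenticated *session.Session named sess and a
+// hypothetical user record ID userId; "IBMid" is the provider type discussed below for
+// silentlyMigrateUserOpenIdConnect):
+//
+//	svc := services.GetUserCustomerOpenIdConnectService(sess)
+//	providerType := "IBMid"
+//	if err := svc.Id(userId).InviteUserToLinkOpenIdConnect(&providerType); err != nil {
+//		// the invitation could not be sent
+//	}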
+func (r User_Customer_OpenIdConnect) InviteUserToLinkOpenIdConnect(providerType *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + providerType, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "inviteUserToLinkOpenIdConnect", params, &r.Options, &resp) + return +} + +// Portal users are considered master users if they don't have an associated parent user. The only users who don't have parent users are users whose username matches their SoftLayer account name. Master users have special permissions throughout the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) IsMasterUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "isMasterUser", nil, &r.Options, &resp) + return +} + +// This method is deprecated! SoftLayer Community Forums no longer exist; therefore, any password verified will return false. +// +// Determine if a string is the given user's login password to the SoftLayer community forums. +func (r User_Customer_OpenIdConnect) IsValidForumPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "isValidForumPassword", params, &r.Options, &resp) + return +} + +// Determine if a string is the given user's login password to the SoftLayer customer portal. +func (r User_Customer_OpenIdConnect) IsValidPortalPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "isValidPortalPassword", params, &r.Options, &resp) + return +} + +// This method is deprecated. Please see documentation for initiatePortalPasswordChange. SoftLayer provides a way for users of its customer portal to recover lost passwords. The lostPassword() method is the first step in this process. Given a valid username and email address, the SoftLayer API will email the address provided with a URL to visit to begin the password recovery process. The last part of this URL is a hash key that's used as an identifier throughout this process. Use this hash key in the [[SoftLayer_User_Customer::setPasswordFromLostPasswordRequest|setPasswordFromLostPasswordRequest]] method to reset a user's password. Password recovery hash keys are valid for 24 hours after they're generated. +func (r User_Customer_OpenIdConnect) LostPassword(username *string, email *string) (resp bool, err error) { + params := []interface{}{ + username, + email, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "lostPassword", params, &r.Options, &resp) + return +} + +// The perform external authentication method will authenticate the given external authentication container with an external vendor. The authentication container and its contents will be verified before an attempt is made to authenticate the contents of the container with an external vendor. +func (r User_Customer_OpenIdConnect) PerformExternalAuthentication(authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "performExternalAuthentication", params, &r.Options, &resp) + return +} + +// Set the password for a user who has an outstanding password request.
A user with an outstanding password request will have an unused and unexpired password key. The password key is part of the URL provided to the user in the email sent to the user with information on how to set their password. The email was generated by the [[SoftLayer_User_Customer::processPasswordSetRequest|processPasswordSetRequest]] method. Password recovery keys are valid for 24 hours after they're generated. +// +// User portal passwords must match the following restrictions. Portal passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your forum password +func (r User_Customer_OpenIdConnect) ProcessPasswordSetRequest(passwordSet *datatypes.Container_User_Customer_PasswordSet, authenticationContainer *datatypes.Container_User_Customer_External_Binding) (resp bool, err error) { + params := []interface{}{ + passwordSet, + authenticationContainer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "processPasswordSetRequest", params, &r.Options, &resp) + return +} + +// Remove all hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. If the current user does not have administrative privileges over this user, an inadequate permissions exception will be thrown. +// +// Users can call this function on child users, but not on themselves. An account's master has access to all users' permissions on their account. +func (r User_Customer_OpenIdConnect) RemoveAllHardwareAccessForThisUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeAllHardwareAccessForThisUser", nil, &r.Options, &resp) + return +} + +// Remove all cloud computing instances from a portal user's instance access list. A user's instance access list controls which of an account's computing instance objects a user has access to in the SoftLayer customer portal and API. If the current user does not have administrative privileges over this user, an inadequate permissions exception will be thrown. +// +// Users can call this function on child users, but not on themselves. An account's master has access to all users' permissions on their account. +func (r User_Customer_OpenIdConnect) RemoveAllVirtualAccessForThisUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeAllVirtualAccessForThisUser", nil, &r.Options, &resp) + return +} + +// Remove a user's API authentication key, removing that user's access to query the SoftLayer API. +func (r User_Customer_OpenIdConnect) RemoveApiAuthenticationKey(keyId *int) (resp bool, err error) { + params := []interface{}{ + keyId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeApiAuthenticationKey", params, &r.Options, &resp) + return +} + +// Remove multiple hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware a user does not have access to does not appear in the SoftLayer portal, and the API returns "not found" exceptions for it.
If a user does not have access to the hardware you're attempting to remove, then removeBulkHardwareAccess() returns true. +// +// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account. +// +// If the user has full hardware access, then access will be provided to all hardware except the IDs passed in. +func (r User_Customer_OpenIdConnect) RemoveBulkHardwareAccess(hardwareIds []int) (resp bool, err error) { + params := []interface{}{ + hardwareIds, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeBulkHardwareAccess", params, &r.Options, &resp) + return +} + +// Remove multiple permissions from a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. Removing a user's permission will affect that user's portal and API access. removePortalPermission() does not attempt to remove permissions that are not assigned to the user. +// +// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account. +// +// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are removed based on the keyName property of the permission objects within the permissions parameter. +func (r User_Customer_OpenIdConnect) RemoveBulkPortalPermission(permissions []datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) { + params := []interface{}{ + permissions, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeBulkPortalPermission", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) RemoveBulkRoles(roles []datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + roles, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeBulkRoles", params, &r.Options, &resp) + return +} + +// Remove multiple CloudLayer Computing Instances from a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's CloudLayer Computing Instance objects a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances a user does not have access to do not appear in the SoftLayer portal, and the API returns "not found" exceptions for them. If a user does not have access to the CloudLayer Computing Instance you're attempting to remove, then removeBulkVirtualGuestAccess() returns true. +// +// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set instance access for any of the other users on their account.
+func (r User_Customer_OpenIdConnect) RemoveBulkVirtualGuestAccess(virtualGuestIds []int) (resp bool, err error) { + params := []interface{}{ + virtualGuestIds, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeBulkVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) RemoveExternalBinding(externalBinding *datatypes.User_External_Binding) (resp bool, err error) { + params := []interface{}{ + externalBinding, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeExternalBinding", params, &r.Options, &resp) + return +} + +// Remove hardware from a portal user's hardware access list. A user's hardware access list controls which of an account's hardware objects a user has access to in the SoftLayer customer portal and API. Hardware a user does not have access to does not appear in the SoftLayer portal, and the API returns "not found" exceptions for it. If a user does not have access to the hardware you're attempting to remove, then removeHardwareAccess() returns true. +// +// Users can assign hardware access to their child users, but not to themselves. An account's master has access to all hardware on their customer account and can set hardware access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) RemoveHardwareAccess(hardwareId *int) (resp bool, err error) { + params := []interface{}{ + hardwareId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeHardwareAccess", params, &r.Options, &resp) + return +} + +// Remove a permission from a portal user's permission set. [[Permissions]] control which features in the SoftLayer customer portal and API a user may use. Removing a user's permission will affect that user's portal and API access. If the user does not have the permission you're attempting to remove then removePortalPermission() returns true. +// +// Users can assign permissions to their child users, but not to themselves. An account's master has all portal permissions and can set permissions for any of the other users on their account. +// +// Use the [[SoftLayer_User_Customer_CustomerPermission_Permission::getAllObjects]] method to retrieve a list of all permissions available in the SoftLayer customer portal and API. Permissions are removed based on the keyName property of the permission parameter. +func (r User_Customer_OpenIdConnect) RemovePortalPermission(permission *datatypes.User_Customer_CustomerPermission_Permission) (resp bool, err error) { + params := []interface{}{ + permission, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removePortalPermission", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) RemoveRole(role *datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + role, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeRole", params, &r.Options, &resp) + return +} + +// Remove a CloudLayer Computing Instance from a portal user's access list. A user's CloudLayer Computing Instance access list controls which of an account's computing instances a user has access to in the SoftLayer customer portal and API. CloudLayer Computing Instances a user does not have access to do not appear in the SoftLayer portal, and the API returns "not found" exceptions for them.
If a user does not have access to the CloudLayer Computing Instance you're attempting to remove, then removeVirtualGuestAccess() returns true. +// +// Users can assign CloudLayer Computing Instance access to their child users, but not to themselves. An account's master has access to all CloudLayer Computing Instances on their customer account and can set instance access for any of the other users on their account. +func (r User_Customer_OpenIdConnect) RemoveVirtualGuestAccess(virtualGuestId *int) (resp bool, err error) { + params := []interface{}{ + virtualGuestId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "removeVirtualGuestAccess", params, &r.Options, &resp) + return +} + +// This method is deprecated. Please see documentation for initiatePortalPasswordChange. Attempt to authenticate a username and password to the SoftLayer customer portal and reset their password. If authentication and the password reset are successful, then the API returns true. +func (r User_Customer_OpenIdConnect) ResetExpiredPassword(username *string, password *string, newPassword *string, securityQuestionId *int, securityQuestionAnswer *string) (resp bool, err error) { + params := []interface{}{ + username, + password, + newPassword, + securityQuestionId, + securityQuestionAnswer, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "resetExpiredPassword", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) SamlAuthenticate(accountId *string, samlResponse *string) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + accountId, + samlResponse, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "samlAuthenticate", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) SamlBeginAuthentication(accountId *int) (resp string, err error) { + params := []interface{}{ + accountId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "samlBeginAuthentication", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) SamlBeginLogout() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "samlBeginLogout", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) SamlLogout(samlResponse *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + samlResponse, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "samlLogout", params, &r.Options, &resp) + return +} + +// An OpenIdConnect identity, for example an IBMid, can be linked or mapped to one or more individual SoftLayer users, but no more than one per account. If an OpenIdConnect identity is mapped to multiple accounts in this manner, one such account should be identified as the default account for that identity. Invoke this only on IBMid-authenticated users. +func (r User_Customer_OpenIdConnect) SetDefaultAccount(providerType *string, accountId *int) (resp datatypes.Account, err error) { + params := []interface{}{ + providerType, + accountId, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "setDefaultAccount", params, &r.Options, &resp) + return +} + +// Set a user's password via the lost password recovery system, using a password recovery key received in an email generated by the [[SoftLayer_User_Customer::lostPassword|lostPassword]] method.
Password recovery keys are valid for 24 hours after they're generated. +// +// User portal passwords must match the following restrictions. Portal passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your forum password +func (r User_Customer_OpenIdConnect) SetPasswordFromLostPasswordRequest(key *string, password *string, securityAnswers []datatypes.User_Customer_Security_Answer) (resp bool, err error) { + params := []interface{}{ + key, + password, + securityAnswers, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "setPasswordFromLostPasswordRequest", params, &r.Options, &resp) + return +} + +// As the master user, calling this API for the IBMid provider type when there is an existing IBMid for the email on the SoftLayer account will silently (without sending an invitation email) create a link for the IBMid. NOTE: If the SoftLayer user is already linked to an IBMid, this call will fail. If the IBMid specified by the email of this user is already used in a link to another user in this account, this call will fail. If there is already an open invitation from this SoftLayer user to this or any IBMid, this call will fail. If there is already an open invitation from some other SoftLayer user in this account to this IBMid, then this call will fail. +func (r User_Customer_OpenIdConnect) SilentlyMigrateUserOpenIdConnect(providerType *string) (resp bool, err error) { + params := []interface{}{ + providerType, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "silentlyMigrateUserOpenIdConnect", params, &r.Options, &resp) + return +} + +// This method is deprecated! SoftLayer Community Forums no longer exist; therefore, this method will return false. +// +// Update a user's password on the SoftLayer community forums. As with portal passwords, user forum passwords must match the following restrictions. Forum passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your portal password +// Finally, users can only update their own password. +func (r User_Customer_OpenIdConnect) UpdateForumPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateForumPassword", params, &r.Options, &resp) + return +} + +// Update the active status for a notification that the user is subscribed to. A notification along with an active flag can be supplied to update the active status for a particular notification subscription. +func (r User_Customer_OpenIdConnect) UpdateNotificationSubscriber(notificationKeyName *string, active *int) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + active, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateNotificationSubscriber", params, &r.Options, &resp) + return +} + +// This method is deprecated.
Please see documentation for initiatePortalPasswordChange. Update a user's password on the SoftLayer customer portal. As with forum passwords, user portal passwords must match the following restrictions. Portal passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ + = +// * ...not match your username +// * ...not match your forum password +// Finally, users can only update their own password. An account's master user can update any of their account users' passwords. +func (r User_Customer_OpenIdConnect) UpdatePassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updatePassword", params, &r.Options, &resp) + return +} + +// Update a user's login security questions and answers on the SoftLayer customer portal. These questions and answers are used to optionally log into the SoftLayer customer portal using two-factor authentication. Each user must have three distinct questions set with a unique answer for each question, and each answer may only contain alphanumeric or the . , - _ ( ) [ ] : ; > < characters. Existing user security questions and answers are deleted before new ones are set, and users may only update their own security questions and answers. +func (r User_Customer_OpenIdConnect) UpdateSecurityAnswers(questions []datatypes.User_Security_Question, answers []string) (resp bool, err error) { + params := []interface{}{ + questions, + answers, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateSecurityAnswers", params, &r.Options, &resp) + return +} + +// Update a delivery method for a notification that the user is subscribed to. A delivery method keyName along with an active flag can be supplied to update the active status of the delivery methods for the specified notification. Available delivery methods - 'EMAIL'. Available notifications - 'PLANNED_MAINTENANCE', 'UNPLANNED_INCIDENT'. +func (r User_Customer_OpenIdConnect) UpdateSubscriberDeliveryMethod(notificationKeyName *string, deliveryMethodKeyNames []string, active *int) (resp bool, err error) { + params := []interface{}{ + notificationKeyName, + deliveryMethodKeyNames, + active, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateSubscriberDeliveryMethod", params, &r.Options, &resp) + return +} + +// Update a user's VPN password on the SoftLayer customer portal. As with portal passwords, VPN passwords must match the following restrictions. VPN passwords must... +// * ...be over eight characters long. +// * ...be under twenty characters long. +// * ...contain at least one uppercase letter +// * ...contain at least one lowercase letter +// * ...contain at least one number +// * ...contain one of the special characters _ - | @ . , ? / ! ~ # $ % ^ & * ( ) { } [ ] \ = +// * ...not match your username +// * ...not match your forum password +// Finally, users can only update their own VPN password. An account's master user can update any of their account users' VPN passwords.
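+//
+// A minimal usage sketch (assumptions: an authenticated *session.Session named sess and a
+// hypothetical user record ID userId; the password value is illustrative and satisfies the
+// rules above):
+//
+//	svc := services.GetUserCustomerOpenIdConnectService(sess)
+//	newPw := "Examp1e-Vpn"
+//	ok, err := svc.Id(userId).UpdateVpnPassword(&newPw)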
+func (r User_Customer_OpenIdConnect) UpdateVpnPassword(password *string) (resp bool, err error) { + params := []interface{}{ + password, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateVpnPassword", params, &r.Options, &resp) + return +} + +// Always call this function to enable changes when manually configuring VPN subnet access. +func (r User_Customer_OpenIdConnect) UpdateVpnUser() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "updateVpnUser", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_OpenIdConnect) ValidateAuthenticationToken(authenticationToken *datatypes.Container_User_Authentication_Token) (resp datatypes.Container_User_Customer_Portal_Token, err error) { + params := []interface{}{ + authenticationToken, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_OpenIdConnect", "validateAuthenticationToken", params, &r.Options, &resp) + return +} + +// Contains user information for Service Provider Enrollment. +type User_Customer_Prospect_ServiceProvider_EnrollRequest struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerProspectServiceProviderEnrollRequestService returns an instance of the User_Customer_Prospect_ServiceProvider_EnrollRequest SoftLayer service +func GetUserCustomerProspectServiceProviderEnrollRequestService(sess *session.Session) User_Customer_Prospect_ServiceProvider_EnrollRequest { + return User_Customer_Prospect_ServiceProvider_EnrollRequest{Session: sess} +} + +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Id(id int) User_Customer_Prospect_ServiceProvider_EnrollRequest { + r.Options.Id = &id + return r +} + +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Mask(mask string) User_Customer_Prospect_ServiceProvider_EnrollRequest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Filter(filter string) User_Customer_Prospect_ServiceProvider_EnrollRequest { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Limit(limit int) User_Customer_Prospect_ServiceProvider_EnrollRequest { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Offset(offset int) User_Customer_Prospect_ServiceProvider_EnrollRequest { + r.Options.Offset = &offset + return r +} + +// Create a new Service Provider Enrollment +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) Enroll(templateObject *datatypes.User_Customer_Prospect_ServiceProvider_EnrollRequest) (resp datatypes.User_Customer_Prospect_ServiceProvider_EnrollRequest, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Customer_Prospect_ServiceProvider_EnrollRequest", "enroll", params, &r.Options, &resp) + return +} + +// Retrieve Catalyst company types. 
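+//
+// A minimal usage sketch (assumptions: an authenticated *session.Session named sess and a
+// hypothetical enrollment request record ID enrollRequestId):
+//
+//	svc := services.GetUserCustomerProspectServiceProviderEnrollRequestService(sess)
+//	companyType, err := svc.Id(enrollRequestId).GetCompanyType()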
+func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) GetCompanyType() (resp datatypes.Catalyst_Company_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Prospect_ServiceProvider_EnrollRequest", "getCompanyType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Customer_Prospect_ServiceProvider_EnrollRequest) GetObject() (resp datatypes.User_Customer_Prospect_ServiceProvider_EnrollRequest, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Prospect_ServiceProvider_EnrollRequest", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_User_Customer_Security_Answer type contains a user's answers to security questions. +type User_Customer_Security_Answer struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerSecurityAnswerService returns an instance of the User_Customer_Security_Answer SoftLayer service +func GetUserCustomerSecurityAnswerService(sess *session.Session) User_Customer_Security_Answer { + return User_Customer_Security_Answer{Session: sess} +} + +func (r User_Customer_Security_Answer) Id(id int) User_Customer_Security_Answer { + r.Options.Id = &id + return r +} + +func (r User_Customer_Security_Answer) Mask(mask string) User_Customer_Security_Answer { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Security_Answer) Filter(filter string) User_Customer_Security_Answer { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Security_Answer) Limit(limit int) User_Customer_Security_Answer { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Security_Answer) Offset(offset int) User_Customer_Security_Answer { + r.Options.Offset = &offset + return r +} + +// getObject retrieves the SoftLayer_User_Customer_Security_Answer object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_Security_Answer service. +func (r User_Customer_Security_Answer) GetObject() (resp datatypes.User_Customer_Security_Answer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Security_Answer", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The question the security answer is associated with. +func (r User_Customer_Security_Answer) GetQuestion() (resp datatypes.User_Security_Question, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Security_Answer", "getQuestion", nil, &r.Options, &resp) + return +} + +// Retrieve The user who the security answer belongs to. +func (r User_Customer_Security_Answer) GetUser() (resp datatypes.User_Customer, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Security_Answer", "getUser", nil, &r.Options, &resp) + return +} + +// Each SoftLayer portal account is assigned a status code that determines how it's treated in the customer portal. This status is reflected in the SoftLayer_User_Customer_Status data type. Status differs from user permissions in that user status applies globally to the portal while user permissions are applied to specific portal functions.
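+//
+// A minimal usage sketch (assumes an authenticated *session.Session named sess):
+//
+//	svc := services.GetUserCustomerStatusService(sess)
+//	statuses, err := svc.GetAllObjects() // every available portal user status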
+type User_Customer_Status struct { + Session *session.Session + Options sl.Options +} + +// GetUserCustomerStatusService returns an instance of the User_Customer_Status SoftLayer service +func GetUserCustomerStatusService(sess *session.Session) User_Customer_Status { + return User_Customer_Status{Session: sess} +} + +func (r User_Customer_Status) Id(id int) User_Customer_Status { + r.Options.Id = &id + return r +} + +func (r User_Customer_Status) Mask(mask string) User_Customer_Status { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Customer_Status) Filter(filter string) User_Customer_Status { + r.Options.Filter = filter + return r +} + +func (r User_Customer_Status) Limit(limit int) User_Customer_Status { + r.Options.Limit = &limit + return r +} + +func (r User_Customer_Status) Offset(offset int) User_Customer_Status { + r.Options.Offset = &offset + return r +} + +// Retrieve all user status objects. +func (r User_Customer_Status) GetAllObjects() (resp []datatypes.User_Customer_Status, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Status", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Customer_Status object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Customer_Status service. +func (r User_Customer_Status) GetObject() (resp datatypes.User_Customer_Status, err error) { + err = r.Session.DoRequest("SoftLayer_User_Customer_Status", "getObject", nil, &r.Options, &resp) + return +} + +// The SoftLayer_User_External_Binding data type contains general information for a single external binding. This includes the 3rd party vendor, the type of binding, and a unique identifier and password that are used to authenticate against the 3rd party service. +type User_External_Binding struct { + Session *session.Session + Options sl.Options +} + +// GetUserExternalBindingService returns an instance of the User_External_Binding SoftLayer service +func GetUserExternalBindingService(sess *session.Session) User_External_Binding { + return User_External_Binding{Session: sess} +} + +func (r User_External_Binding) Id(id int) User_External_Binding { + r.Options.Id = &id + return r +} + +func (r User_External_Binding) Mask(mask string) User_External_Binding { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_External_Binding) Filter(filter string) User_External_Binding { + r.Options.Filter = filter + return r +} + +func (r User_External_Binding) Limit(limit int) User_External_Binding { + r.Options.Limit = &limit + return r +} + +func (r User_External_Binding) Offset(offset int) User_External_Binding { + r.Options.Offset = &offset + return r +} + +// Delete an external authentication binding. If the external binding currently has an active billing item associated with it, you will be prevented from deleting the binding. The alternative method to remove an external authentication binding is to use the service cancellation form. +func (r User_External_Binding) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "deleteObject", nil, &r.Options, &resp) + return +} + +// Retrieve Attributes of an external authentication binding.
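+//
+// A minimal usage sketch (assumptions: an authenticated *session.Session named sess and a
+// hypothetical external binding record ID bindingId):
+//
+//	svc := services.GetUserExternalBindingService(sess)
+//	attrs, err := svc.Id(bindingId).GetAttributes()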
+func (r User_External_Binding) GetAttributes() (resp []datatypes.User_External_Binding_Attribute, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getAttributes", nil, &r.Options, &resp) + return +} + +// Retrieve Information regarding the billing item for external authentication. +func (r User_External_Binding) GetBillingItem() (resp datatypes.Billing_Item, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve An optional note for identifying the external binding. +func (r User_External_Binding) GetNote() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getNote", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_External_Binding) GetObject() (resp datatypes.User_External_Binding, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve The type of external authentication binding. +func (r User_External_Binding) GetType() (resp datatypes.User_External_Binding_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getType", nil, &r.Options, &resp) + return +} + +// Retrieve The vendor of an external authentication binding. +func (r User_External_Binding) GetVendor() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "getVendor", nil, &r.Options, &resp) + return +} + +// Update the note of an external binding. The note is an optional property that is used to store information about a binding. +func (r User_External_Binding) UpdateNote(text *string) (resp bool, err error) { + params := []interface{}{ + text, + } + err = r.Session.DoRequest("SoftLayer_User_External_Binding", "updateNote", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_External_Binding_Vendor data type contains information for a single external binding vendor. This information includes a user friendly vendor name, a unique version of the vendor name, and a unique internal identifier that can be used when creating a new external binding. +type User_External_Binding_Vendor struct { + Session *session.Session + Options sl.Options +} + +// GetUserExternalBindingVendorService returns an instance of the User_External_Binding_Vendor SoftLayer service +func GetUserExternalBindingVendorService(sess *session.Session) User_External_Binding_Vendor { + return User_External_Binding_Vendor{Session: sess} +} + +func (r User_External_Binding_Vendor) Id(id int) User_External_Binding_Vendor { + r.Options.Id = &id + return r +} + +func (r User_External_Binding_Vendor) Mask(mask string) User_External_Binding_Vendor { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_External_Binding_Vendor) Filter(filter string) User_External_Binding_Vendor { + r.Options.Filter = filter + return r +} + +func (r User_External_Binding_Vendor) Limit(limit int) User_External_Binding_Vendor { + r.Options.Limit = &limit + return r +} + +func (r User_External_Binding_Vendor) Offset(offset int) User_External_Binding_Vendor { + r.Options.Offset = &offset + return r +} + +// getAllObjects() will return a list of the available external binding vendors that SoftLayer supports. 
Use this list to select the appropriate vendor when creating a new external binding. +func (r User_External_Binding_Vendor) GetAllObjects() (resp []datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding_Vendor", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_External_Binding_Vendor) GetObject() (resp datatypes.User_External_Binding_Vendor, err error) { + err = r.Session.DoRequest("SoftLayer_User_External_Binding_Vendor", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type User_Permission_Action struct { + Session *session.Session + Options sl.Options +} + +// GetUserPermissionActionService returns an instance of the User_Permission_Action SoftLayer service +func GetUserPermissionActionService(sess *session.Session) User_Permission_Action { + return User_Permission_Action{Session: sess} +} + +func (r User_Permission_Action) Id(id int) User_Permission_Action { + r.Options.Id = &id + return r +} + +func (r User_Permission_Action) Mask(mask string) User_Permission_Action { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Permission_Action) Filter(filter string) User_Permission_Action { + r.Options.Filter = filter + return r +} + +func (r User_Permission_Action) Limit(limit int) User_Permission_Action { + r.Options.Limit = &limit + return r +} + +func (r User_Permission_Action) Offset(offset int) User_Permission_Action { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Permission_Action) GetAllObjects() (resp []datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Action", "getAllObjects", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Action) GetObject() (resp datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Action", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type User_Permission_Group struct { + Session *session.Session + Options sl.Options +} + +// GetUserPermissionGroupService returns an instance of the User_Permission_Group SoftLayer service +func GetUserPermissionGroupService(sess *session.Session) User_Permission_Group { + return User_Permission_Group{Session: sess} +} + +func (r User_Permission_Group) Id(id int) User_Permission_Group { + r.Options.Id = &id + return r +} + +func (r User_Permission_Group) Mask(mask string) User_Permission_Group { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Permission_Group) Filter(filter string) User_Permission_Group { + r.Options.Filter = filter + return r +} + +func (r User_Permission_Group) Limit(limit int) User_Permission_Group { + r.Options.Limit = &limit + return r +} + +func (r User_Permission_Group) Offset(offset int) User_Permission_Group { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Permission_Group) AddAction(action *datatypes.User_Permission_Action) (err error) { + var resp datatypes.Void + params := []interface{}{ + action, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "addAction", params, &r.Options, &resp) + return +} + +// no 
documentation yet +func (r User_Permission_Group) AddBulkActions(actions []datatypes.User_Permission_Action) (err error) { + var resp datatypes.Void + params := []interface{}{ + actions, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "addBulkActions", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) AddBulkResourceObjects(resourceObjects []datatypes.Entity, resourceTypeKeyName *string) (resp bool, err error) { + params := []interface{}{ + resourceObjects, + resourceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "addBulkResourceObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) AddResourceObject(resourceObject *datatypes.Entity, resourceTypeKeyName *string) (resp bool, err error) { + params := []interface{}{ + resourceObject, + resourceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "addResourceObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) CreateObject(templateObject *datatypes.User_Permission_Group) (resp datatypes.User_Permission_Group, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) EditObject(templateObject *datatypes.User_Permission_Group) (resp datatypes.User_Permission_Group, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Group) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Group) GetActions() (resp []datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "getActions", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) GetObject() (resp datatypes.User_Permission_Group, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Group) GetRoles() (resp []datatypes.User_Permission_Role, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "getRoles", nil, &r.Options, &resp) + return +} + +// Retrieve The type of the permission group. 
+func (r User_Permission_Group) GetType() (resp datatypes.User_Permission_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "getType", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) LinkRole(role *datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + role, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "linkRole", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) RemoveAction(action *datatypes.User_Permission_Action) (err error) { + var resp datatypes.Void + params := []interface{}{ + action, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "removeAction", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) RemoveBulkActions(actions []datatypes.User_Permission_Action) (err error) { + var resp datatypes.Void + params := []interface{}{ + actions, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "removeBulkActions", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) RemoveBulkResourceObjects(resourceObjects []datatypes.Entity, resourceTypeKeyName *string) (resp bool, err error) { + params := []interface{}{ + resourceObjects, + resourceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "removeBulkResourceObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) RemoveResourceObject(resourceObject *datatypes.Entity, resourceTypeKeyName *string) (resp bool, err error) { + params := []interface{}{ + resourceObject, + resourceTypeKeyName, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "removeResourceObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group) UnlinkRole(role *datatypes.User_Permission_Role) (err error) { + var resp datatypes.Void + params := []interface{}{ + role, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Group", "unlinkRole", params, &r.Options, &resp) + return +} + +// no documentation yet +type User_Permission_Group_Type struct { + Session *session.Session + Options sl.Options +} + +// GetUserPermissionGroupTypeService returns an instance of the User_Permission_Group_Type SoftLayer service +func GetUserPermissionGroupTypeService(sess *session.Session) User_Permission_Group_Type { + return User_Permission_Group_Type{Session: sess} +} + +func (r User_Permission_Group_Type) Id(id int) User_Permission_Group_Type { + r.Options.Id = &id + return r +} + +func (r User_Permission_Group_Type) Mask(mask string) User_Permission_Group_Type { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Permission_Group_Type) Filter(filter string) User_Permission_Group_Type { + r.Options.Filter = filter + return r +} + +func (r User_Permission_Group_Type) Limit(limit int) User_Permission_Group_Type { + r.Options.Limit = &limit + return r +} + +func (r User_Permission_Group_Type) Offset(offset int) User_Permission_Group_Type { + r.Options.Offset = &offset + return r +} + +// Retrieve +func (r User_Permission_Group_Type) GetGroups() (resp []datatypes.User_Permission_Group, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group_Type", "getGroups", nil, 
&r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Group_Type) GetObject() (resp datatypes.User_Permission_Group_Type, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Group_Type", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +type User_Permission_Role struct { + Session *session.Session + Options sl.Options +} + +// GetUserPermissionRoleService returns an instance of the User_Permission_Role SoftLayer service +func GetUserPermissionRoleService(sess *session.Session) User_Permission_Role { + return User_Permission_Role{Session: sess} +} + +func (r User_Permission_Role) Id(id int) User_Permission_Role { + r.Options.Id = &id + return r +} + +func (r User_Permission_Role) Mask(mask string) User_Permission_Role { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Permission_Role) Filter(filter string) User_Permission_Role { + r.Options.Filter = filter + return r +} + +func (r User_Permission_Role) Limit(limit int) User_Permission_Role { + r.Options.Limit = &limit + return r +} + +func (r User_Permission_Role) Offset(offset int) User_Permission_Role { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r User_Permission_Role) AddUser(user *datatypes.User_Customer) (err error) { + var resp datatypes.Void + params := []interface{}{ + user, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "addUser", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) CreateObject(templateObject *datatypes.User_Permission_Role) (resp datatypes.User_Permission_Role, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "createObject", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "deleteObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) EditObject(templateObject *datatypes.User_Permission_Role) (resp datatypes.User_Permission_Role, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Role) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Role) GetActions() (resp []datatypes.User_Permission_Action, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "getActions", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Role) GetGroups() (resp []datatypes.User_Permission_Group, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "getGroups", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) GetObject() (resp datatypes.User_Permission_Role, err error) { + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "getObject", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r User_Permission_Role) GetUsers() (resp []datatypes.User_Customer, err error) { + err = 
r.Session.DoRequest("SoftLayer_User_Permission_Role", "getUsers", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) LinkGroup(group *datatypes.User_Permission_Group) (err error) { + var resp datatypes.Void + params := []interface{}{ + group, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "linkGroup", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) RemoveUser(user *datatypes.User_Customer) (err error) { + var resp datatypes.Void + params := []interface{}{ + user, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "removeUser", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r User_Permission_Role) UnlinkGroup(group *datatypes.User_Permission_Group) (err error) { + var resp datatypes.Void + params := []interface{}{ + group, + } + err = r.Session.DoRequest("SoftLayer_User_Permission_Role", "unlinkGroup", params, &r.Options, &resp) + return +} + +// The SoftLayer_User_Security_Question data type contains security questions. +type User_Security_Question struct { + Session *session.Session + Options sl.Options +} + +// GetUserSecurityQuestionService returns an instance of the User_Security_Question SoftLayer service +func GetUserSecurityQuestionService(sess *session.Session) User_Security_Question { + return User_Security_Question{Session: sess} +} + +func (r User_Security_Question) Id(id int) User_Security_Question { + r.Options.Id = &id + return r +} + +func (r User_Security_Question) Mask(mask string) User_Security_Question { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r User_Security_Question) Filter(filter string) User_Security_Question { + r.Options.Filter = filter + return r +} + +func (r User_Security_Question) Limit(limit int) User_Security_Question { + r.Options.Limit = &limit + return r +} + +func (r User_Security_Question) Offset(offset int) User_Security_Question { + r.Options.Offset = &offset + return r +} + +// Retrieve all viewable security questions. +func (r User_Security_Question) GetAllObjects() (resp []datatypes.User_Security_Question, err error) { + err = r.Session.DoRequest("SoftLayer_User_Security_Question", "getAllObjects", nil, &r.Options, &resp) + return +} + +// getObject retrieves the SoftLayer_User_Security_Question object whose ID number corresponds to the ID number of the init parameter passed to the SoftLayer_User_Security_Question service. +func (r User_Security_Question) GetObject() (resp datatypes.User_Security_Question, err error) { + err = r.Session.DoRequest("SoftLayer_User_Security_Question", "getObject", nil, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/utility.go b/vendor/github.com/softlayer/softlayer-go/services/utility.go new file mode 100644 index 0000000000..9eed189f36 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/utility.go @@ -0,0 +1,88 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// no documentation yet +type Utility_Network struct { + Session *session.Session + Options sl.Options +} + +// GetUtilityNetworkService returns an instance of the Utility_Network SoftLayer service +func GetUtilityNetworkService(sess *session.Session) Utility_Network { + return Utility_Network{Session: sess} +} + +func (r Utility_Network) Id(id int) Utility_Network { + r.Options.Id = &id + return r +} + +func (r Utility_Network) Mask(mask string) Utility_Network { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Utility_Network) Filter(filter string) Utility_Network { + r.Options.Filter = filter + return r +} + +func (r Utility_Network) Limit(limit int) Utility_Network { + r.Options.Limit = &limit + return r +} + +func (r Utility_Network) Offset(offset int) Utility_Network { + r.Options.Offset = &offset + return r +} + +// A method used to return the nameserver information for a given address +func (r Utility_Network) NsLookup(address *string, typ *string) (resp string, err error) { + params := []interface{}{ + address, + typ, + } + err = r.Session.DoRequest("SoftLayer_Utility_Network", "nsLookup", params, &r.Options, &resp) + return +} + +// Perform a WHOIS lookup from SoftLayer's application servers on the given IP address or hostname and return the raw results of that command. The returned result is similar to the result received from running the command `whois` from a UNIX command shell. A WHOIS lookup queries a host's registrar to retrieve domain registrant information including registration date, expiry date, and the administrative, technical, billing, and abuse contacts responsible for a domain. WHOIS lookups are useful for determining a physical contact responsible for a particular domain. WHOIS lookups are also useful for determining domain availability. Running a WHOIS lookup on an IP address queries ARIN for that IP block's ownership, and is helpful for determining a physical entity responsible for a certain IP address. +func (r Utility_Network) Whois(address *string) (resp string, err error) { + params := []interface{}{ + address, + } + err = r.Session.DoRequest("SoftLayer_Utility_Network", "whois", params, &r.Options, &resp) + return +} diff --git a/vendor/github.com/softlayer/softlayer-go/services/virtual.go b/vendor/github.com/softlayer/softlayer-go/services/virtual.go new file mode 100644 index 0000000000..c0cc92b0d1 --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/services/virtual.go @@ -0,0 +1,2957 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package services + +import ( + "fmt" + "strings" + + "github.com/softlayer/softlayer-go/datatypes" + "github.com/softlayer/softlayer-go/session" + "github.com/softlayer/softlayer-go/sl" +) + +// This type presents the structure for a DedicatedHost. The type contains relational properties to distinguish a host, associate an account to it. +type Virtual_DedicatedHost struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualDedicatedHostService returns an instance of the Virtual_DedicatedHost SoftLayer service +func GetVirtualDedicatedHostService(sess *session.Session) Virtual_DedicatedHost { + return Virtual_DedicatedHost{Session: sess} +} + +func (r Virtual_DedicatedHost) Id(id int) Virtual_DedicatedHost { + r.Options.Id = &id + return r +} + +func (r Virtual_DedicatedHost) Mask(mask string) Virtual_DedicatedHost { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_DedicatedHost) Filter(filter string) Virtual_DedicatedHost { + r.Options.Filter = filter + return r +} + +func (r Virtual_DedicatedHost) Limit(limit int) Virtual_DedicatedHost { + r.Options.Limit = &limit + return r +} + +func (r Virtual_DedicatedHost) Offset(offset int) Virtual_DedicatedHost { + r.Options.Offset = &offset + return r +} + +// This method will cancel a dedicated virtual host immediately. +func (r Virtual_DedicatedHost) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "deleteObject", nil, &r.Options, &resp) + return +} + +// Edit a dedicated host's properties +func (r Virtual_DedicatedHost) EditObject(templateObject *datatypes.Virtual_DedicatedHost) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "editObject", params, &r.Options, &resp) + return +} + +// Get the allocation properties for a specified virtual host +func (r Virtual_DedicatedHost) FetchAllocationStatus() (resp datatypes.Container_Virtual_DedicatedHost_AllocationStatus, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "fetchAllocationStatus", nil, &r.Options, &resp) + return +} + +// Retrieve The account which dedicated host belongs to. +func (r Virtual_DedicatedHost) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve The container representing allocations on a dedicated host. +func (r Virtual_DedicatedHost) GetAllocationStatus() (resp datatypes.Container_Virtual_DedicatedHost_AllocationStatus, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getAllocationStatus", nil, &r.Options, &resp) + return +} + +// This method will get the available backend routers to order [[SoftLayer_Virtual_DedicatedHost]] +func (r Virtual_DedicatedHost) GetAvailableRouters(dedicatedHost *datatypes.Virtual_DedicatedHost) (resp []datatypes.Hardware, err error) { + params := []interface{}{ + dedicatedHost, + } + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getAvailableRouters", params, &r.Options, &resp) + return +} + +// Retrieve The backendRouter behind dedicated host's pool. 
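Every generated service follows the same shape: a `Get...Service` constructor plus the fluent option setters defined above (`Id`, `Mask`, `Filter`, `Limit`, `Offset`), each of which returns a copy of the service value. A minimal usage sketch, assuming credentials come from the environment; the host id and mask fields are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

func main() {
	// With no arguments, session.New falls back to the SL_USERNAME and
	// SL_API_KEY environment variables (or the SoftLayer config file).
	sess := session.New()

	// Each setter returns a modified copy, so the chain below leaves
	// the base service value reusable. 12345 is a placeholder host id.
	svc := services.GetVirtualDedicatedHostService(sess)
	host, err := svc.Id(12345).Mask("id;name;cpuCount;memoryCapacity").GetObject()
	if err != nil {
		log.Fatal(err)
	}

	// Fields on the returned datatype are pointers; check for nil
	// before dereferencing.
	if host.Name != nil {
		fmt.Println("dedicated host:", *host.Name)
	}
}
```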
+func (r Virtual_DedicatedHost) GetBackendRouter() (resp datatypes.Hardware_Router_Backend, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getBackendRouter", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a dedicated host. +func (r Virtual_DedicatedHost) GetBillingItem() (resp datatypes.Billing_Item_Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The datacenter that the host resides in. +func (r Virtual_DedicatedHost) GetDatacenter() (resp datatypes.Location, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getDatacenter", nil, &r.Options, &resp) + return +} + +// Retrieve The guests associated with a host. +func (r Virtual_DedicatedHost) GetGuests() (resp []datatypes.Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getGuests", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_DedicatedHost) GetObject() (resp datatypes.Virtual_DedicatedHost, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_DedicatedHost", "getObject", nil, &r.Options, &resp) + return +} + +// The virtual disk image data type presents the structure in which a virtual disk image will be presented. +// +// Virtual block devices are assigned to disk images. +type Virtual_Disk_Image struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualDiskImageService returns an instance of the Virtual_Disk_Image SoftLayer service +func GetVirtualDiskImageService(sess *session.Session) Virtual_Disk_Image { + return Virtual_Disk_Image{Session: sess} +} + +func (r Virtual_Disk_Image) Id(id int) Virtual_Disk_Image { + r.Options.Id = &id + return r +} + +func (r Virtual_Disk_Image) Mask(mask string) Virtual_Disk_Image { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_Disk_Image) Filter(filter string) Virtual_Disk_Image { + r.Options.Filter = filter + return r +} + +func (r Virtual_Disk_Image) Limit(limit int) Virtual_Disk_Image { + r.Options.Limit = &limit + return r +} + +func (r Virtual_Disk_Image) Offset(offset int) Virtual_Disk_Image { + r.Options.Offset = &offset + return r +} + +// no documentation yet +func (r Virtual_Disk_Image) EditObject(templateObject *datatypes.Virtual_Disk_Image) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "editObject", params, &r.Options, &resp) + return +} + +// Retrieve The billing item for a virtual disk image. +func (r Virtual_Disk_Image) GetBillingItem() (resp datatypes.Billing_Item_Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve The block devices that a disk image is attached to. Block devices connect computing instances to disk images. 
+func (r Virtual_Disk_Image) GetBlockDevices() (resp []datatypes.Virtual_Guest_Block_Device, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getBlockDevices", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Disk_Image) GetBootableVolumeFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getBootableVolumeFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Disk_Image) GetCoalescedDiskImages() (resp []datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getCoalescedDiskImages", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Disk_Image) GetCopyOnWriteFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getCopyOnWriteFlag", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Disk_Image) GetLocalDiskFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getLocalDiskFlag", nil, &r.Options, &resp) + return +} + +// Retrieve Whether this disk image is meant for storage of custom user data supplied with a Cloud Computing Instance order. +func (r Virtual_Disk_Image) GetMetadataFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getMetadataFlag", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Disk_Image) GetObject() (resp datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getObject", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Disk_Image) GetPublicIsoImages() (resp []datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getPublicIsoImages", nil, &r.Options, &resp) + return +} + +// Retrieve References to the software that resides on a disk image. +func (r Virtual_Disk_Image) GetSoftwareReferences() (resp []datatypes.Virtual_Disk_Image_Software, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getSoftwareReferences", nil, &r.Options, &resp) + return +} + +// Retrieve The original disk image that the current disk image was cloned from. +func (r Virtual_Disk_Image) GetSourceDiskImage() (resp datatypes.Virtual_Disk_Image, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getSourceDiskImage", nil, &r.Options, &resp) + return +} + +// Retrieve The storage repository that a disk image resides in. +func (r Virtual_Disk_Image) GetStorageRepository() (resp datatypes.Virtual_Storage_Repository, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getStorageRepository", nil, &r.Options, &resp) + return +} + +// Retrieve The type of storage repository that a disk image resides in. +func (r Virtual_Disk_Image) GetStorageRepositoryType() (resp datatypes.Virtual_Storage_Repository_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getStorageRepositoryType", nil, &r.Options, &resp) + return +} + +// Retrieve The template that attaches a disk image to a [[SoftLayer_Virtual_Guest_Block_Device_Template_Group|archive]]. +func (r Virtual_Disk_Image) GetTemplateBlockDevice() (resp datatypes.Virtual_Guest_Block_Device_Template, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getTemplateBlockDevice", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual disk image's type. 
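List-returning calls such as `GetPublicIsoImages` honor the `Limit` and `Offset` options shown above, which makes client-side paging straightforward. A sketch, assuming an authenticated `*session.Session` and that the API applies result limits to this call:

```go
import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// listPublicIsoImages pages through the public ISO images 50 at a time.
func listPublicIsoImages(sess *session.Session) {
	svc := services.GetVirtualDiskImageService(sess)

	const pageSize = 50
	for offset := 0; ; offset += pageSize {
		page, err := svc.Mask("id;name;capacity").Limit(pageSize).Offset(offset).GetPublicIsoImages()
		if err != nil {
			log.Fatal(err)
		}
		for _, img := range page {
			if img.Name != nil {
				fmt.Println(*img.Name)
			}
		}
		if len(page) < pageSize {
			break // a short page means no more results
		}
	}
}
```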
+func (r Virtual_Disk_Image) GetType() (resp datatypes.Virtual_Disk_Image_Type, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Disk_Image", "getType", nil, &r.Options, &resp) + return +} + +// The virtual guest data type presents the structure in which all virtual guests will be presented. Internally, the structure supports various virtualization platforms with no change to external interaction. +// +// A guest, also known as a virtual server, represents an allocation of resources on a virtual host. +type Virtual_Guest struct { + Session *session.Session + Options sl.Options +} + +// GetVirtualGuestService returns an instance of the Virtual_Guest SoftLayer service +func GetVirtualGuestService(sess *session.Session) Virtual_Guest { + return Virtual_Guest{Session: sess} +} + +func (r Virtual_Guest) Id(id int) Virtual_Guest { + r.Options.Id = &id + return r +} + +func (r Virtual_Guest) Mask(mask string) Virtual_Guest { + if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) { + mask = fmt.Sprintf("mask[%s]", mask) + } + + r.Options.Mask = mask + return r +} + +func (r Virtual_Guest) Filter(filter string) Virtual_Guest { + r.Options.Filter = filter + return r +} + +func (r Virtual_Guest) Limit(limit int) Virtual_Guest { + r.Options.Limit = &limit + return r +} + +func (r Virtual_Guest) Offset(offset int) Virtual_Guest { + r.Options.Offset = &offset + return r +} + +// Activate the private network port +func (r Virtual_Guest) ActivatePrivatePort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "activatePrivatePort", nil, &r.Options, &resp) + return +} + +// Activate the public network port +func (r Virtual_Guest) ActivatePublicPort() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "activatePublicPort", nil, &r.Options, &resp) + return +} + +// This method is used to allow access to a SoftLayer_Network_Storage volume that supports host- or network-level access control. +func (r Virtual_Guest) AllowAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "allowAccessToNetworkStorage", params, &r.Options, &resp) + return +} + +// This method is used to allow access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control. +func (r Virtual_Guest) AllowAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) { + params := []interface{}{ + networkStorageTemplateObjects, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "allowAccessToNetworkStorageList", params, &r.Options, &resp) + return +} + +// Creates a transaction to attach a guest's disk image. If the disk image is already attached it will be ignored. +// +// WARNING: SoftLayer_Virtual_Guest::checkHostDiskAvailability should be called before this method. If the SoftLayer_Virtual_Guest::checkHostDiskAvailability method is not called before this method, the guest migration will happen automatically. 
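The warning above pairs `CheckHostDiskAvailability` with `AttachDiskImage`: skipping the check means a failed availability test is answered with an automatic guest migration. A minimal sketch of the safe ordering, with placeholder guest and image ids:

```go
import (
	"fmt"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// attachWithoutMigration refuses to attach when the host lacks space,
// instead of letting the API migrate the guest implicitly.
func attachWithoutMigration(sess *session.Session) error {
	guest := services.GetVirtualGuestService(sess).Id(12345)

	capacityGB := 25 // capacity of the portable disk image to attach
	ok, err := guest.CheckHostDiskAvailability(&capacityGB)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("host lacks %d GB of local disk; attaching would trigger a migration", capacityGB)
	}

	imageID := 67890
	txn, err := guest.AttachDiskImage(&imageID)
	if err != nil {
		return err
	}
	if txn.Id != nil {
		fmt.Println("attach transaction id:", *txn.Id)
	}
	return nil
}
```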
+func (r Virtual_Guest) AttachDiskImage(imageId *int) (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	params := []interface{}{
+		imageId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "attachDiskImage", params, &r.Options, &resp)
+	return
+}
+
+// Reopens the public and/or private ports to reverse the changes made when the server was isolated for a destructive action.
+func (r Virtual_Guest) CancelIsolationForDestructiveAction() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "cancelIsolationForDestructiveAction", nil, &r.Options, &resp)
+	return
+}
+
+// Captures a Flex Image of the hard disk on the virtual machine, based on the capture template parameter. Returns the image template group containing the disk image.
+func (r Virtual_Guest) CaptureImage(captureTemplate *datatypes.Container_Disk_Image_Capture_Template) (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) {
+	params := []interface{}{
+		captureTemplate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "captureImage", params, &r.Options, &resp)
+	return
+}
+
+// Checks the associated host for available disk space to determine if guest migration is necessary. This method is only used with local disks. If this method returns false, calling attachDiskImage($imageId) will automatically migrate the destination guest to a new host before attaching the portable volume.
+func (r Virtual_Guest) CheckHostDiskAvailability(diskCapacity *int) (resp bool, err error) {
+	params := []interface{}{
+		diskCapacity,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "checkHostDiskAvailability", params, &r.Options, &resp)
+	return
+}
+
+// Closes the monitoring alarm identified by the given alarm id.
+func (r Virtual_Guest) CloseAlarm(alarmId *string) (resp bool, err error) {
+	params := []interface{}{
+		alarmId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "closeAlarm", params, &r.Options, &resp)
+	return
+}
+
+// Creates a transaction to configure the guest's metadata disk. If the guest has user data associated with it, the transaction will create a small virtual drive and write the metadata to a file on the drive; if the drive already exists, the metadata will be rewritten. If the guest has no user data associated with it, the transaction will remove the virtual drive if it exists.
+//
+// WARNING: The transaction created by this service will shut down the guest while the metadata disk is configured. The guest will be turned back on once this process is complete.
+func (r Virtual_Guest) ConfigureMetadataDisk() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "configureMetadataDisk", nil, &r.Options, &resp)
+	return
+}
+
+// Create a transaction to archive a computing instance's block devices
+func (r Virtual_Guest) CreateArchiveTransaction(groupName *string, blockDevices []datatypes.Virtual_Guest_Block_Device, note *string) (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	params := []interface{}{
+		groupName,
+		blockDevices,
+		note,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "createArchiveTransaction", params, &r.Options, &resp)
+	return
+}
+
+//
+//
+// createObject() enables the creation of computing instances on an account. This
+// method is a simplified alternative to interacting with the ordering system directly.
+//
+//
+// In order to create a computing instance, a template object must be sent in with a few required
+// values.
+// +// +// When this method returns an order will have been placed for a computing instance of the specified configuration. +// +// +// To determine when the instance is available you can poll the instance via [[SoftLayer_Virtual_Guest/getObject|getObject]], with an [[Extended-Object-Masks|object mask]] requesting the provisionDate relational property. When provisionDate is not null, the instance will be ready. +// +// +// Warning: Computing instances created via this method will incur charges on your account. For testing input parameters see [[SoftLayer_Virtual_Guest/generateOrderTemplate|generateOrderTemplate]]. +// +// +// Input - [[SoftLayer_Virtual_Guest (type)|SoftLayer_Virtual_Guest]] +//
+//     * hostname
+//       Hostname for the computing instance.
+//         - Required
+//         - Type - string
+//
+//     * domain
+//       Domain for the computing instance.
+//         - Required
+//         - Type - string
+//
+//     * startCpus
+//       The number of CPU cores to allocate.
+//         - Required
+//         - Type - int
+//         - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//     * maxMemory
+//       The amount of memory to allocate in megabytes.
+//         - Required
+//         - Type - int
+//         - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//     * datacenter.name
+//       Specifies which datacenter the instance is to be provisioned in.
+//         - Required
+//         - Type - string
+//         - The datacenter property is a [[SoftLayer_Location (type)|location]] structure with the name field set.
+//         - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//       {
+//           "datacenter": {
+//               "name": "dal05"
+//           }
+//       }
+//
+//     * hourlyBillingFlag
+//       Specifies the billing type for the instance.
+//         - Required
+//         - Type - boolean
+//         - When true the computing instance will be billed on hourly usage, otherwise it will be billed on a monthly basis.
+//
+//     * localDiskFlag
+//       Specifies the disk type for the instance.
+//         - Required
+//         - Type - boolean
+//         - When true the disks for the computing instance will be provisioned on the host which it runs, otherwise SAN disks will be provisioned.
+//
+//     * dedicatedAccountHostOnlyFlag
+//       Specifies whether or not the instance must only run on hosts with instances from the same account.
+//         - Optional
+//         - Type - boolean
+//         - Default - false
+//         - When true this flag specifies that a compute instance is to run on hosts that only have guests from the same account.
+//
+//     * operatingSystemReferenceCode
+//       An identifier for the operating system to provision the computing instance with.
+//         - Conditionally required - Disallowed when blockDeviceTemplateGroup.globalIdentifier is provided, as the template will specify the operating system.
+//         - Type - string
+//         - Notice - Some operating systems are charged based on the value specified in startCpus. The price which is used can be determined by calling [[SoftLayer_Virtual_Guest/generateOrderTemplate|generateOrderTemplate]] with your desired device specifications.
+//         - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//     * blockDeviceTemplateGroup.globalIdentifier
+//       A global identifier for the template to be used to provision the computing instance.
+//         - Conditionally required - Disallowed when operatingSystemReferenceCode is provided, as the template will specify the operating system.
+//         - Type - string
+//         - Notice - Some operating systems are charged based on the value specified in startCpus. The price which is used can be determined by calling [[SoftLayer_Virtual_Guest/generateOrderTemplate|generateOrderTemplate]] with your desired device specifications.
+//         - Both public and non-public images may be specified.
+//         - A list of public images may be obtained via a request to [[SoftLayer_Virtual_Guest_Block_Device_Template_Group/getPublicImages|getPublicImages]].
+//         - A list of non-public images, images owned by an account or specifically shared with an account, may be obtained via a request to [[SoftLayer_Account/getBlockDeviceTemplateGroups|getBlockDeviceTemplateGroups]].
+//
+//       {
+//           "blockDeviceTemplateGroup": {
+//               "globalIdentifier": "07beadaa-1e11-476e-a188-3f7795feb9fb"
+//           }
+//       }
+//
+//     * networkComponents.maxSpeed
+//       Specifies the connection speed for the instance's network components.
+//         - Optional
+//         - Type - int
+//         - Default - 10
+//         - Description - The networkComponents property is an array with a single [[SoftLayer_Virtual_Guest_Network_Component (type)|network component]] structure. The maxSpeed property must be set to specify the network uplink speed, in megabits per second, of the computing instance.
+//         - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//       {
+//           "networkComponents": [
+//               {
+//                   "maxSpeed": 1000
+//               }
+//           ]
+//       }
+//
+//     * privateNetworkOnlyFlag
+//       Specifies whether or not the instance only has access to the private network.
+//         - Optional
+//         - Type - boolean
+//         - Default - false
+//         - When true this flag specifies that a compute instance is to only have access to the private network.
+//
+//     * primaryNetworkComponent.networkVlan.id
+//       Specifies the network vlan which is to be used for the frontend interface of the computing instance.
+//         - Optional
+//         - Type - int
+//         - Description - The primaryNetworkComponent property is a [[SoftLayer_Virtual_Guest_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the frontend network vlan of the computing instance.
+//
+//       {
+//           "primaryNetworkComponent": {
+//               "networkVlan": {
+//                   "id": 1
+//               }
+//           }
+//       }
+//
+//     * primaryBackendNetworkComponent.networkVlan.id
+//       Specifies the network vlan which is to be used for the backend interface of the computing instance.
+//         - Optional
+//         - Type - int
+//         - Description - The primaryBackendNetworkComponent property is a [[SoftLayer_Virtual_Guest_Network_Component (type)|network component]] structure with the networkVlan property populated with a [[SoftLayer_Network_Vlan (type)|vlan]] structure. The id property must be set to specify the backend network vlan of the computing instance.
+//
+//       {
+//           "primaryBackendNetworkComponent": {
+//               "networkVlan": {
+//                   "id": 2
+//               }
+//           }
+//       }
+//
+//     * blockDevices
+//       Block device and disk image settings for the computing instance.
+//         - Optional
+//         - Type - array of [[SoftLayer_Virtual_Guest_Block_Device (type)|SoftLayer_Virtual_Guest_Block_Device]]
+//         - Default - The smallest available capacity for the primary disk will be used. If an image template is specified the disk capacity will be provided by the template.
+//         - Description - The blockDevices property is an array of [[SoftLayer_Virtual_Guest_Block_Device (type)|block device]] structures. Each block device must specify the device property along with the diskImage property, which is a [[SoftLayer_Virtual_Disk_Image (type)|disk image]] structure with the capacity property set.
+//         - The device number '1' is reserved for the SWAP disk attached to the computing instance.
+//         - See [[SoftLayer_Virtual_Guest/getCreateObjectOptions|getCreateObjectOptions]] for available options.
+//
+//       {
+//           "blockDevices": [
+//               {
+//                   "device": "0",
+//                   "diskImage": {
+//                       "capacity": 100
+//                   }
+//               }
+//           ],
+//           "localDiskFlag": true
+//       }
+//
+//     * userData.value
+//       Arbitrary data to be made available to the computing instance.
+//         - Optional
+//         - Type - string
+//         - Description - The userData property is an array with a single [[SoftLayer_Virtual_Guest_Attribute (type)|attribute]] structure with the value property set to an arbitrary value.
+//         - This value can be retrieved via the [[SoftLayer_Resource_Metadata/getUserMetadata|getUserMetadata]] method from a request originating from the computing instance. This is primarily useful for providing data to software that may be on the instance and configured to execute upon first boot.
+//
+//       {
+//           "userData": [
+//               {
+//                   "value": "someValue"
+//               }
+//           ]
+//       }
+//
+//     * sshKeys
+//       SSH keys to install on the computing instance upon provisioning.
+//         - Optional
+//         - Type - array of [[SoftLayer_Security_Ssh_Key (type)|SoftLayer_Security_Ssh_Key]]
+//         - Description - The sshKeys property is an array of [[SoftLayer_Security_Ssh_Key (type)|SSH Key]] structures with the id property set to the value of an existing SSH key.
+//         - To create a new SSH key, call [[SoftLayer_Security_Ssh_Key/createObject|createObject]] on the [[SoftLayer_Security_Ssh_Key]] service.
+//         - To obtain a list of existing SSH keys, call [[SoftLayer_Account/getSshKeys|getSshKeys]] on the [[SoftLayer_Account]] service.
+//
+//       {
+//           "sshKeys": [
+//               {
+//                   "id": 123
+//               }
+//           ]
+//       }
+//
+//     * postInstallScriptUri
+//       Specifies the uri location of the script to be downloaded and run after installation is complete.
+//         - Optional
+//         - Type - string
+//
+//
+// REST Example:
    +// curl -X POST -d '{ +// "parameters":[ +// { +// "hostname": "host1", +// "domain": "example.com", +// "startCpus": 1, +// "maxMemory": 1024, +// "hourlyBillingFlag": true, +// "localDiskFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Virtual_Guest.json +// +// HTTP/1.1 201 Created +// Location: https://api.softlayer.com/rest/v3/SoftLayer_Virtual_Guest/1301396/getObject +// +// +// { +// "accountId": 232298, +// "createDate": "2012-11-30T16:28:17-06:00", +// "dedicatedAccountHostOnlyFlag": false, +// "domain": "example.com", +// "hostname": "host1", +// "id": 1301396, +// "lastPowerStateId": null, +// "lastVerifiedDate": null, +// "maxCpu": 1, +// "maxCpuUnits": "CORE", +// "maxMemory": 1024, +// "metricPollDate": null, +// "modifyDate": null, +// "privateNetworkOnlyFlag": false, +// "startCpus": 1, +// "statusId": 1001, +// "globalIdentifier": "2d203774-0ee1-49f5-9599-6ef67358dd31" +// } +// +func (r Virtual_Guest) CreateObject(templateObject *datatypes.Virtual_Guest) (resp datatypes.Virtual_Guest, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "createObject", params, &r.Options, &resp) + return +} + +// +// createObjects() enables the creation of multiple computing instances on an account in a single call. This +// method is a simplified alternative to interacting with the ordering system directly. +// +// +// In order to create a computing instance a set of template objects must be sent in with a few required +// values. +// +// +// Warning: Computing instances created via this method will incur charges on your account. +// +// +// See [[SoftLayer_Virtual_Guest/createObject|createObject]] for specifics on the requirements of each template object. +// +// +//

+// Example:

    +// curl -X POST -d '{ +// "parameters":[ +// [ +// { +// "hostname": "host1", +// "domain": "example.com", +// "startCpus": 1, +// "maxMemory": 1024, +// "hourlyBillingFlag": true, +// "localDiskFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// }, +// { +// "hostname": "host2", +// "domain": "example.com", +// "startCpus": 1, +// "maxMemory": 1024, +// "hourlyBillingFlag": true, +// "localDiskFlag": true, +// "operatingSystemReferenceCode": "UBUNTU_LATEST" +// } +// ] +// ] +// }' https://api.softlayer.com/rest/v3/SoftLayer_Virtual_Guest/createObjects.json +// +// HTTP/1.1 200 OK +// +// +// [ +// { +// "accountId": 232298, +// "createDate": "2012-11-30T23:56:48-06:00", +// "dedicatedAccountHostOnlyFlag": false, +// "domain": "softlayer.com", +// "hostname": "ubuntu1", +// "id": 1301456, +// "lastPowerStateId": null, +// "lastVerifiedDate": null, +// "maxCpu": 1, +// "maxCpuUnits": "CORE", +// "maxMemory": 1024, +// "metricPollDate": null, +// "modifyDate": null, +// "privateNetworkOnlyFlag": false, +// "startCpus": 1, +// "statusId": 1001, +// "globalIdentifier": "fed4c822-48c0-45d0-85e2-90476aa0c542" +// }, +// { +// "accountId": 232298, +// "createDate": "2012-11-30T23:56:49-06:00", +// "dedicatedAccountHostOnlyFlag": false, +// "domain": "softlayer.com", +// "hostname": "ubuntu2", +// "id": 1301457, +// "lastPowerStateId": null, +// "lastVerifiedDate": null, +// "maxCpu": 1, +// "maxCpuUnits": "CORE", +// "maxMemory": 1024, +// "metricPollDate": null, +// "modifyDate": null, +// "privateNetworkOnlyFlag": false, +// "startCpus": 1, +// "statusId": 1001, +// "globalIdentifier": "bed4c686-9562-4ade-9049-dc4d5b6b200c" +// } +// ] +// +func (r Virtual_Guest) CreateObjects(templateObjects []datatypes.Virtual_Guest) (resp []datatypes.Virtual_Guest, err error) { + params := []interface{}{ + templateObjects, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "createObjects", params, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) CreatePostSoftwareInstallTransaction(data *string, returnBoolean *bool) (resp bool, err error) { + params := []interface{}{ + data, + returnBoolean, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "createPostSoftwareInstallTransaction", params, &r.Options, &resp) + return +} + +// +// This method will cancel a computing instance effective immediately. For instances billed hourly, the charges will stop immediately after the method returns. +func (r Virtual_Guest) DeleteObject() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "deleteObject", nil, &r.Options, &resp) + return +} + +// Creates a transaction to detach a guest's disk image. If the disk image is already detached it will be ignored. +// +// WARNING: The transaction created by this service will shut down the guest while the disk image is attached. The guest will be turned back on once this process is complete. 
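The REST examples above translate directly to the Go client; the `sl` package supplies pointer helpers for building the template. A minimal sketch (note that, per the createObject warning, this places a real, billable order; the polling loop follows the provisionDate advice from the notes above):

```go
import (
	"fmt"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

// createGuest orders the same hourly Ubuntu instance as the curl
// example and waits until it is provisioned.
func createGuest(sess *session.Session) {
	svc := services.GetVirtualGuestService(sess)

	guest, err := svc.CreateObject(&datatypes.Virtual_Guest{
		Hostname:                     sl.String("host1"),
		Domain:                       sl.String("example.com"),
		StartCpus:                    sl.Int(1),
		MaxMemory:                    sl.Int(1024),
		HourlyBillingFlag:            sl.Bool(true),
		LocalDiskFlag:                sl.Bool(true),
		OperatingSystemReferenceCode: sl.String("UBUNTU_LATEST"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The instance is ready once provisionDate is non-null.
	for {
		g, err := svc.Id(*guest.Id).Mask("provisionDate").GetObject()
		if err != nil {
			log.Fatal(err)
		}
		if g.ProvisionDate != nil {
			break
		}
		time.Sleep(30 * time.Second)
	}
	fmt.Println("guest ready, id:", *guest.Id)
}
```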
+func (r Virtual_Guest) DetachDiskImage(imageId *int) (resp datatypes.Provisioning_Version1_Transaction, err error) { + params := []interface{}{ + imageId, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "detachDiskImage", params, &r.Options, &resp) + return +} + +// Edit a computing instance's properties +func (r Virtual_Guest) EditObject(templateObject *datatypes.Virtual_Guest) (resp bool, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "editObject", params, &r.Options, &resp) + return +} + +// Reboot a guest into the Idera Bare Metal Restore image. +func (r Virtual_Guest) ExecuteIderaBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "executeIderaBareMetalRestore", nil, &r.Options, &resp) + return +} + +// Reboot a guest into the R1Soft Bare Metal Restore image. +func (r Virtual_Guest) ExecuteR1SoftBareMetalRestore() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "executeR1SoftBareMetalRestore", nil, &r.Options, &resp) + return +} + +// Download and run remote script from uri on virtual guests. +func (r Virtual_Guest) ExecuteRemoteScript(uri *string) (err error) { + var resp datatypes.Void + params := []interface{}{ + uri, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "executeRemoteScript", params, &r.Options, &resp) + return +} + +// Reboot a Linux guest into the Xen rescue image. +func (r Virtual_Guest) ExecuteRescueLayer() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "executeRescueLayer", nil, &r.Options, &resp) + return +} + +// Find CCI by only its primary public or private IP address. IP addresses within secondary subnets tied to the CCI will not return the CCI. If no CCI is found, no errors are generated and no data is returned. +func (r Virtual_Guest) FindByIpAddress(ipAddress *string) (resp datatypes.Virtual_Guest, err error) { + params := []interface{}{ + ipAddress, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "findByIpAddress", params, &r.Options, &resp) + return +} + +// +// Obtain an [[SoftLayer_Container_Product_Order_Virtual_Guest (type)|order container]] that can be sent to [[SoftLayer_Product_Order/verifyOrder|verifyOrder]] or [[SoftLayer_Product_Order/placeOrder|placeOrder]]. +// +// +// This is primarily useful when there is a necessity to confirm the price which will be charged for an order. +// +// +// See [[SoftLayer_Virtual_Guest/createObject|createObject]] for specifics on the requirements of the template object parameter. +func (r Virtual_Guest) GenerateOrderTemplate(templateObject *datatypes.Virtual_Guest) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + templateObject, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "generateOrderTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The account that a virtual guest belongs to. +func (r Virtual_Guest) GetAccount() (resp datatypes.Account, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAccount", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetAccountOwnedPoolFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAccountOwnedPoolFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A virtual guest's currently active network monitoring incidents. 
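Because createObject incurs charges, `GenerateOrderTemplate` above is the documented way to preview pricing: it returns the order container that verifyOrder or placeOrder would consume, without submitting anything. A sketch that prints the line items for a template:

```go
import (
	"fmt"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// previewOrder prints the priced items a createObject template would
// be billed for, without placing the order.
func previewOrder(sess *session.Session, template *datatypes.Virtual_Guest) error {
	svc := services.GetVirtualGuestService(sess)

	order, err := svc.GenerateOrderTemplate(template)
	if err != nil {
		return err
	}
	for _, price := range order.Prices {
		if price.Item != nil && price.Item.Description != nil {
			fmt.Println(*price.Item.Description)
		}
	}
	return nil
}
```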
+func (r Virtual_Guest) GetActiveNetworkMonitorIncident() (resp []datatypes.Network_Monitor_Version1_Incident, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getActiveNetworkMonitorIncident", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Virtual_Guest) GetActiveTickets() (resp []datatypes.Ticket, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getActiveTickets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A transaction that is still being performed on a cloud server.
+func (r Virtual_Guest) GetActiveTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getActiveTransaction", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Any active transaction(s) that are currently running for the server (example: os reload).
+func (r Virtual_Guest) GetActiveTransactions() (resp []datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getActiveTransactions", nil, &r.Options, &resp)
+	return
+}
+
+// Return a collection of SoftLayer_Item_Price objects for an OS reload
+func (r Virtual_Guest) GetAdditionalRequiredPricesForOsReload(config *datatypes.Container_Hardware_Server_Configuration) (resp []datatypes.Product_Item_Price, err error) {
+	params := []interface{}{
+		config,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAdditionalRequiredPricesForOsReload", params, &r.Options, &resp)
+	return
+}
+
+// Returns monitoring alarm detailed history
+func (r Virtual_Guest) GetAlarmHistory(startDate *datatypes.Time, endDate *datatypes.Time, alarmId *string) (resp []datatypes.Container_Monitoring_Alarm_History, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+		alarmId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAlarmHistory", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage_Allowed_Host information to connect this Virtual Guest to Network Storage volumes that require access control lists.
+func (r Virtual_Guest) GetAllowedHost() (resp datatypes.Network_Storage_Allowed_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAllowedHost", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage objects that this SoftLayer_Virtual_Guest has access to.
+func (r Virtual_Guest) GetAllowedNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAllowedNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The SoftLayer_Network_Storage volume replicas that this SoftLayer_Virtual_Guest has access to.
+func (r Virtual_Guest) GetAllowedNetworkStorageReplicas() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAllowedNetworkStorageReplicas", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An antivirus / spyware software component object.
+func (r Virtual_Guest) GetAntivirusSpywareSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAntivirusSpywareSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Virtual_Guest) GetApplicationDeliveryController() (resp datatypes.Network_Application_Delivery_Controller, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getApplicationDeliveryController", nil, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Storage volumes that are authorized for access by this SoftLayer_Virtual_Guest.
+func (r Virtual_Guest) GetAttachedNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) {
+	params := []interface{}{
+		nasType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAttachedNetworkStorages", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Virtual_Guest) GetAttributes() (resp []datatypes.Virtual_Guest_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAttributes", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Virtual_Guest) GetAvailableBlockDevicePositions() (resp []string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAvailableBlockDevicePositions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An object that stores the maximum level for the monitoring query types and response types.
+func (r Virtual_Guest) GetAvailableMonitoring() (resp []datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAvailableMonitoring", nil, &r.Options, &resp)
+	return
+}
+
+// This method retrieves a list of SoftLayer_Network_Storage volumes that can be authorized to this SoftLayer_Virtual_Guest.
+func (r Virtual_Guest) GetAvailableNetworkStorages(nasType *string) (resp []datatypes.Network_Storage, err error) {
+	params := []interface{}{
+		nasType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAvailableNetworkStorages", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The average daily private bandwidth usage for the current billing cycle.
+func (r Virtual_Guest) GetAverageDailyPrivateBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAverageDailyPrivateBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The average daily public bandwidth usage for the current billing cycle.
+func (r Virtual_Guest) GetAverageDailyPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getAverageDailyPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's backend network components.
+func (r Virtual_Guest) GetBackendNetworkComponents() (resp []datatypes.Virtual_Guest_Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBackendNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's backend or private routers.
+func (r Virtual_Guest) GetBackendRouters() (resp []datatypes.Hardware, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBackendRouters", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A computing instance's allotted bandwidth (measured in GB).
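The date-range bandwidth calls that follow take `*datatypes.Time` values, which wrap an embedded `time.Time`. A sketch that pulls 30 days of public-interface samples for a placeholder guest id (the "public"/"private" network type strings are the conventional values for these calls):

```go
import (
	"fmt"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/datatypes"
	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// lastMonthPublicBandwidth fetches raw tracking-object samples for the
// guest's public network over the past 30 days.
func lastMonthPublicBandwidth(sess *session.Session) {
	guest := services.GetVirtualGuestService(sess).Id(12345)

	end := datatypes.Time{Time: time.Now()}
	start := datatypes.Time{Time: end.AddDate(0, 0, -30)}
	networkType := "public" // or "private"

	data, err := guest.GetBandwidthDataByDate(&start, &end, &networkType)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bandwidth samples returned\n", len(data))
}
```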
+func (r Virtual_Guest) GetBandwidthAllocation() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthAllocation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A computing instance's allotted detail record. Allotment details link bandwidth allocation with allotments.
+func (r Virtual_Guest) GetBandwidthAllotmentDetail() (resp datatypes.Network_Bandwidth_Version1_Allotment_Detail, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthAllotmentDetail", nil, &r.Options, &resp)
+	return
+}
+
+// Use this method when needing the bandwidth metric data for a single guest. It will gather the correct input parameters based on the date ranges.
+func (r Virtual_Guest) GetBandwidthDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, networkType *string) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		networkType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve a collection of bandwidth data from an individual public or private network tracking object. This data is ideal if you wish to employ your own traffic storage and graphing systems.
+func (r Virtual_Guest) GetBandwidthForDateRange(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthForDateRange", params, &r.Options, &resp)
+	return
+}
+
+// Use this method when needing a bandwidth image for a single guest. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified.
+func (r Virtual_Guest) GetBandwidthImage(networkType *string, snapshotRange *string, dateSpecified *datatypes.Time, dateSpecifiedEnd *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		networkType,
+		snapshotRange,
+		dateSpecified,
+		dateSpecifiedEnd,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthImage", params, &r.Options, &resp)
+	return
+}
+
+// Use this method when needing a bandwidth image for a single guest. It will gather the correct input parameters for the generic graphing utility based on the date ranges.
+func (r Virtual_Guest) GetBandwidthImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, networkType *string) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		networkType,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthImageByDate", params, &r.Options, &resp)
+	return
+}
+
+// Returns the total amount of bandwidth used during the time specified for a computing instance.
+func (r Virtual_Guest) GetBandwidthTotal(startDateTime *datatypes.Time, endDateTime *datatypes.Time, direction *string, side *string) (resp uint, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		direction,
+		side,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBandwidthTotal", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The raw bandwidth usage data for the current billing cycle. One object will be returned for each network this server is attached to.
+func (r Virtual_Guest) GetBillingCycleBandwidthUsage() (resp []datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBillingCycleBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw private bandwidth usage data for the current billing cycle. +func (r Virtual_Guest) GetBillingCyclePrivateBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBillingCyclePrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The raw public bandwidth usage data for the current billing cycle. +func (r Virtual_Guest) GetBillingCyclePublicBandwidthUsage() (resp datatypes.Network_Bandwidth_Usage, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBillingCyclePublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The billing item for a CloudLayer Compute Instance. +func (r Virtual_Guest) GetBillingItem() (resp datatypes.Billing_Item_Virtual_Guest, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBillingItem", nil, &r.Options, &resp) + return +} + +// Retrieve Determines whether the instance is ineligible for cancellation because it is disconnected. +func (r Virtual_Guest) GetBlockCancelBecauseDisconnectedFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBlockCancelBecauseDisconnectedFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The global identifier for the image template that was used to provision or reload a guest. +func (r Virtual_Guest) GetBlockDeviceTemplateGroup() (resp datatypes.Virtual_Guest_Block_Device_Template_Group, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBlockDeviceTemplateGroup", nil, &r.Options, &resp) + return +} + +// Retrieve A computing instance's block devices. Block devices link [[SoftLayer_Virtual_Disk_Image|disk images]] to computing instances. +func (r Virtual_Guest) GetBlockDevices() (resp []datatypes.Virtual_Guest_Block_Device, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBlockDevices", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) GetBootOrder() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getBootOrder", nil, &r.Options, &resp) + return +} + +// Gets the console access logs for a computing instance +func (r Virtual_Guest) GetConsoleAccessLog() (resp []datatypes.Network_Logging_Syslog, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getConsoleAccessLog", nil, &r.Options, &resp) + return +} + +// Retrieve A flag indicating a computing instance's console IP address is assigned. +func (r Virtual_Guest) GetConsoleIpAddressFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getConsoleIpAddressFlag", nil, &r.Options, &resp) + return +} + +// Retrieve A record containing information about a computing instance's console IP and port number. +func (r Virtual_Guest) GetConsoleIpAddressRecord() (resp datatypes.Virtual_Guest_Network_Component_IpAddress, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getConsoleIpAddressRecord", nil, &r.Options, &resp) + return +} + +// Retrieve A continuous data protection software component object. 
+func (r Virtual_Guest) GetContinuousDataProtectionSoftwareComponent() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getContinuousDataProtectionSoftwareComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's control panel.
+func (r Virtual_Guest) GetControlPanel() (resp datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getControlPanel", nil, &r.Options, &resp)
+	return
+}
+
+// If the virtual server currently has an operating system that has a core capacity restriction, return the associated core-restricted operating system item price. Some operating systems (e.g., Red Hat Enterprise Linux) may be billed by the number of processor cores and therefore require that a certain number of cores be present on the server.
+func (r Virtual_Guest) GetCoreRestrictedOperatingSystemPrice() (resp datatypes.Product_Item_Price, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCoreRestrictedOperatingSystemPrice", nil, &r.Options, &resp)
+	return
+}
+
+// Use this method when needing the metric data for a single guest's CPUs. It will gather the correct input parameters based on the date ranges.
+func (r Virtual_Guest) GetCpuMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, cpuIndexes []int) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		cpuIndexes,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCpuMetricDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Use this method when needing a CPU usage image for a single guest. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified.
+func (r Virtual_Guest) GetCpuMetricImage(snapshotRange *string, dateSpecified *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		snapshotRange,
+		dateSpecified,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCpuMetricImage", params, &r.Options, &resp)
+	return
+}
+
+// Use this method when needing a CPU usage image for a single guest. It will gather the correct input parameters for the generic graphing utility based on the date ranges.
+func (r Virtual_Guest) GetCpuMetricImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time, cpuIndexes []int) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+		cpuIndexes,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCpuMetricImageByDate", params, &r.Options, &resp)
+	return
+}
+
+//
+// There are many options that may be provided while ordering a computing instance; this method can be used to determine what these options are.
+//
+//
+// Detailed information on the return value can be found on the data type page for [[SoftLayer_Container_Virtual_Guest_Configuration (type)]].
+func (r Virtual_Guest) GetCreateObjectOptions() (resp datatypes.Container_Virtual_Guest_Configuration, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCreateObjectOptions", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An object that provides commonly used bandwidth summary components for the current billing cycle.
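`GetCreateObjectOptions`, documented above, reports the valid values for a createObject template rather than requiring trial and error. A sketch that lists the orderable datacenters; the container field names used here are read from the generated datatypes and are worth verifying against the vendored version:

```go
import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// printDatacenterOptions lists datacenter names accepted by createObject.
func printDatacenterOptions(sess *session.Session) {
	svc := services.GetVirtualGuestService(sess)

	opts, err := svc.GetCreateObjectOptions()
	if err != nil {
		log.Fatal(err)
	}
	for _, option := range opts.Datacenters {
		t := option.Template
		if t != nil && t.Datacenter != nil && t.Datacenter.Name != nil {
			fmt.Println(*t.Datacenter.Name)
		}
	}
}
```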
+func (r Virtual_Guest) GetCurrentBandwidthSummary() (resp datatypes.Metric_Tracking_Object_Bandwidth_Summary, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCurrentBandwidthSummary", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve the current billing detail for this computing instance as a list of billing items.
+func (r Virtual_Guest) GetCurrentBillingDetail() (resp []datatypes.Billing_Item, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCurrentBillingDetail", nil, &r.Options, &resp)
+	return
+}
+
+// Get the total billing price in US Dollars ($) for this instance. This includes all bandwidth used up to this point for this instance.
+func (r Virtual_Guest) GetCurrentBillingTotal() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCurrentBillingTotal", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve bandwidth graph by date.
+func (r Virtual_Guest) GetCustomBandwidthDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) {
+	params := []interface{}{
+		graphData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCustomBandwidthDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve custom metric graph data by date.
+func (r Virtual_Guest) GetCustomMetricDataByDate(graphData *datatypes.Container_Graph) (resp datatypes.Container_Graph, err error) {
+	params := []interface{}{
+		graphData,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getCustomMetricDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The datacenter that a virtual guest resides in.
+func (r Virtual_Guest) GetDatacenter() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getDatacenter", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The dedicated host associated with this guest.
+func (r Virtual_Guest) GetDedicatedHost() (resp datatypes.Virtual_DedicatedHost, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getDedicatedHost", nil, &r.Options, &resp)
+	return
+}
+
+// Return a drive retention SoftLayer_Item_Price object for a guest.
+func (r Virtual_Guest) GetDriveRetentionItemPrice() (resp datatypes.Product_Item_Price, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getDriveRetentionItemPrice", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's associated EVault network storage service account.
+func (r Virtual_Guest) GetEvaultNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getEvaultNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Get the subnets associated with this CloudLayer computing instance that are protectable by a network component firewall.
+func (r Virtual_Guest) GetFirewallProtectableSubnets() (resp []datatypes.Network_Subnet, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getFirewallProtectableSubnets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A computing instance's hardware firewall services.
+func (r Virtual_Guest) GetFirewallServiceComponent() (resp datatypes.Network_Component_Firewall, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getFirewallServiceComponent", nil, &r.Options, &resp) + return +} + +// no documentation yet +func (r Virtual_Guest) GetFirstAvailableBlockDevicePosition() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getFirstAvailableBlockDevicePosition", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's frontend network components. +func (r Virtual_Guest) GetFrontendNetworkComponents() (resp []datatypes.Virtual_Guest_Network_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getFrontendNetworkComponents", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's frontend or public router. +func (r Virtual_Guest) GetFrontendRouters() (resp datatypes.Hardware, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getFrontendRouters", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's universally unique identifier. +func (r Virtual_Guest) GetGlobalIdentifier() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getGlobalIdentifier", nil, &r.Options, &resp) + return +} + +// Retrieve +func (r Virtual_Guest) GetGuestBootParameter() (resp datatypes.Virtual_Guest_Boot_Parameter, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getGuestBootParameter", nil, &r.Options, &resp) + return +} + +// Retrieve The virtual host on which a virtual guest resides (available only on private clouds). +func (r Virtual_Guest) GetHost() (resp datatypes.Virtual_Host, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getHost", nil, &r.Options, &resp) + return +} + +// Retrieve A host IPS software component object. +func (r Virtual_Guest) GetHostIpsSoftwareComponent() (resp datatypes.Software_Component, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getHostIpsSoftwareComponent", nil, &r.Options, &resp) + return +} + +// Retrieve Whether or not a computing instance is billed hourly instead of monthly. +func (r Virtual_Guest) GetHourlyBillingFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getHourlyBillingFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The total private inbound bandwidth for this computing instance for the current billing cycle. +func (r Virtual_Guest) GetInboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getInboundPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public inbound bandwidth for this computing instance for the current billing cycle. 
+func (r Virtual_Guest) GetInboundPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getInboundPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Virtual_Guest) GetInternalTagReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getInternalTagReferences", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Virtual_Guest) GetIsoBootImage() (resp datatypes.Virtual_Disk_Image, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getIsoBootImage", nil, &r.Options, &resp)
+	return
+}
+
+// Return a collection of SoftLayer_Item_Price objects from a collection of SoftLayer_Software_Description objects.
+func (r Virtual_Guest) GetItemPricesFromSoftwareDescriptions(softwareDescriptions []datatypes.Software_Description, includeTranslationsFlag *bool, returnAllPricesFlag *bool) (resp []datatypes.Product_Item, err error) {
+	params := []interface{}{
+		softwareDescriptions,
+		includeTranslationsFlag,
+		returnAllPricesFlag,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getItemPricesFromSoftwareDescriptions", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The last known power state of a virtual guest in the event the guest is turned off outside of IMS or has gone offline.
+func (r Virtual_Guest) GetLastKnownPowerState() (resp datatypes.Virtual_Guest_Power_State, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLastKnownPowerState", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The last transaction in which a cloud server's operating system was loaded.
+func (r Virtual_Guest) GetLastOperatingSystemReload() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLastOperatingSystemReload", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The last transaction a cloud server performed.
+func (r Virtual_Guest) GetLastTransaction() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLastTransaction", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A virtual guest's latest network monitoring incident.
+func (r Virtual_Guest) GetLatestNetworkMonitorIncident() (resp datatypes.Network_Monitor_Version1_Incident, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLatestNetworkMonitorIncident", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A flag indicating that the virtual guest has at least one disk which is local to the host it runs on. This does not include a SWAP device.
+func (r Virtual_Guest) GetLocalDiskFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLocalDiskFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Where the guest is located within SoftLayer's location hierarchy.
+func (r Virtual_Guest) GetLocation() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getLocation", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A flag indicating that the virtual guest is a managed resource.
+func (r Virtual_Guest) GetManagedResourceFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getManagedResourceFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Use this method when needing the metric data for memory for a single computing instance.
+func (r Virtual_Guest) GetMemoryMetricDataByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp []datatypes.Metric_Tracking_Object_Data, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMemoryMetricDataByDate", params, &r.Options, &resp)
+	return
+}
+
+// Use this method when needing a memory usage image for a single guest. It will gather the correct input parameters for the generic graphing utility automatically based on the snapshot specified.
+func (r Virtual_Guest) GetMemoryMetricImage(snapshotRange *string, dateSpecified *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		snapshotRange,
+		dateSpecified,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMemoryMetricImage", params, &r.Options, &resp)
+	return
+}
+
+// Use this method when needing an image displaying the amount of memory used over time for a single computing instance. It will gather the correct input parameters for the generic graphing utility based on the date range provided.
+func (r Virtual_Guest) GetMemoryMetricImageByDate(startDateTime *datatypes.Time, endDateTime *datatypes.Time) (resp datatypes.Container_Bandwidth_GraphOutputs, err error) {
+	params := []interface{}{
+		startDateTime,
+		endDateTime,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMemoryMetricImageByDate", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's metric tracking object.
+func (r Virtual_Guest) GetMetricTrackingObject() (resp datatypes.Metric_Tracking_Object, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMetricTrackingObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The metric tracking object id for this guest.
+func (r Virtual_Guest) GetMetricTrackingObjectId() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMetricTrackingObjectId", nil, &r.Options, &resp)
+	return
+}
+
+// Returns open monitoring alarms for a given time period
+func (r Virtual_Guest) GetMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringActiveAlarms", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Virtual_Guest) GetMonitoringAgents() (resp []datatypes.Monitoring_Agent, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringAgents", nil, &r.Options, &resp)
+	return
+}
+
+// Returns closed monitoring alarms for a given time period
+func (r Virtual_Guest) GetMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringClosedAlarms", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Virtual_Guest) GetMonitoringRobot() (resp datatypes.Monitoring_Robot, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringRobot", nil, &r.Options, &resp)
+	return
+}
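A minimal sketch of the metric-by-date calls above, assuming a hypothetical guest id 1234 and the sl.Time/sl.Get helpers vendored later in this patch:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New() // credentials from the environment or ~/.softlayer
	guest := services.GetVirtualGuestService(sess).Id(1234)

	end := time.Now()
	start := end.Add(-24 * time.Hour)

	// sl.Time wraps a time.Time in the *datatypes.Time the method expects.
	data, err := guest.GetMemoryMetricDataByDate(sl.Time(start), sl.Time(end))
	if err != nil {
		log.Fatal(err)
	}
	for _, point := range data {
		fmt.Println(sl.Get(point.DateTime), sl.Get(point.Counter))
	}
}
```

+
+// Retrieve A virtual guest's network monitoring services.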
+func (r Virtual_Guest) GetMonitoringServiceComponent() (resp datatypes.Network_Monitor_Version1_Query_Host_Stratum, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringServiceComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Virtual_Guest) GetMonitoringServiceEligibilityFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringServiceEligibilityFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Virtual_Guest) GetMonitoringServiceFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringServiceFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The monitoring notification objects for this guest. Each object links this guest instance to a user account that will be notified if monitoring on this guest object fails.
+func (r Virtual_Guest) GetMonitoringUserNotification() (resp []datatypes.User_Customer_Notification_Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getMonitoringUserNotification", nil, &r.Options, &resp)
+	return
+}
+
+// Get the IP addresses associated with this CloudLayer computing instance that are protectable by a network component firewall. Note, this may not return all values for IPv6 subnets for this CloudLayer computing instance. Please use getFirewallProtectableSubnets to get all protectable subnets.
+func (r Virtual_Guest) GetNetworkComponentFirewallProtectableIpAddresses() (resp []datatypes.Network_Subnet_IpAddress, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkComponentFirewallProtectableIpAddresses", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's network components.
+func (r Virtual_Guest) GetNetworkComponents() (resp []datatypes.Virtual_Guest_Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve All of a virtual guest's network monitoring incidents.
+func (r Virtual_Guest) GetNetworkMonitorIncidents() (resp []datatypes.Network_Monitor_Version1_Incident, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkMonitorIncidents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's network monitors.
+func (r Virtual_Guest) GetNetworkMonitors() (resp []datatypes.Network_Monitor_Version1_Query_Host, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkMonitors", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's associated network storage accounts.
+func (r Virtual_Guest) GetNetworkStorage() (resp []datatypes.Network_Storage, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkStorage", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The network Vlans that a guest's network components are associated with.
+func (r Virtual_Guest) GetNetworkVlans() (resp []datatypes.Network_Vlan, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getNetworkVlans", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Virtual_Guest) GetObject() (resp datatypes.Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getObject", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve An open ticket requesting cancellation of this server, if one exists.
+func (r Virtual_Guest) GetOpenCancellationTicket() (resp datatypes.Ticket, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOpenCancellationTicket", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's operating system. +func (r Virtual_Guest) GetOperatingSystem() (resp datatypes.Software_Component_OperatingSystem, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOperatingSystem", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's operating system software description. +func (r Virtual_Guest) GetOperatingSystemReferenceCode() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOperatingSystemReferenceCode", nil, &r.Options, &resp) + return +} + +// Obtain an order container that is ready to be sent to the [[SoftLayer_Product_Order#placeOrder|SoftLayer_Product_Order::placeOrder]] method. This container will include all services that the selected computing instance has. If desired you may remove prices which were returned. +func (r Virtual_Guest) GetOrderTemplate(billingType *string, orderPrices []datatypes.Product_Item_Price) (resp datatypes.Container_Product_Order, err error) { + params := []interface{}{ + billingType, + orderPrices, + } + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOrderTemplate", params, &r.Options, &resp) + return +} + +// Retrieve The original package id provided with the order for a Cloud Computing Instance. +func (r Virtual_Guest) GetOrderedPackageId() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOrderedPackageId", nil, &r.Options, &resp) + return +} + +// Retrieve The total private outbound bandwidth for this computing instance for the current billing cycle. +func (r Virtual_Guest) GetOutboundPrivateBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOutboundPrivateBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve The total public outbound bandwidth for this computing instance for the current billing cycle. +func (r Virtual_Guest) GetOutboundPublicBandwidthUsage() (resp datatypes.Float64, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOutboundPublicBandwidthUsage", nil, &r.Options, &resp) + return +} + +// Retrieve Whether the bandwidth usage for this computing instance for the current billing cycle exceeds the allocation. +func (r Virtual_Guest) GetOverBandwidthAllocationFlag() (resp int, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getOverBandwidthAllocationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve When true this virtual guest must be migrated using SoftLayer_Virtual_Guest::migrate. +func (r Virtual_Guest) GetPendingMigrationFlag() (resp bool, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPendingMigrationFlag", nil, &r.Options, &resp) + return +} + +// Retrieve The current power state of a virtual guest. +func (r Virtual_Guest) GetPowerState() (resp datatypes.Virtual_Guest_Power_State, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPowerState", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's primary private IP address. +func (r Virtual_Guest) GetPrimaryBackendIpAddress() (resp string, err error) { + err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPrimaryBackendIpAddress", nil, &r.Options, &resp) + return +} + +// Retrieve A guest's primary backend network component. 
+func (r Virtual_Guest) GetPrimaryBackendNetworkComponent() (resp datatypes.Virtual_Guest_Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPrimaryBackendNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The guest's primary public IP address.
+func (r Virtual_Guest) GetPrimaryIpAddress() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPrimaryIpAddress", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's primary public network component.
+func (r Virtual_Guest) GetPrimaryNetworkComponent() (resp datatypes.Virtual_Guest_Network_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPrimaryNetworkComponent", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether the computing instance only has access to the private network.
+func (r Virtual_Guest) GetPrivateNetworkOnlyFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getPrivateNetworkOnlyFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether the bandwidth usage for this computing instance for the current billing cycle is projected to exceed the allocation.
+func (r Virtual_Guest) GetProjectedOverBandwidthAllocationFlag() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getProjectedOverBandwidthAllocationFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The projected public outbound bandwidth for this computing instance for the current billing cycle.
+func (r Virtual_Guest) GetProjectedPublicBandwidthUsage() (resp datatypes.Float64, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getProjectedPublicBandwidthUsage", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Virtual_Guest) GetProvisionDate() (resp datatypes.Time, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getProvisionDate", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Recent events that impact this computing instance.
+func (r Virtual_Guest) GetRecentEvents() (resp []datatypes.Notification_Occurrence_Event, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRecentEvents", nil, &r.Options, &resp)
+	return
+}
+
+// Recent metric data for a guest
+func (r Virtual_Guest) GetRecentMetricData(time *uint) (resp []datatypes.Metric_Tracking_Object, err error) {
+	params := []interface{}{
+		time,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRecentMetricData", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The regional group this guest is in.
+func (r Virtual_Guest) GetRegionalGroup() (resp datatypes.Location_Group_Regional, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRegionalGroup", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Virtual_Guest) GetRegionalInternetRegistry() (resp datatypes.Network_Regional_Internet_Registry, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRegionalInternetRegistry", nil, &r.Options, &resp)
+	return
+}
+
+// Returns open monitoring alarms generated by monitoring agents that reside in the SoftLayer monitoring cluster.
+//
+// A monitoring agent with "remoteMonitoringAgentFlag" indicates that it works from the SoftLayer monitoring cluster. If a monitoring agent does not have the flag, it resides in your cloud instance.
+func (r Virtual_Guest) GetRemoteMonitoringActiveAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRemoteMonitoringActiveAlarms", params, &r.Options, &resp)
+	return
+}
+
+// Returns closed monitoring alarms generated by monitoring agents that reside in the SoftLayer monitoring cluster.
+//
+// A monitoring agent with "remoteMonitoringAgentFlag" indicates that it works from the SoftLayer monitoring cluster. If a monitoring agent does not have the flag, it resides in your cloud instance.
+func (r Virtual_Guest) GetRemoteMonitoringClosedAlarms(startDate *datatypes.Time, endDate *datatypes.Time) (resp []datatypes.Container_Monitoring_Alarm_History, err error) {
+	params := []interface{}{
+		startDate,
+		endDate,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getRemoteMonitoringClosedAlarms", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve the reverse domain records associated with this server.
+func (r Virtual_Guest) GetReverseDomainRecords() (resp []datatypes.Dns_Domain, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getReverseDomainRecords", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Collection of scale assets this guest corresponds to.
+func (r Virtual_Guest) GetScaleAssets() (resp []datatypes.Scale_Asset, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getScaleAssets", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The scale member for this guest, if applicable.
+func (r Virtual_Guest) GetScaleMember() (resp datatypes.Scale_Member_Virtual_Guest, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getScaleMember", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve Whether or not this guest is a member of a scale group and was automatically created as part of a scale group action.
+func (r Virtual_Guest) GetScaledFlag() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getScaledFlag", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's vulnerability scan requests.
+func (r Virtual_Guest) GetSecurityScanRequests() (resp []datatypes.Network_Security_Scanner_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getSecurityScanRequests", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The server room that a guest is located at. There may be more than one server room for every data center.
+func (r Virtual_Guest) GetServerRoom() (resp datatypes.Location, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getServerRoom", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A guest's installed software.
+func (r Virtual_Guest) GetSoftwareComponents() (resp []datatypes.Software_Component, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getSoftwareComponents", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve SSH keys to be installed on the server during provisioning or an OS reload.
+func (r Virtual_Guest) GetSshKeys() (resp []datatypes.Security_Ssh_Key, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getSshKeys", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A computing instance's status.
+func (r Virtual_Guest) GetStatus() (resp datatypes.Virtual_Guest_Status, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getStatus", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve
+func (r Virtual_Guest) GetTagReferences() (resp []datatypes.Tag_Reference, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getTagReferences", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The type of this virtual guest.
+func (r Virtual_Guest) GetType() (resp datatypes.Virtual_Guest_Type, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getType", nil, &r.Options, &resp)
+	return
+}
+
+// getUpgradeItemPrices() retrieves a list of all upgrades available to a CloudLayer Computing Instance. Upgradeable items include, but are not limited to, number of cores, amount of RAM, storage configuration, and network port speed.
+//
+// This method excludes downgrade item prices by default. Set the "includeDowngradeItemPrices" parameter to true to include them.
+func (r Virtual_Guest) GetUpgradeItemPrices(includeDowngradeItemPrices *bool) (resp []datatypes.Product_Item_Price, err error) {
+	params := []interface{}{
+		includeDowngradeItemPrices,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getUpgradeItemPrices", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve A computing instance's associated upgrade request object, if any.
+func (r Virtual_Guest) GetUpgradeRequest() (resp datatypes.Product_Upgrade_Request, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getUpgradeRequest", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A base64 encoded string containing custom user data for a Cloud Computing Instance order.
+func (r Virtual_Guest) GetUserData() (resp []datatypes.Virtual_Guest_Attribute, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getUserData", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve A list of users that have access to this computing instance.
+func (r Virtual_Guest) GetUsers() (resp []datatypes.User_Customer, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getUsers", nil, &r.Options, &resp)
+	return
+}
+
+// This method will return the list of block device template groups that are valid for the host. For instance, it will validate that the template groups returned are compatible with the size and number of disks on the host.
+func (r Virtual_Guest) GetValidBlockDeviceTemplateGroups(visibility *string) (resp []datatypes.Virtual_Guest_Block_Device_Template_Group, err error) {
+	params := []interface{}{
+		visibility,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getValidBlockDeviceTemplateGroups", params, &r.Options, &resp)
+	return
+}
+
+// Retrieve The name of the bandwidth allotment that the hardware belongs to.
+func (r Virtual_Guest) GetVirtualRack() (resp datatypes.Network_Bandwidth_Version1_Allotment, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getVirtualRack", nil, &r.Options, &resp)
+	return
+}
+
+// Retrieve The id of the bandwidth allotment that a computing instance belongs to.
+func (r Virtual_Guest) GetVirtualRackId() (resp int, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getVirtualRackId", nil, &r.Options, &resp)
+	return
+}
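A short sketch of getUpgradeItemPrices, using a hypothetical guest id; the *bool argument is built with the vendored sl.Bool helper, and the "Item.Description" lookup via sl.Grab assumes the usual product item price shape:

```go
package main

import (
	"fmt"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

// listUpgrades prints every upgrade item price for a guest, including
// downgrades (the parameter is a *bool, hence the sl.Bool helper).
func listUpgrades(sess *session.Session, guestId int) error {
	guest := services.GetVirtualGuestService(sess).Id(guestId)
	prices, err := guest.GetUpgradeItemPrices(sl.Bool(true))
	if err != nil {
		return err
	}
	for _, p := range prices {
		fmt.Println(sl.Get(p.Id), sl.Grab(p, "Item.Description"))
	}
	return nil
}
```

+
+// Retrieve The name of the bandwidth allotment that a computing instance belongs to.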
+func (r Virtual_Guest) GetVirtualRackName() (resp string, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "getVirtualRackName", nil, &r.Options, &resp)
+	return
+}
+
+// Issues a ping command and returns the success (true) or failure (false) of the ping command.
+func (r Virtual_Guest) IsBackendPingable() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "isBackendPingable", nil, &r.Options, &resp)
+	return
+}
+
+// Issues a ping command and returns the success (true) or failure (false) of the ping command.
+func (r Virtual_Guest) IsPingable() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "isPingable", nil, &r.Options, &resp)
+	return
+}
+
+// Closes the public or private ports to isolate the instance before a destructive action.
+func (r Virtual_Guest) IsolateInstanceForDestructiveAction() (err error) {
+	var resp datatypes.Void
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "isolateInstanceForDestructiveAction", nil, &r.Options, &resp)
+	return
+}
+
+// Creates a transaction to migrate a virtual guest to a new host. NOTE: Will only migrate if SoftLayer_Virtual_Guest property pendingMigrationFlag = true
+func (r Virtual_Guest) Migrate() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "migrate", nil, &r.Options, &resp)
+	return
+}
+
+// Create a transaction to migrate an instance from one dedicated host to another dedicated host
+func (r Virtual_Guest) MigrateDedicatedHost(destinationHostId *int) (err error) {
+	var resp datatypes.Void
+	params := []interface{}{
+		destinationHostId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "migrateDedicatedHost", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Virtual_Guest) MountIsoImage(diskImageId *int) (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	params := []interface{}{
+		diskImageId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "mountIsoImage", params, &r.Options, &resp)
+	return
+}
+
+// Pause a virtual guest
+func (r Virtual_Guest) Pause() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "pause", nil, &r.Options, &resp)
+	return
+}
+
+// Power cycle a virtual guest
+func (r Virtual_Guest) PowerCycle() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "powerCycle", nil, &r.Options, &resp)
+	return
+}
+
+// Power off a virtual guest
+func (r Virtual_Guest) PowerOff() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "powerOff", nil, &r.Options, &resp)
+	return
+}
+
+// Power off a virtual guest by first attempting a graceful shutdown of its operating system
+func (r Virtual_Guest) PowerOffSoft() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "powerOffSoft", nil, &r.Options, &resp)
+	return
+}
+
+// Power on a virtual guest
+func (r Virtual_Guest) PowerOn() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "powerOn", nil, &r.Options, &resp)
+	return
+}
+
+// Power cycle a virtual guest
+func (r Virtual_Guest) RebootDefault() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "rebootDefault", nil, &r.Options, &resp)
+	return
+}
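A sketch of combining the soft and hard power-off calls above (hypothetical guest id, abbreviated error handling):

```go
package main

import (
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
)

// shutDown asks the operating system to shut down first (powerOffSoft) and
// only cuts power (powerOff) if the soft attempt fails.
func shutDown(sess *session.Session, guestId int) {
	guest := services.GetVirtualGuestService(sess).Id(guestId)
	if ok, err := guest.PowerOffSoft(); err == nil && ok {
		return
	}
	if _, err := guest.PowerOff(); err != nil {
		log.Fatal(err)
	}
}
```

+
+// Power cycle a guest.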
+func (r Virtual_Guest) RebootHard() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "rebootHard", nil, &r.Options, &resp)
+	return
+}
+
+// Attempt to complete a soft reboot of a guest by shutting down the operating system.
+func (r Virtual_Guest) RebootSoft() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "rebootSoft", nil, &r.Options, &resp)
+	return
+}
+
+// Create a transaction to perform an OS reload
+func (r Virtual_Guest) ReloadCurrentOperatingSystemConfiguration() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "reloadCurrentOperatingSystemConfiguration", nil, &r.Options, &resp)
+	return
+}
+
+// Reloads current operating system configuration.
+//
+// This service has a confirmation protocol for proceeding with the reload. To proceed with the reload without confirmation, simply pass in 'FORCE' as the token parameter. To proceed with the reload with confirmation, simply call the service with no parameter. A token string will be returned by this service. The token will remain active for 10 minutes. Use this token as the parameter to confirm that a reload is to be performed for the server.
+//
+// As a precaution, we strongly recommend backing up all data before reloading the operating system. The reload will format the primary disk and will reconfigure the computing instance to the current specifications on record.
+//
+// If reloading from an image template, we recommend first getting the list of valid private block device template groups by calling the getOperatingSystemReloadImages method.
+func (r Virtual_Guest) ReloadOperatingSystem(token *string, config *datatypes.Container_Hardware_Server_Configuration) (resp string, err error) {
+	params := []interface{}{
+		token,
+		config,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "reloadOperatingSystem", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to remove access to a SoftLayer_Network_Storage volume that supports host- or network-level access control.
+func (r Virtual_Guest) RemoveAccessToNetworkStorage(networkStorageTemplateObject *datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObject,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "removeAccessToNetworkStorage", params, &r.Options, &resp)
+	return
+}
+
+// This method is used to remove access to multiple SoftLayer_Network_Storage volumes that support host- or network-level access control.
+func (r Virtual_Guest) RemoveAccessToNetworkStorageList(networkStorageTemplateObjects []datatypes.Network_Storage) (resp bool, err error) {
+	params := []interface{}{
+		networkStorageTemplateObjects,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "removeAccessToNetworkStorageList", params, &r.Options, &resp)
+	return
+}
+
+// Resume a virtual guest
+func (r Virtual_Guest) Resume() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "resume", nil, &r.Options, &resp)
+	return
+}
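A sketch of the confirmation protocol described above, using the 'FORCE' token together with verifyReloadOperatingSystem (defined just below). The guest id is hypothetical, and passing nil for the configuration is an assumption; a real reload would normally supply a populated Container_Hardware_Server_Configuration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	sess := session.New()
	guest := services.GetVirtualGuestService(sess).Id(1234)

	// Check that the reload would succeed before destroying any data.
	if _, err := guest.VerifyReloadOperatingSystem(nil); err != nil {
		log.Fatal(err)
	}

	// "FORCE" bypasses the confirmation-token exchange described above.
	result, err := guest.ReloadOperatingSystem(sl.String("FORCE"), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("reload started:", result)
}
```

+
+// Sets the private network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, or 1000. The new speed must be equal to or less than the max speed of the interface.
+//
+// It will take less than a minute to update the port speed.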
+func (r Virtual_Guest) SetPrivateNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) {
+	params := []interface{}{
+		newSpeed,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "setPrivateNetworkInterfaceSpeed", params, &r.Options, &resp)
+	return
+}
+
+// Sets the public network interface speed to the new speed. Speed values can only be 0 (Disconnect), 10, 100, or 1000. The new speed must be equal to or less than the max speed of the interface.
+//
+// It will take less than a minute to update the port speed.
+func (r Virtual_Guest) SetPublicNetworkInterfaceSpeed(newSpeed *int) (resp bool, err error) {
+	params := []interface{}{
+		newSpeed,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "setPublicNetworkInterfaceSpeed", params, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Virtual_Guest) SetTags(tags *string) (resp bool, err error) {
+	params := []interface{}{
+		tags,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "setTags", params, &r.Options, &resp)
+	return
+}
+
+// Sets the data that will be written to the configuration drive.
+func (r Virtual_Guest) SetUserMetadata(metadata []string) (resp bool, err error) {
+	params := []interface{}{
+		metadata,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "setUserMetadata", params, &r.Options, &resp)
+	return
+}
+
+// Shuts down the private network port
+func (r Virtual_Guest) ShutdownPrivatePort() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "shutdownPrivatePort", nil, &r.Options, &resp)
+	return
+}
+
+// Shuts down the public network port
+func (r Virtual_Guest) ShutdownPublicPort() (resp bool, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "shutdownPublicPort", nil, &r.Options, &resp)
+	return
+}
+
+// no documentation yet
+func (r Virtual_Guest) UnmountIsoImage() (resp datatypes.Provisioning_Version1_Transaction, err error) {
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "unmountIsoImage", nil, &r.Options, &resp)
+	return
+}
+
+// Validate an image template for OS Reload
+func (r Virtual_Guest) ValidateImageTemplate(imageTemplateId *int) (resp bool, err error) {
+	params := []interface{}{
+		imageTemplateId,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "validateImageTemplate", params, &r.Options, &resp)
+	return
+}
+
+// Verify that a virtual server can go through the operating system reload process. It may be useful to call this method before attempting to actually reload the operating system just to verify that the reload will go smoothly. If the server configuration is not set up correctly or there is some other issue, an exception will be thrown indicating the error. If there were no issues, this will just return true.
+func (r Virtual_Guest) VerifyReloadOperatingSystem(config *datatypes.Container_Hardware_Server_Configuration) (resp bool, err error) {
+	params := []interface{}{
+		config,
+	}
+	err = r.Session.DoRequest("SoftLayer_Virtual_Guest", "verifyReloadOperatingSystem", params, &r.Options, &resp)
+	return
+}
+
+// The virtual block device template group data type presents the structure in which a group of archived image templates will be presented. The structure consists of a parent template group which contains multiple child template group objects. Each child template group object represents the image template in a particular location. Unless editing/deleting a specific child template group object, it is best to use the parent object.
+//
+// A virtual block device template group, also known as an image template group, represents an image of a virtual guest instance.
+type Virtual_Guest_Block_Device_Template_Group struct {
+	Session *session.Session
+	Options sl.Options
+}
+
+// GetVirtualGuestBlockDeviceTemplateGroupService returns an instance of the Virtual_Guest_Block_Device_Template_Group SoftLayer service
+func GetVirtualGuestBlockDeviceTemplateGroupService(sess *session.Session) Virtual_Guest_Block_Device_Template_Group {
+	return Virtual_Guest_Block_Device_Template_Group{Session: sess}
+}
+
+func (r Virtual_Guest_Block_Device_Template_Group) Id(id int) Virtual_Guest_Block_Device_Template_Group {
+	r.Options.Id = &id
+	return r
+}
+
+func (r Virtual_Guest_Block_Device_Template_Group) Mask(mask string) Virtual_Guest_Block_Device_Template_Group {
+	if !strings.HasPrefix(mask, "mask[") && (strings.Contains(mask, "[") || strings.Contains(mask, ",")) {
+		mask = fmt.Sprintf("mask[%s]", mask)
+	}
+
+	r.Options.Mask = mask
+	return r
+}
+
+func (r Virtual_Guest_Block_Device_Template_Group) Filter(filter string) Virtual_Guest_Block_Device_Template_Group {
+	r.Options.Filter = filter
+	return r
+}
+
+func (r Virtual_Guest_Block_Device_Template_Group) Limit(limit int) Virtual_Guest_Block_Device_Template_Group {
+	r.Options.Limit = &limit
+	return r
+}
+
+func (r Virtual_Guest_Block_Device_Template_Group) Offset(offset int) Virtual_Guest_Block_Device_Template_Group {
+	r.Options.Offset = &offset
+	return r
+}
+
+// DoRequest marshals any method arguments, issues the HTTP request, and
+// unmarshals the response into pResult.
+func (r *RestTransport) DoRequest(sess *Session, service string, method string, args []interface{}, options *sl.Options, pResult interface{}) error {
+	restMethod := httpMethod(method, args)
+
+	var parameters []byte
+	if len(args) > 0 {
+		// parse the parameters
+		parameters, _ = json.Marshal(
+			map[string]interface{}{
+				"parameters": args,
+			})
+	}
+
+	path := buildPath(service, method, options)
+
+	resp, code, err := makeHTTPRequest(
+		sess,
+		path,
+		restMethod,
+		bytes.NewBuffer(parameters),
+		options)
+
+	if err != nil {
+		return sl.Error{Wrapped: err}
+	}
+
+	if code < 200 || code > 299 {
+		e := sl.Error{StatusCode: code}
+
+		err = json.Unmarshal(resp, &e)
+
+		// If unparseable, wrap the json error
+		if err != nil {
+			e.Wrapped = err
+			e.Message = err.Error()
+		}
+
+		return e
+	}
+
+	// Some APIs that normally return a collection omit the []'s when the API returns a single value
+	returnType := reflect.TypeOf(pResult).String()
+	if strings.Index(returnType, "[]") == 1 && strings.Index(string(resp), "[") != 0 {
+		resp = []byte("[" + string(resp) + "]")
+	}
+
+	// At this point, all that's left to do is parse the return value to the appropriate type, and return
+	// any parse errors (or nil if successful)
+
+	err = nil
+	switch pResult.(type) {
+	case *[]uint8:
+		// exclude quotes
+		*pResult.(*[]uint8) = resp[1 : len(resp)-1]
+	case *datatypes.Void:
+	case *uint:
+		var val uint64
+		val, err = strconv.ParseUint(string(resp), 0, 64)
+		if err == nil {
+			*pResult.(*uint) = uint(val)
+		}
+	case *bool:
+		*pResult.(*bool), err = strconv.ParseBool(string(resp))
+	case *string:
+		str := string(resp)
+		strIdx := len(str) - 1
+		if str == "null" {
+			str = ""
+		} else if str[0] == '"' && str[strIdx] == '"' {
+			rawStr := rawString{str}
+			err = json.Unmarshal([]byte(`{"val":`+str+`}`), &rawStr)
+			if err == nil {
+				str = rawStr.Val
+			}
+		}
+		*pResult.(*string) = str
+	default:
+		// Must be a json representation of one of the many softlayer datatypes
+		err = json.Unmarshal(resp, pResult)
+	}
+
+	if err != nil {
+		err = sl.Error{Message: err.Error(), Wrapped: err}
+	}
+
+	return err
+}
+
+type rawString struct {
+	Val string
+}
+
+func buildPath(service string, method string, options *sl.Options) string {
+	path := service
+
+	if options.Id != nil {
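+		// The object id becomes a path segment, e.g. "SoftLayer_Virtual_Guest/1234".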
+		path = path + "/" + strconv.Itoa(*options.Id)
+	}
+
+	// omit the API method name if the method represents one of the basic REST methods
+	if method != "getObject" && method != "deleteObject" && method != "createObject" &&
+		method != "createObjects" && method != "editObject" && method != "editObjects" {
+		path = path + "/" + method
+	}
+
+	return path + ".json"
+}
+
+func encodeQuery(opts *sl.Options) string {
+	query := new(url.URL).Query()
+
+	if opts.Mask != "" {
+		query.Add("objectMask", opts.Mask)
+	}
+
+	if opts.Filter != "" {
+		query.Add("objectFilter", opts.Filter)
+	}
+
+	// resultLimit=<offset>,<limit>
+	// If offset unspecified, default to 0
+	if opts.Limit != nil {
+		startOffset := 0
+		if opts.Offset != nil {
+			startOffset = *opts.Offset
+		}
+
+		query.Add("resultLimit", fmt.Sprintf("%d,%d", startOffset, *opts.Limit))
+	}
+
+	return query.Encode()
+}
+
+func makeHTTPRequest(session *Session, path string, requestType string, requestBody *bytes.Buffer, options *sl.Options) ([]byte, int, error) {
+	// Use a dedicated client so the shared http.DefaultClient is not mutated.
+	client := &http.Client{Timeout: DefaultTimeout}
+	if session.Timeout != 0 {
+		client.Timeout = session.Timeout
+	}
+
+	var url string
+	if session.Endpoint == "" {
+		url = DefaultEndpoint
+	} else {
+		url = session.Endpoint
+	}
+	url = fmt.Sprintf("%s/%s", strings.TrimRight(url, "/"), path)
+	req, err := http.NewRequest(requestType, url, requestBody)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if session.APIKey != "" {
+		req.SetBasicAuth(session.UserName, session.APIKey)
+	} else if session.AuthToken != "" {
+		req.SetBasicAuth(fmt.Sprintf("%d", session.UserId), session.AuthToken)
+	}
+
+	req.URL.RawQuery = encodeQuery(options)
+
+	if session.Debug {
+		log.Println("[DEBUG] Request URL: ", requestType, req.URL)
+		log.Println("[DEBUG] Parameters: ", requestBody.String())
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, 520, err
+	}
+
+	defer resp.Body.Close()
+
+	responseBody, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, resp.StatusCode, err
+	}
+
+	if session.Debug {
+		log.Println("[DEBUG] Response: ", string(responseBody))
+	}
+	return responseBody, resp.StatusCode, nil
+}
+
+func httpMethod(name string, args []interface{}) string {
+	if name == "deleteObject" {
+		return "DELETE"
+	} else if name == "editObject" || name == "editObjects" {
+		return "PUT"
+	} else if name == "createObject" || name == "createObjects" || len(args) > 0 {
+		return "POST"
+	}
+
+	return "GET"
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/session/session.go b/vendor/github.com/softlayer/softlayer-go/session/session.go
new file mode 100644
index 0000000000..181b0fd408
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/session/session.go
@@ -0,0 +1,226 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package session
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"os/user"
+	"strings"
+	"time"
+
+	"github.com/softlayer/softlayer-go/config"
+	"github.com/softlayer/softlayer-go/sl"
+)
+
+// DefaultEndpoint is the default endpoint for API calls, when no override
+// is provided.
+const DefaultEndpoint = "https://api.softlayer.com/rest/v3"
+
+// TransportHandler is implemented by the protocol-specific transports
+// (REST, XML-RPC) that carry out API requests on behalf of a session.
+type TransportHandler interface {
+	// DoRequest is the protocol-specific handler for making API requests.
+	//
+	// sess is a reference to the current session object, where authentication and
+	// endpoint information can be found.
+	//
+	// service and method are the SoftLayer service name and method name, exactly as they
+	// are documented at http://sldn.softlayer.com/reference/softlayerapi (i.e., with the
+	// 'SoftLayer_' prefix and properly cased).
+	//
+	// args is a slice of arguments required for the service method being invoked. The
+	// type of each argument varies. See the method definition in the services package
+	// for the expected type of each argument.
+	//
+	// options is an sl.Options struct, containing any mask, filter, or result limit values
+	// to be applied.
+	//
+	// pResult is a pointer to a variable to be populated with the result of the API call.
+	// DoRequest should ensure that the native API response (i.e., XML or JSON) is correctly
+	// unmarshaled into the result structure.
+	//
+	// An sl.Error is returned, and can be inspected (with a type assertion) for details of
+	// the error (HTTP status code, API error message, etc.), or simply handled as a generic
+	// error (in which case no type assertion would be necessary).
+	DoRequest(
+		sess *Session,
+		service string,
+		method string,
+		args []interface{},
+		options *sl.Options,
+		pResult interface{}) error
+}
+
+// DefaultTimeout is the time limit applied to HTTP requests when a session
+// does not specify its own Timeout.
+const DefaultTimeout = time.Second * 120
+
+// Session stores the information required for communication with the SoftLayer
+// API
+type Session struct {
+	// UserName is the name of the SoftLayer API user
+	UserName string
+
+	// APIKey is the secret for making API calls
+	APIKey string
+
+	// Endpoint is the SoftLayer API endpoint to communicate with
+	Endpoint string
+
+	// UserId is the user id for token-based authentication
+	UserId int
+
+	// AuthToken is the token secret for token-based authentication
+	AuthToken string
+
+	// Debug controls logging of request details (URI, parameters, etc.)
+	Debug bool
+
+	// The handler whose DoRequest() function will be called for each API request.
+	// Handles the request and any response parsing specific to the desired protocol
+	// (e.g., REST). Set automatically for a new Session, based on the
+	// provided Endpoint.
+	TransportHandler TransportHandler
+
+	// Timeout specifies a time limit for http requests made by this
+	// session. Requests that take longer than the specified timeout
+	// will result in an error.
+	Timeout time.Duration
+}
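A sketch of constructing a session explicitly and inspecting a failure through the sl.Error type vendored later in this patch (the credentials and service call are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/softlayer/softlayer-go/services"
	"github.com/softlayer/softlayer-go/session"
	"github.com/softlayer/softlayer-go/sl"
)

func main() {
	// Placeholder credentials; with no arguments, New() falls back to the
	// SL_*/SOFTLAYER_* environment variables and then ~/.softlayer.
	sess := session.New("myusername", "myapikey")
	sess.Debug = true // log each request URL and body

	// Any service method surfaces failures as an sl.Error value.
	if _, err := services.GetAccountService(sess).GetObject(); err != nil {
		if apiErr, ok := err.(sl.Error); ok {
			log.Fatalf("HTTP %d, exception %q: %s",
				apiErr.StatusCode, apiErr.Exception, apiErr.Message)
		}
		log.Fatal(err)
	}
	fmt.Println("authenticated successfully")
}
```

+
+// New creates and returns a pointer to a new session object. It takes up to
+// four parameters, all of which are optional. If specified, they will be
+// interpreted in the following sequence:
+//
+// 1. UserName
+// 2. Api Key
+// 3. Endpoint
+// 4. Timeout
+//
+// If one or more are omitted, New() will attempt to retrieve these values from
+// the environment, and the ~/.softlayer config file, in that order.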
+func New(args ...interface{}) *Session { + keys := map[string]int{"username": 0, "api_key": 1, "endpoint_url": 2, "timeout": 3} + values := []string{"", "", "", ""} + + for i := 0; i < len(args); i++ { + values[i] = args[i].(string) + } + + // Default to the environment variables + + // Prioritize SL_USERNAME + envFallback("SL_USERNAME", &values[keys["username"]]) + envFallback("SOFTLAYER_USERNAME", &values[keys["username"]]) + + // Prioritize SL_API_KEY + envFallback("SL_API_KEY", &values[keys["api_key"]]) + envFallback("SOFTLAYER_API_KEY", &values[keys["api_key"]]) + + // Prioritize SL_ENDPOINT_URL + envFallback("SL_ENDPOINT_URL", &values[keys["endpoint_url"]]) + envFallback("SOFTLAYER_ENDPOINT_URL", &values[keys["endpoint_url"]]) + + envFallback("SL_TIMEOUT", &values[keys["timeout"]]) + envFallback("SOFTLAYER_TIMEOUT", &values[keys["timeout"]]) + + // Read ~/.softlayer for configuration + var homeDir string + u, err := user.Current() + if err != nil { + for _, name := range []string{"HOME", "USERPROFILE"} { // *nix, windows + if dir := os.Getenv(name); dir != "" { + homeDir = dir + break + } + } + } else { + homeDir = u.HomeDir + } + + if homeDir != "" { + configPath := fmt.Sprintf("%s/.softlayer", homeDir) + if _, err = os.Stat(configPath); !os.IsNotExist(err) { + // config file exists + file, err := config.LoadFile(configPath) + if err != nil { + log.Println(fmt.Sprintf("[WARN] session: Could not parse %s : %s", configPath, err)) + } else { + for k, v := range keys { + value, ok := file.Get("softlayer", k) + if ok && values[v] == "" { + values[v] = value + } + } + } + } + } else { + log.Println("[WARN] session: home dir could not be determined. Skipping read of ~/.softlayer.") + } + + endpointURL := values[keys["endpoint_url"]] + if endpointURL == "" { + endpointURL = DefaultEndpoint + } + + sess := &Session{ + UserName: values[keys["username"]], + APIKey: values[keys["api_key"]], + Endpoint: endpointURL, + } + + timeout := values[keys["timeout"]] + if timeout != "" { + timeoutDuration, err := time.ParseDuration(fmt.Sprintf("%ss", timeout)) + if err == nil { + sess.Timeout = timeoutDuration + } + } + + return sess +} + +// DoRequest hands off the processing to the assigned transport handler. It is +// normally called internally by the service objects, but is exported so that it can +// be invoked directly by client code in exceptional cases where direct control is +// needed over one of the parameters. +// +// For a description of parameters, see TransportHandler.DoRequest in this package +func (r *Session) DoRequest(service string, method string, args []interface{}, options *sl.Options, pResult interface{}) error { + if r.TransportHandler == nil { + r.TransportHandler = getDefaultTransport(r.Endpoint) + } + + return r.TransportHandler.DoRequest(r, service, method, args, options, pResult) +} + +func envFallback(keyName string, value *string) { + if *value == "" { + *value = os.Getenv(keyName) + } +} + +func getDefaultTransport(endpointURL string) TransportHandler { + var transportHandler TransportHandler + + if strings.Contains(endpointURL, "/xmlrpc/") { + transportHandler = &XmlRpcTransport{} + } else { + transportHandler = &RestTransport{} + } + + return transportHandler +} diff --git a/vendor/github.com/softlayer/softlayer-go/session/xmlrpc.go b/vendor/github.com/softlayer/softlayer-go/session/xmlrpc.go new file mode 100644 index 0000000000..0bcc3f8a5d --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/session/xmlrpc.go @@ -0,0 +1,195 @@ +/** + * Copyright 2016 IBM Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package session + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "net/http/httputil" + "strings" + + "github.com/renier/xmlrpc" + "github.com/softlayer/softlayer-go/sl" +) + +// Debugging RoundTripper +type debugRoundTripper struct{} + +func (mrt debugRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { + log.Println("->>>Request:") + dumpedReq, _ := httputil.DumpRequestOut(request, true) + log.Println(string(dumpedReq)) + + response, err := http.DefaultTransport.RoundTrip(request) + if err != nil { + log.Println("Error:", err) + return response, err + } + + log.Println("\n\n<<<-Response:") + dumpedResp, _ := httputil.DumpResponse(response, true) + log.Println(string(dumpedResp)) + + return response, err +} + +// XML-RPC Transport +type XmlRpcTransport struct{} + +func (x *XmlRpcTransport) DoRequest( + sess *Session, + service string, + method string, + args []interface{}, + options *sl.Options, + pResult interface{}, +) error { + + serviceUrl := fmt.Sprintf("%s/%s", strings.TrimRight(sess.Endpoint, "/"), service) + + var roundTripper http.RoundTripper + if sess.Debug { + roundTripper = debugRoundTripper{} + } + + timeout := DefaultTimeout + if sess.Timeout != 0 { + timeout = sess.Timeout + } + + client, err := xmlrpc.NewClient(serviceUrl, roundTripper, timeout) + if err != nil { + return fmt.Errorf("Could not create an xmlrpc client for %s: %s", service, err) + } + + authenticate := map[string]interface{}{} + if sess.UserName != "" { + authenticate["username"] = sess.UserName + } + + if sess.APIKey != "" { + authenticate["apiKey"] = sess.APIKey + } + + if sess.UserId != 0 { + authenticate["userId"] = sess.UserId + authenticate["complexType"] = "PortalLoginToken" + } + + if sess.AuthToken != "" { + authenticate["authToken"] = sess.AuthToken + authenticate["complexType"] = "PortalLoginToken" + } + + headers := map[string]interface{}{} + if len(authenticate) > 0 { + headers["authenticate"] = authenticate + } + + if options.Id != nil { + headers[fmt.Sprintf("%sInitParameters", service)] = map[string]int{ + "id": *options.Id, + } + } + + mask := options.Mask + if mask != "" { + if !strings.HasPrefix(mask, "mask[") && !strings.Contains(mask, ";") && strings.Contains(mask, ",") { + mask = fmt.Sprintf("mask[%s]", mask) + headers["SoftLayer_ObjectMask"] = map[string]string{"mask": mask} + } else { + headers[fmt.Sprintf("%sObjectMask", service)] = + map[string]interface{}{"mask": genXMLMask(mask)} + } + } + + if options.Filter != "" { + // FIXME: This json unmarshaling presents a performance problem, + // since the filter builder marshals a data structure to json. + // This then undoes that step to pass it to the xmlrpc request. + // It would be better to get the umarshaled data structure + // from the filter builder, but that will require changes to the + // public API in Options. 
+ objFilter := map[string]interface{}{} + err := json.Unmarshal([]byte(options.Filter), &objFilter) + if err != nil { + return fmt.Errorf("Error encoding object filter: %s", err) + } + headers[fmt.Sprintf("%sObjectFilter", service)] = objFilter + } + + if options.Limit != nil { + offset := 0 + if options.Offset != nil { + offset = *options.Offset + } + + headers["resultLimit"] = map[string]int{ + "limit": *options.Limit, + "offset": offset, + } + } + + // Add incoming arguments to xmlrpc parameter array + params := []interface{}{} + + if len(headers) > 0 { + params = append(params, map[string]interface{}{"headers": headers}) + } + + for _, arg := range args { + params = append(params, arg) + } + + err = client.Call(method, params, pResult) + if xmlRpcError, ok := err.(*xmlrpc.XmlRpcError); ok { + return sl.Error{ + StatusCode: xmlRpcError.HttpStatusCode, + Exception: xmlRpcError.Code, + Message: xmlRpcError.Err, + } + } + + return err +} + +func genXMLMask(mask string) interface{} { + objectMask := map[string]interface{}{} + for _, item := range strings.Split(mask, ";") { + if !strings.Contains(item, ".") { + objectMask[item] = []string{} + continue + } + + level := objectMask + names := strings.Split(item, ".") + totalNames := len(names) + for i, name := range names { + if i == totalNames-1 { + level[name] = []string{} + continue + } + + level[name] = map[string]interface{}{} + level = level[name].(map[string]interface{}) + } + } + + return objectMask +} diff --git a/vendor/github.com/softlayer/softlayer-go/sl/errors.go b/vendor/github.com/softlayer/softlayer-go/sl/errors.go new file mode 100644 index 0000000000..fe638ab62f --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/sl/errors.go @@ -0,0 +1,49 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sl + +import "fmt" + +// Error contains detailed information about an API error, which can be useful +// for debugging, or when finer error handling is required than just the mere +// presence or absence of an error. +// +// Error implements the error interface +type Error struct { + StatusCode int + Exception string `json:"code"` + Message string `json:"error"` + Wrapped error +} + +func (r Error) Error() string { + if r.Wrapped != nil { + return r.Wrapped.Error() + } + + var msg string + if r.Exception != "" { + msg = r.Exception + ": " + } + if r.Message != "" { + msg = msg + r.Message + " " + } + if r.StatusCode != 0 { + msg = fmt.Sprintf("%s(HTTP %d)", msg, r.StatusCode) + } + return msg +} diff --git a/vendor/github.com/softlayer/softlayer-go/sl/helpers.go b/vendor/github.com/softlayer/softlayer-go/sl/helpers.go new file mode 100644 index 0000000000..6a3090672c --- /dev/null +++ b/vendor/github.com/softlayer/softlayer-go/sl/helpers.go @@ -0,0 +1,175 @@ +/** + * Copyright 2016 IBM Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package sl has convenience functions for returning pointers to values
+package sl
+
+import (
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/softlayer/softlayer-go/datatypes"
+)
+
+// Int returns a pointer to the int value provided
+func Int(v int) *int {
+	return &v
+}
+
+// Uint returns a pointer to the uint value provided
+func Uint(v uint) *uint {
+	return &v
+}
+
+// String returns a pointer to the string value provided
+func String(v string) *string {
+	return &v
+}
+
+// Bool returns a pointer to the bool value provided
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Time converts the time.Time value provided to a datatypes.Time value,
+// and returns a pointer to it
+func Time(v time.Time) *datatypes.Time {
+	r := datatypes.Time{Time: v}
+	return &r
+}
+
+// Float converts the float value provided to a datatypes.Float64 value,
+// and returns a pointer to it
+func Float(v float64) *datatypes.Float64 {
+	r := datatypes.Float64(v)
+	return &r
+}
+
+// Convenience functions to simplify dereference of datatype properties
+
+// Get returns the value of p, either p itself, or, if p is a pointer, the
+// value that p points to. d is an optional default value to be returned
+// in the event that p is nil. If d is not specified, and p is nil, a
+// type-appropriate zero-value is returned instead.
+func Get(p interface{}, d ...interface{}) interface{} {
+	var (
+		val interface{}
+		ok  bool
+	)
+
+	if val, ok = GetOk(p); ok {
+		return val
+	}
+
+	if len(d) > 0 {
+		return d[0]
+	}
+
+	return val
+}
+
+// GetOk returns the value of p, either p itself, or, if p is a pointer, the
+// value that p points to. If p is nil, a type-appropriate zero-value is
+// returned instead. If p is a value or non-nil pointer, the second return
+// value will be true. Otherwise, it will be false.
+func GetOk(p interface{}) (interface{}, bool) {
+	t := reflect.TypeOf(p)
+
+	// if p is a non-pointer, just return it
+	if t.Kind() != reflect.Ptr {
+		return p, true
+	}
+
+	// p is a pointer. If non-nil, return the value pointed to
+	v := reflect.Indirect(reflect.ValueOf(p))
+	if v.IsValid() {
+		return v.Interface(), true
+	}
+
+	// p is a nil pointer. Return the zero value for the pointed-to type
+	return reflect.Zero(t.Elem()).Interface(), false
+}
+
+// Grab returns the value specified by the path given,
+// starting from the struct s.
+// If at any point in the path the lookup falls short
+// (i.e. a field is not found), or if the last field in the path is nil
+// itself, a type-appropriate zero-value is returned.
+// This behavior can be overridden by providing a default value.
+//
+// This is useful for getting values out of deeply nested structures
+// Example: val := sl.Grab(virtualGuest, "Datacenter.Name")
+func Grab(s interface{}, path string, d ...interface{}) interface{} {
+	var (
+		val interface{}
+		ok  bool
+	)
+
+	if val, ok = GrabOk(s, path); ok {
+		return val
+	}
+
+	if len(d) > 0 {
+		return d[0]
+	}
+
+	return val
+}
+
+// GrabOk returns the value specified by the path given,
+// starting from the struct s.
+// If at any point in the path the lookup falls short
+// (i.e. a field is not found), or if the last field in the path is nil
+// itself, a type-appropriate zero-value is returned.
+// It returns a second value, a boolean, which will be false if it failed
+// to look up the value, including if the last field in the path was nil.
+//
+// This is useful for getting values out of deeply nested structures
+// Example: val, ok := sl.GrabOk(virtualGuest, "Datacenter.Name")
+func GrabOk(s interface{}, path string) (interface{}, bool) {
+	t := reflect.TypeOf(s)
+	if t.Kind() != reflect.Struct {
+		return nil, false
+	}
+
+	dotIndex := strings.Index(path, ".")
+	if dotIndex == -1 {
+		dotIndex = len(path)
+	}
+
+	fieldName := path[0:dotIndex]
+	val := reflect.ValueOf(s)
+	fieldVal := val.FieldByName(fieldName)
+	if fieldVal.Kind() == reflect.Ptr {
+		if fieldVal.IsNil() {
+			return reflect.Zero(fieldVal.Type().Elem()).Interface(), false
+		}
+
+		fieldVal = reflect.Indirect(fieldVal)
+	}
+
+	result, ok := GetOk(fieldVal.Interface())
+	if !ok {
+		return result, ok
+	}
+
+	if dotIndex == len(path) {
+		return result, ok
+	}
+
+	return GrabOk(result, path[dotIndex+1:len(path)])
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/sl/options.go b/vendor/github.com/softlayer/softlayer-go/sl/options.go
new file mode 100644
index 0000000000..5d1dd0aaf6
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/sl/options.go
@@ -0,0 +1,27 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sl
+
+// Options contains the individual query parameters that can be applied to
+// a request.
+type Options struct {
+	Id     *int
+	Mask   string
+	Filter string
+	Limit  *int
+	Offset *int
+}
diff --git a/vendor/github.com/softlayer/softlayer-go/sl/version.go b/vendor/github.com/softlayer/softlayer-go/sl/version.go
new file mode 100644
index 0000000000..8f36a8d097
--- /dev/null
+++ b/vendor/github.com/softlayer/softlayer-go/sl/version.go
@@ -0,0 +1,47 @@
+/**
+ * Copyright 2016 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +/** + * AUTOMATICALLY GENERATED CODE - DO NOT MODIFY + */ + +package sl + +import "fmt" + +type VersionInfo struct { + Major int + Minor int + Patch int + Pre string +} + +var Version = VersionInfo{ + Major: 0, + Minor: 1, + Patch: 0, + Pre: "alpha", +} + +func (v VersionInfo) String() string { + result := fmt.Sprintf("v%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.Pre != "" { + result += fmt.Sprintf("-%s", v.Pre) + } + + return result +} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 0000000000..fc31160908 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 0000000000..f8b807f9c3 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,294 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "golang.org/x/crypto/blowfish" + "io" + "strconv" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. 
+type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. +var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. 
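+//
+// Illustrative sketch of the typical round trip (not part of the upstream
+// docs; pw and storedHash are hypothetical variables):
+//
+//	hash, err := bcrypt.GenerateFromPassword(pw, bcrypt.DefaultCost)
+//	// ... store hash ...
+//	if err := bcrypt.CompareHashAndPassword(storedHash, pw); err != nil {
+//		// password does not match (or the hash is malformed)
+//	}
+//	if cost, _ := bcrypt.Cost(storedHash); cost < bcrypt.DefaultCost {
+//		// rehash the password at a higher cost
+//	}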
+func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). + p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + ckey := append(key, 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n += 1 + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n += 1 + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. 
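+// As an illustration (a structurally valid but hypothetical hash), a value
+// such as
+//
+//	$2a$10$N9qo8uLOickgx2ZMRZoMye.IjZAgcfl7p92ldGxad68LJZdL17lhW
+//
+// breaks down as '$' + major ('2') + minor ('a') + '$' + two-digit cost
+// (10) + '$' + the 22-character salt + the 31-character hash: 60 bytes in
+// all, or 59 (minHashSize) when the minor version is absent.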
+func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go new file mode 100644 index 0000000000..9d80f19521 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/block.go @@ -0,0 +1,159 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blowfish + +// getNextWord returns the next big-endian uint32 value from the byte slice +// at the given position in a circular manner, updating the position. +func getNextWord(b []byte, pos *int) uint32 { + var w uint32 + j := *pos + for i := 0; i < 4; i++ { + w = w<<8 | uint32(b[j]) + j++ + if j >= len(b) { + j = 0 + } + } + *pos = j + return w +} + +// ExpandKey performs a key expansion on the given *Cipher. Specifically, it +// performs the Blowfish algorithm's key schedule which sets up the *Cipher's +// pi and substitution tables for calls to Encrypt. This is used, primarily, +// by the bcrypt package to reuse the Blowfish key schedule during its +// set up. It's unlikely that you need to use this directly. +func ExpandKey(key []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + // Using inlined getNextWord for performance. + var d uint32 + for k := 0; k < 4; k++ { + d = d<<8 | uint32(key[j]) + j++ + if j >= len(key) { + j = 0 + } + } + c.p[i] ^= d + } + + var l, r uint32 + for i := 0; i < 18; i += 2 { + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + for i := 0; i < 256; i += 2 { + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +// This is similar to ExpandKey, but folds the salt during the key +// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero +// salt passed in, reusing ExpandKey turns out to be a place of inefficiency +// and specializing it here is useful. 
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { + j := 0 + for i := 0; i < 18; i++ { + c.p[i] ^= getNextWord(key, &j) + } + + j = 0 + var l, r uint32 + for i := 0; i < 18; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.p[i], c.p[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s0[i], c.s0[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s1[i], c.s1[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s2[i], c.s2[i+1] = l, r + } + + for i := 0; i < 256; i += 2 { + l ^= getNextWord(salt, &j) + r ^= getNextWord(salt, &j) + l, r = encryptBlock(l, r, c) + c.s3[i], c.s3[i+1] = l, r + } +} + +func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[0] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] + xr ^= c.p[17] + return xr, xl +} + +func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { + xl, xr := l, r + xl ^= c.p[17] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] + xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] + xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] + xr ^= 
((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
+	xr ^= c.p[0]
+	return xr, xl
+}
diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go
new file mode 100644
index 0000000000..a73954f390
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blowfish/cipher.go
@@ -0,0 +1,91 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
+package blowfish // import "golang.org/x/crypto/blowfish"
+
+// The code is a port of Bruce Schneier's C implementation.
+// See http://www.schneier.com/blowfish.html.
+
+import "strconv"
+
+// The Blowfish block size in bytes.
+const BlockSize = 8
+
+// A Cipher is an instance of Blowfish encryption using a particular key.
+type Cipher struct {
+	p              [18]uint32
+	s0, s1, s2, s3 [256]uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a Cipher.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
+func NewCipher(key []byte) (*Cipher, error) {
+	var result Cipher
+	if k := len(key); k < 1 || k > 56 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	ExpandKey(key, &result)
+	return &result, nil
+}
+
+// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
+// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
+// sufficient and desirable. For bcrypt compatibility, the key can be over 56
+// bytes.
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
+	if len(salt) == 0 {
+		return NewCipher(key)
+	}
+	var result Cipher
+	if k := len(key); k < 1 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	expandKeyWithSalt(key, salt, &result)
+	return &result, nil
+}
+
+// BlockSize returns the Blowfish block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src using the key k
+// and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
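+//
+// Illustrative sketch (not part of the upstream docs) using the standard
+// library's crypto/cipher package instead of per-block Encrypt calls; key,
+// iv and buf are hypothetical, iv is BlockSize bytes and len(buf) is a
+// multiple of BlockSize:
+//
+//	c, err := blowfish.NewCipher(key)
+//	if err != nil {
+//		// handle the KeySizeError
+//	}
+//	mode := cipher.NewCBCEncrypter(c, iv)
+//	mode.CryptBlocks(buf, buf) // encrypts buf in place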
+func (c *Cipher) Encrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = encryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +// Decrypt decrypts the 8-byte buffer src using the key k +// and stores the result in dst. +func (c *Cipher) Decrypt(dst, src []byte) { + l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) + r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) + l, r = decryptBlock(l, r, c) + dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) + dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) +} + +func initCipher(c *Cipher) { + copy(c.p[0:], p[0:]) + copy(c.s0[0:], s0[0:]) + copy(c.s1[0:], s1[0:]) + copy(c.s2[0:], s2[0:]) + copy(c.s3[0:], s3[0:]) +} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go new file mode 100644 index 0000000000..8c5ee4cb08 --- /dev/null +++ b/vendor/golang.org/x/crypto/blowfish/const.go @@ -0,0 +1,199 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The startup permutation array and substitution boxes. +// They are the hexadecimal digits of PI; see: +// http://www.schneier.com/code/constants.txt. + +package blowfish + +var s0 = [256]uint32{ + 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, + 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, + 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, + 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, + 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, + 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, + 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, + 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, + 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, + 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, + 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, + 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, + 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, + 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, + 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, + 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, + 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, + 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, + 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, + 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, + 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, + 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, + 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, + 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, + 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, + 0x53317b48, 0x3e00df82, 
0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, + 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, + 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, + 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, + 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, + 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, + 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, + 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, + 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, + 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, + 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, + 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, + 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, + 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, + 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, + 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, + 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, + 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, +} + +var s1 = [256]uint32{ + 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, + 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, + 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, + 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, + 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, + 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, + 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, + 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, + 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, + 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, + 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, + 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, + 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, + 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, + 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, + 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, + 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, + 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, + 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, + 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, + 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, + 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, + 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, + 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, + 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, + 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, + 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, + 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, + 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, + 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, + 0x57f584a5, 
0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, + 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, + 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, + 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, + 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, + 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, + 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, + 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, + 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, + 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, + 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, + 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, + 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, +} + +var s2 = [256]uint32{ + 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, + 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, + 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, + 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, + 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, + 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, + 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, + 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, + 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, + 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, + 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, + 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, + 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, + 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, + 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, + 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, + 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, + 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, + 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, + 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, + 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, + 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, + 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, + 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, + 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, + 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, + 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, + 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, + 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, + 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, + 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, + 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, + 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, + 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, + 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, + 
0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, + 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, + 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, + 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, + 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, + 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, + 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, + 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, +} + +var s3 = [256]uint32{ + 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, + 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, + 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, + 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, + 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, + 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, + 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, + 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, + 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, + 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, + 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, + 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, + 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, + 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, + 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, + 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, + 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, + 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, + 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, + 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, + 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, + 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, + 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, + 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, + 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, + 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, + 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, + 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, + 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, + 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, + 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, + 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, + 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, + 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, + 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, + 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, + 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, + 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, + 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, + 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, 
+ 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, + 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, + 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, +} + +var p = [18]uint32{ + 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, + 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, + 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, +} diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h new file mode 100644 index 0000000000..80ad2220fd --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.h @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s new file mode 100644 index 0000000000..0ad539885b --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. + +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s new file mode 100644 index 0000000000..45484d1b59 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s @@ -0,0 +1,88 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// func cswap(inout *[5]uint64, v uint64)
+TEXT ·cswap(SB),7,$0
+	MOVQ inout+0(FP),DI
+	MOVQ v+8(FP),SI
+
+	CMPQ SI,$1
+	MOVQ 0(DI),SI
+	MOVQ 80(DI),DX
+	MOVQ 8(DI),CX
+	MOVQ 88(DI),R8
+	MOVQ SI,R9
+	CMOVQEQ DX,SI
+	CMOVQEQ R9,DX
+	MOVQ CX,R9
+	CMOVQEQ R8,CX
+	CMOVQEQ R9,R8
+	MOVQ SI,0(DI)
+	MOVQ DX,80(DI)
+	MOVQ CX,8(DI)
+	MOVQ R8,88(DI)
+	MOVQ 16(DI),SI
+	MOVQ 96(DI),DX
+	MOVQ 24(DI),CX
+	MOVQ 104(DI),R8
+	MOVQ SI,R9
+	CMOVQEQ DX,SI
+	CMOVQEQ R9,DX
+	MOVQ CX,R9
+	CMOVQEQ R8,CX
+	CMOVQEQ R9,R8
+	MOVQ SI,16(DI)
+	MOVQ DX,96(DI)
+	MOVQ CX,24(DI)
+	MOVQ R8,104(DI)
+	MOVQ 32(DI),SI
+	MOVQ 112(DI),DX
+	MOVQ 40(DI),CX
+	MOVQ 120(DI),R8
+	MOVQ SI,R9
+	CMOVQEQ DX,SI
+	CMOVQEQ R9,DX
+	MOVQ CX,R9
+	CMOVQEQ R8,CX
+	CMOVQEQ R9,R8
+	MOVQ SI,32(DI)
+	MOVQ DX,112(DI)
+	MOVQ CX,40(DI)
+	MOVQ R8,120(DI)
+	MOVQ 48(DI),SI
+	MOVQ 128(DI),DX
+	MOVQ 56(DI),CX
+	MOVQ 136(DI),R8
+	MOVQ SI,R9
+	CMOVQEQ DX,SI
+	CMOVQEQ R9,DX
+	MOVQ CX,R9
+	CMOVQEQ R8,CX
+	CMOVQEQ R9,R8
+	MOVQ SI,48(DI)
+	MOVQ DX,128(DI)
+	MOVQ CX,56(DI)
+	MOVQ R8,136(DI)
+	MOVQ 64(DI),SI
+	MOVQ 144(DI),DX
+	MOVQ 72(DI),CX
+	MOVQ 152(DI),R8
+	MOVQ SI,R9
+	CMOVQEQ DX,SI
+	CMOVQEQ R9,DX
+	MOVQ CX,R9
+	CMOVQEQ R8,CX
+	CMOVQEQ R9,R8
+	MOVQ SI,64(DI)
+	MOVQ DX,144(DI)
+	MOVQ CX,72(DI)
+	MOVQ R8,152(DI)
+	MOVQ DI,AX
+	MOVQ SI,DX
+	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
new file mode 100644
index 0000000000..6918c47fc2
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
@@ -0,0 +1,841 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// We have an implementation in amd64 assembly so this code is only run on
+// non-amd64 platforms. The amd64 assembly does not support gccgo.
+// +build !amd64 gccgo appengine
+
+package curve25519
+
+// This code is a port of the public domain, "ref10" implementation of
+// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
+
+// fieldElement represents an element of the field GF(2^255 - 19). An element
+// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
+// context.
+type fieldElement [10]int32
+
+func feZero(fe *fieldElement) {
+	for i := range fe {
+		fe[i] = 0
+	}
+}
+
+func feOne(fe *fieldElement) {
+	feZero(fe)
+	fe[0] = 1
+}
+
+func feAdd(dst, a, b *fieldElement) {
+	for i := range dst {
+		dst[i] = a[i] + b[i]
+	}
+}
+
+func feSub(dst, a, b *fieldElement) {
+	for i := range dst {
+		dst[i] = a[i] - b[i]
+	}
+}
+
+func feCopy(dst, src *fieldElement) {
+	for i := range dst {
+		dst[i] = src[i]
+	}
+}
+
+// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
+//
+// Preconditions: b in {0,1}.
+func feCSwap(f, g *fieldElement, b int32) {
+	var x fieldElement
+	b = -b
+	for i := range x {
+		x[i] = b & (f[i] ^ g[i])
+	}
+
+	for i := range f {
+		f[i] ^= x[i]
+	}
+	for i := range g {
+		g[i] ^= x[i]
+	}
+}
+
+// load3 reads a 24-bit, little-endian value from in.
+func load3(in []byte) int64 {
+	var r int64
+	r = int64(in[0])
+	r |= int64(in[1]) << 8
+	r |= int64(in[2]) << 16
+	return r
+}
+
+// load4 reads a 32-bit, little-endian value from in.
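+// For example (illustrative): for in = {0x01, 0x02, 0x03}, load3 returns
+// 0x030201, i.e. int64(in[0]) | int64(in[1])<<8 | int64(in[2])<<16.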
+func load4(in []byte) int64 {
+	var r int64
+	r = int64(in[0])
+	r |= int64(in[1]) << 8
+	r |= int64(in[2]) << 16
+	r |= int64(in[3]) << 24
+	return r
+}
+
+func feFromBytes(dst *fieldElement, src *[32]byte) {
+	h0 := load4(src[:])
+	h1 := load3(src[4:]) << 6
+	h2 := load3(src[7:]) << 5
+	h3 := load3(src[10:]) << 3
+	h4 := load3(src[13:]) << 2
+	h5 := load4(src[16:])
+	h6 := load3(src[20:]) << 7
+	h7 := load3(src[23:]) << 5
+	h8 := load3(src[26:]) << 4
+	h9 := load3(src[29:]) << 2
+
+	var carry [10]int64
+	carry[9] = (h9 + 1<<24) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	carry[1] = (h1 + 1<<24) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[3] = (h3 + 1<<24) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[5] = (h5 + 1<<24) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	carry[7] = (h7 + 1<<24) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[0] = (h0 + 1<<25) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[2] = (h2 + 1<<25) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[4] = (h4 + 1<<25) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[6] = (h6 + 1<<25) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	carry[8] = (h8 + 1<<25) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	dst[0] = int32(h0)
+	dst[1] = int32(h1)
+	dst[2] = int32(h2)
+	dst[3] = int32(h3)
+	dst[4] = int32(h4)
+	dst[5] = int32(h5)
+	dst[6] = int32(h6)
+	dst[7] = int32(h7)
+	dst[8] = int32(h8)
+	dst[9] = int32(h9)
+}
+
+// feToBytes marshals h to s.
+// Preconditions:
+//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+//   Then 0<y<1.
+//
+//   Write r=h-pq.
+//   Have 0<=r<=p-1=2^255-20.
+//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+//   Write x=r+19(2^-255)r+y.
+//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func feToBytes(s *[32]byte, h *fieldElement) {
+	var carry [10]int32
+
+	q := (19*h[9] + (1 << 24)) >> 25
+	q = (h[0] + q) >> 26
+	q = (h[1] + q) >> 25
+	q = (h[2] + q) >> 26
+	q = (h[3] + q) >> 25
+	q = (h[4] + q) >> 26
+	q = (h[5] + q) >> 25
+	q = (h[6] + q) >> 26
+	q = (h[7] + q) >> 25
+	q = (h[8] + q) >> 26
+	q = (h[9] + q) >> 25
+
+	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+	h[0] += 19 * q
+	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+	carry[0] = h[0] >> 26
+	h[1] += carry[0]
+	h[0] -= carry[0] << 26
+	carry[1] = h[1] >> 25
+	h[2] += carry[1]
+	h[1] -= carry[1] << 25
+	carry[2] = h[2] >> 26
+	h[3] += carry[2]
+	h[2] -= carry[2] << 26
+	carry[3] = h[3] >> 25
+	h[4] += carry[3]
+	h[3] -= carry[3] << 25
+	carry[4] = h[4] >> 26
+	h[5] += carry[4]
+	h[4] -= carry[4] << 26
+	carry[5] = h[5] >> 25
+	h[6] += carry[5]
+	h[5] -= carry[5] << 25
+	carry[6] = h[6] >> 26
+	h[7] += carry[6]
+	h[6] -= carry[6] << 26
+	carry[7] = h[7] >> 25
+	h[8] += carry[7]
+	h[7] -= carry[7] << 25
+	carry[8] = h[8] >> 26
+	h[9] += carry[8]
+	h[8] -= carry[8] << 26
+	carry[9] = h[9] >> 25
+	h[9] -= carry[9] << 25
+	// h10 = carry9
+
+	// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+	// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+	// evidently 2^255 h10-2^255 q = 0.
+	// Goal: Output h[0]+...+2^230 h[9].
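+	//
+	// Illustrative note (not in the upstream source): limb i of a
+	// fieldElement starts at bit offset 0, 26, 51, 77, 102, 128, 153,
+	// 179, 204, 230, alternating 26- and 25-bit widths. The packing below
+	// splices neighbouring limbs wherever an offset is not a multiple of
+	// 8; e.g. s[3] takes bits 24-25 of h[0] plus the low bits of h[1]
+	// shifted left by 2, since 26 mod 8 == 2.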
+ + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
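+//
+// Illustrative note (not in the upstream source): the 19 and 38 constants
+// come from reducing mod p = 2^255 - 19. A cross term f_i*g_j whose
+// combined limb offset reaches 2^255 wraps around multiplied by 19, since
+// 2^255 = 19 (mod p). Limb i sits at bit offset ceil(25.5*i), so when both
+// i and j are odd the two offsets each round up by half a bit, costing one
+// extra doubling: hence 38 = 2*19 on odd-odd terms that wrap, and the bare
+// *2 on odd-odd terms that do not.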
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
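feInvert relies on the fact that p = 2^255 - 19 is prime, so by Fermat's little theorem z^(p-2) ≡ z^-1 (mod p); the square-and-multiply chain below evaluates exactly that exponent, with the final step multiplying z^(2^255 - 2^5) by z^11 to reach z^(2^255 - 21) = z^(p-2). A minimal, non-constant-time cross-check of the identity with math/big (illustrative only; nothing here is part of the package):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the field prime used throughout this package.
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	z := big.NewInt(123456789)

	// Fermat's little theorem: z^(p-2) == z^-1 (mod p) for z != 0 mod p.
	inv := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)

	// Multiplying z by its inverse must reduce to 1 mod p.
	check := new(big.Int).Mul(z, inv)
	fmt.Println(check.Mod(check, p)) // prints 1
}
```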
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go new file mode 100644 index 0000000000..ebeea3c2d6 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html +package curve25519 // import "golang.org/x/crypto/curve25519" + +// basePoint is the x coordinate of the generator of the curve. +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. 
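Together, ScalarMult and ScalarBaseMult (whose bodies follow) are all a caller needs for a Diffie-Hellman key agreement. A minimal sketch of such caller-side usage, assuming only the exported API of this package (variable names are illustrative):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Each party picks 32 random bytes; scalarMult clamps them internally.
	var aPriv, bPriv [32]byte
	if _, err := rand.Read(aPriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bPriv[:]); err != nil {
		panic(err)
	}

	// Public keys are scalar multiples of the standard base point.
	var aPub, bPub [32]byte
	curve25519.ScalarBaseMult(&aPub, &aPriv)
	curve25519.ScalarBaseMult(&bPub, &bPriv)

	// Combining one's own scalar with the peer's public key yields the
	// same shared x coordinate on both sides.
	var aShared, bShared [32]byte
	curve25519.ScalarMult(&aShared, &aPriv, &bPub)
	curve25519.ScalarMult(&bShared, &bPriv, &aPub)

	fmt.Println(bytes.Equal(aShared[:], bShared[:])) // true
}
```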
+func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s new file mode 100644 index 0000000000..536479bf62 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 0000000000..7074e5cd9d --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1377 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX 
+ IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ 
$13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ DX,R15 + 
MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + MOVQ DX,R13 
+ MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + 
ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go new file mode 100644 index 
0000000000..5822bd5338 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go @@ -0,0 +1,235 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package curve25519 + +// These functions are implemented in the .s files. The names of the functions +// in the rest of the file are also taken from the SUPERCOP sources to help +// people following along. + +//go:noescape +func cswap(inout *[5]uint64, v uint64) + +//go:noescape +func ladderstep(inout *[5][5]uint64) + +//go:noescape +func freeze(inout *[5]uint64) + +//go:noescape +func mul(dest, a, b *[5]uint64) + +//go:noescape +func square(out, in *[5]uint64) + +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. +func mladder(xr, zr *[5]uint64, s *[32]byte) { + var work [5][5]uint64 + + work[0] = *xr + setint(&work[1], 1) + setint(&work[2], 0) + work[3] = *xr + setint(&work[4], 1) + + j := uint(6) + var prevbit byte + + for i := 31; i >= 0; i-- { + for j < 8 { + bit := ((*s)[i] >> j) & 1 + swap := bit ^ prevbit + prevbit = bit + cswap(&work[1], uint64(swap)) + ladderstep(&work) + j-- + } + j = 7 + } + + *xr = work[1] + *zr = work[2] +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + copy(e[:], (*in)[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var t, z [5]uint64 + unpack(&t, base) + mladder(&t, &z, &e) + invert(&z, &z) + mul(&t, &t, &z) + pack(out, &t) +} + +func setint(r *[5]uint64, v uint64) { + r[0] = v + r[1] = 0 + r[2] = 0 + r[3] = 0 + r[4] = 0 +} + +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian +// order. +func unpack(r *[5]uint64, x *[32]byte) { + r[0] = uint64(x[0]) | + uint64(x[1])<<8 | + uint64(x[2])<<16 | + uint64(x[3])<<24 | + uint64(x[4])<<32 | + uint64(x[5])<<40 | + uint64(x[6]&7)<<48 + + r[1] = uint64(x[6])>>3 | + uint64(x[7])<<5 | + uint64(x[8])<<13 | + uint64(x[9])<<21 | + uint64(x[10])<<29 | + uint64(x[11])<<37 | + uint64(x[12]&63)<<45 + + r[2] = uint64(x[12])>>6 | + uint64(x[13])<<2 | + uint64(x[14])<<10 | + uint64(x[15])<<18 | + uint64(x[16])<<26 | + uint64(x[17])<<34 | + uint64(x[18])<<42 | + uint64(x[19]&1)<<50 + + r[3] = uint64(x[19])>>1 | + uint64(x[20])<<7 | + uint64(x[21])<<15 | + uint64(x[22])<<23 | + uint64(x[23])<<31 | + uint64(x[24])<<39 | + uint64(x[25]&15)<<47 + + r[4] = uint64(x[25])>>4 | + uint64(x[26])<<4 | + uint64(x[27])<<12 | + uint64(x[28])<<20 | + uint64(x[29])<<28 | + uint64(x[30])<<36 | + uint64(x[31]&127)<<44 +} + +// pack sets out = x where out is the usual, little-endian form of the 5, +// 51-bit limbs in x.
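unpack and pack (next) convert between 32 little-endian bytes and the radix-2^51 representation used by the assembly routines, i.e. x = r[0] + r[1]·2^51 + r[2]·2^102 + r[3]·2^153 + r[4]·2^204. A small sketch that recombines limbs on that basis, handy for checking the shift offsets by hand (recombine is a hypothetical helper, not part of the package):

```go
package main

import (
	"fmt"
	"math/big"
)

// recombine interprets l as five 51-bit limbs in little-endian order:
// l[0] + l[1]*2^51 + l[2]*2^102 + l[3]*2^153 + l[4]*2^204.
func recombine(l [5]uint64) *big.Int {
	x := new(big.Int)
	for i := 4; i >= 0; i-- {
		x.Lsh(x, 51)                                     // make room for the next lower limb
		x.Add(x, new(big.Int).SetUint64(l[i]&(1<<51-1))) // mask each limb to 51 bits
	}
	return x
}

func main() {
	fmt.Println(recombine([5]uint64{3, 0, 0, 0, 1})) // prints 3 + 2^204
}
```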
+func pack(out *[32]byte, x *[5]uint64) { + t := *x + freeze(&t) + + out[0] = byte(t[0]) + out[1] = byte(t[0] >> 8) + out[2] = byte(t[0] >> 16) + out[3] = byte(t[0] >> 24) + out[4] = byte(t[0] >> 32) + out[5] = byte(t[0] >> 40) + out[6] = byte(t[0] >> 48) + + out[6] ^= byte(t[1]<<3) & 0xf8 + out[7] = byte(t[1] >> 5) + out[8] = byte(t[1] >> 13) + out[9] = byte(t[1] >> 21) + out[10] = byte(t[1] >> 29) + out[11] = byte(t[1] >> 37) + out[12] = byte(t[1] >> 45) + + out[12] ^= byte(t[2]<<6) & 0xc0 + out[13] = byte(t[2] >> 2) + out[14] = byte(t[2] >> 10) + out[15] = byte(t[2] >> 18) + out[16] = byte(t[2] >> 26) + out[17] = byte(t[2] >> 34) + out[18] = byte(t[2] >> 42) + out[19] = byte(t[2] >> 50) + + out[19] ^= byte(t[3]<<1) & 0xfe + out[20] = byte(t[3] >> 7) + out[21] = byte(t[3] >> 15) + out[22] = byte(t[3] >> 23) + out[23] = byte(t[3] >> 31) + out[24] = byte(t[3] >> 39) + out[25] = byte(t[3] >> 47) + + out[25] ^= byte(t[4]<<4) & 0xf0 + out[26] = byte(t[4] >> 4) + out[27] = byte(t[4] >> 12) + out[28] = byte(t[4] >> 20) + out[29] = byte(t[4] >> 28) + out[30] = byte(t[4] >> 36) + out[31] = byte(t[4] >> 44) +} + +// invert calculates r = x^-1 mod p using Fermat's little theorem. +func invert(r *[5]uint64, x *[5]uint64) { + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 + + square(&z2, x) /* 2 */ + square(&t, &z2) /* 4 */ + square(&t, &t) /* 8 */ + mul(&z9, &t, x) /* 9 */ + mul(&z11, &z9, &z2) /* 11 */ + square(&t, &z11) /* 22 */ + mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ + + square(&t, &z2_5_0) /* 2^6 - 2^1 */ + for i := 1; i < 5; i++ { /* 2^10 - 2^5 */ + square(&t, &t) + } + mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ + + square(&t, &z2_10_0) /* 2^11 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ + + square(&t, &z2_20_0) /* 2^21 - 2^1 */ + for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ + square(&t, &t) + } + mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ + + square(&t, &t) /* 2^41 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ + square(&t, &t) + } + mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ + + square(&t, &z2_50_0) /* 2^51 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ + square(&t, &t) + } + mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ + + square(&t, &z2_100_0) /* 2^101 - 2^1 */ + for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ + square(&t, &t) + } + mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ + + square(&t, &t) /* 2^201 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ + square(&t, &t) + } + mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ + + square(&t, &t) /* 2^251 - 2^1 */ + square(&t, &t) /* 2^252 - 2^2 */ + square(&t, &t) /* 2^253 - 2^3 */ + + square(&t, &t) /* 2^254 - 2^4 */ + + square(&t, &t) /* 2^255 - 2^5 */ + mul(r, &t, &z11) /* 2^255 - 21 */ +} diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s new file mode 100644 index 0000000000..b162e65159 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s @@ -0,0 +1,169 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R9:R8 + ANDQ SI,R8 + SHLQ $13,R11:R10 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BP:BX + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s new file mode 100644 index 0000000000..4e864a83ef --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,R8:CX + ANDQ SI,CX + SHLQ $13,R10:R9 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R12:R11 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R14:R13 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,BX:R15 + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 0000000000..f1d95674ac --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,181 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// http://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05. +package ed25519 + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +import ( + "crypto" + cryptorand "crypto/rand" + "crypto/sha512" + "crypto/subtle" + "errors" + "io" + "strconv" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 +) + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Public returns the PublicKey corresponding to priv. 
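Via the Public method (next) and the Sign method defined further below, PrivateKey satisfies crypto.Signer, so it can be handed to code that only knows about generic signers. A minimal sketch, assuming only this file's exported API; note that Ed25519 signs the raw message, so the caller passes crypto.Hash(0):

```go
package main

import (
	"crypto"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	var signer crypto.Signer = priv // PrivateKey implements crypto.Signer

	// crypto.Hash(0) signals an unhashed message, as the Sign method requires.
	sig, err := signer.Sign(rand.Reader, []byte("hello"), crypto.Hash(0))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(sig)) // 64, i.e. SignatureSize
}
```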
+func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[32:]) + return PublicKey(publicKey) +} + +// Sign signs the given message with priv. +// Ed25519 performs two passes over messages to be signed and therefore cannot +// handle pre-hashed messages. Thus opts.HashFunc() must return zero to +// indicate the message hasn't been hashed. This can be achieved by passing +// crypto.Hash(0) as the value for opts. +func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if opts.HashFunc() != crypto.Hash(0) { + return nil, errors.New("ed25519: cannot sign hashed message") + } + + return Sign(priv, message), nil +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) { + if rand == nil { + rand = cryptorand.Reader + } + + privateKey = make([]byte, PrivateKeySize) + publicKey = make([]byte, PublicKeySize) + _, err = io.ReadFull(rand, privateKey[:32]) + if err != nil { + return nil, nil, err + } + + digest := sha512.Sum512(privateKey[:32]) + digest[0] &= 248 + digest[31] &= 127 + digest[31] |= 64 + + var A edwards25519.ExtendedGroupElement + var hBytes [32]byte + copy(hBytes[:], digest[:]) + edwards25519.GeScalarMultBase(&A, &hBytes) + var publicKeyBytes [32]byte + A.ToBytes(&publicKeyBytes) + + copy(privateKey[32:], publicKeyBytes[:]) + copy(publicKey, publicKeyBytes[:]) + + return publicKey, privateKey, nil +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + h := sha512.New() + h.Write(privateKey[:32]) + + var digest1, messageDigest, hramDigest [64]byte + var expandedSecretKey [32]byte + h.Sum(digest1[:0]) + copy(expandedSecretKey[:], digest1[:]) + expandedSecretKey[0] &= 248 + expandedSecretKey[31] &= 63 + expandedSecretKey[31] |= 64 + + h.Reset() + h.Write(digest1[32:]) + h.Write(message) + h.Sum(messageDigest[:0]) + + var messageDigestReduced [32]byte + edwards25519.ScReduce(&messageDigestReduced, &messageDigest) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, &messageDigestReduced) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + h.Reset() + h.Write(encodedR[:]) + h.Write(privateKey[32:]) + h.Write(message) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var s [32]byte + edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) + + signature := make([]byte, SignatureSize) + copy(signature[:], encodedR[:]) + copy(signature[32:], s[:]) + + return signature +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. 
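GenerateKey, Sign, and Verify (whose body follows) compose into the usual round trip. A short usage sketch against this package's exported API:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	msg := []byte("attack at dawn")
	sig := ed25519.Sign(priv, msg)

	fmt.Println(ed25519.Verify(pub, msg, sig))         // true
	fmt.Println(ed25519.Verify(pub, []byte("x"), sig)) // false: wrong message
}
```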
+func Verify(publicKey PublicKey, message, sig []byte) bool { + if l := len(publicKey); l != PublicKeySize { + panic("ed25519: bad public key length: " + strconv.Itoa(l)) + } + + if len(sig) != SignatureSize || sig[63]&224 != 0 { + return false + } + + var A edwards25519.ExtendedGroupElement + var publicKeyBytes [32]byte + copy(publicKeyBytes[:], publicKey) + if !A.FromBytes(&publicKeyBytes) { + return false + } + edwards25519.FeNeg(&A.X, &A.X) + edwards25519.FeNeg(&A.T, &A.T) + + h := sha512.New() + h.Write(sig[:32]) + h.Write(publicKey[:]) + h.Write(message) + var digest [64]byte + h.Sum(digest[:0]) + + var hReduced [32]byte + edwards25519.ScReduce(&hReduced, &digest) + + var R edwards25519.ProjectiveGroupElement + var b [32]byte + copy(b[:], sig[32:]) + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) + + var checkR [32]byte + R.ToBytes(&checkR) + return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1 +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go new file mode 100644 index 0000000000..e39f086c1d --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go @@ -0,0 +1,1422 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// These values are from the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// d is a constant in the Edwards curve equation. +var d = FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, +} + +// d2 is 2*d. +var d2 = FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, +} + +// SqrtM1 is the square-root of -1 in the field. +var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +// A is a constant in the Montgomery-form of curve25519. +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// bi contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
+var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +// base contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
+var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { + FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, + }, + { + FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + { + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, + }, + { + FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, + FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, + }, + { + FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + 
FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, 
-25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + { + FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, 
-239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, 
-16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 
13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + }, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + 
FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, + }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, 
-8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, 
-6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + 
FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, 
-13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 
9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + { + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, 
+ { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, 
-11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, 
-29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + 
FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go new file mode 100644 index 0000000000..5f8b994787 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go @@ -0,0 +1,1771 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package edwards25519
+
+// This code is a port of the public domain, “ref10” implementation of ed25519
+// from SUPERCOP.
+
+// FieldElement represents an element of the field GF(2^255 - 19). An element
+// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
+// context.
+type FieldElement [10]int32
+
+var zero FieldElement
+
+func FeZero(fe *FieldElement) {
+	copy(fe[:], zero[:])
+}
+
+func FeOne(fe *FieldElement) {
+	FeZero(fe)
+	fe[0] = 1
+}
+
+func FeAdd(dst, a, b *FieldElement) {
+	dst[0] = a[0] + b[0]
+	dst[1] = a[1] + b[1]
+	dst[2] = a[2] + b[2]
+	dst[3] = a[3] + b[3]
+	dst[4] = a[4] + b[4]
+	dst[5] = a[5] + b[5]
+	dst[6] = a[6] + b[6]
+	dst[7] = a[7] + b[7]
+	dst[8] = a[8] + b[8]
+	dst[9] = a[9] + b[9]
+}
+
+func FeSub(dst, a, b *FieldElement) {
+	dst[0] = a[0] - b[0]
+	dst[1] = a[1] - b[1]
+	dst[2] = a[2] - b[2]
+	dst[3] = a[3] - b[3]
+	dst[4] = a[4] - b[4]
+	dst[5] = a[5] - b[5]
+	dst[6] = a[6] - b[6]
+	dst[7] = a[7] - b[7]
+	dst[8] = a[8] - b[8]
+	dst[9] = a[9] - b[9]
+}
+
+func FeCopy(dst, src *FieldElement) {
+	copy(dst[:], src[:])
+}
+
+// Replace (f,g) with (g,g) if b == 1;
+// replace (f,g) with (f,g) if b == 0.
+//
+// Preconditions: b in {0,1}.
+func FeCMove(f, g *FieldElement, b int32) {
+	b = -b
+	f[0] ^= b & (f[0] ^ g[0])
+	f[1] ^= b & (f[1] ^ g[1])
+	f[2] ^= b & (f[2] ^ g[2])
+	f[3] ^= b & (f[3] ^ g[3])
+	f[4] ^= b & (f[4] ^ g[4])
+	f[5] ^= b & (f[5] ^ g[5])
+	f[6] ^= b & (f[6] ^ g[6])
+	f[7] ^= b & (f[7] ^ g[7])
+	f[8] ^= b & (f[8] ^ g[8])
+	f[9] ^= b & (f[9] ^ g[9])
+}
+
+func load3(in []byte) int64 {
+	var r int64
+	r = int64(in[0])
+	r |= int64(in[1]) << 8
+	r |= int64(in[2]) << 16
+	return r
+}
+
+func load4(in []byte) int64 {
+	var r int64
+	r = int64(in[0])
+	r |= int64(in[1]) << 8
+	r |= int64(in[2]) << 16
+	r |= int64(in[3]) << 24
+	return r
+}
+
+func FeFromBytes(dst *FieldElement, src *[32]byte) {
+	h0 := load4(src[:])
+	h1 := load3(src[4:]) << 6
+	h2 := load3(src[7:]) << 5
+	h3 := load3(src[10:]) << 3
+	h4 := load3(src[13:]) << 2
+	h5 := load4(src[16:])
+	h6 := load3(src[20:]) << 7
+	h7 := load3(src[23:]) << 5
+	h8 := load3(src[26:]) << 4
+	h9 := (load3(src[29:]) & 8388607) << 2
+
+	FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
+}
+
+// FeToBytes marshals h to s.
+// Preconditions:
+//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+//   Then 0<y<1.
+//
+//   Write r=h-pq.
+//   Have 0<=r<=p-1=2^255-20.
+//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+//   Write x=r+19(2^-255)r+y.
+//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func FeToBytes(s *[32]byte, h *FieldElement) {
+	var carry [10]int32
+
+	q := (19*h[9] + (1 << 24)) >> 25
+	q = (h[0] + q) >> 26
+	q = (h[1] + q) >> 25
+	q = (h[2] + q) >> 26
+	q = (h[3] + q) >> 25
+	q = (h[4] + q) >> 26
+	q = (h[5] + q) >> 25
+	q = (h[6] + q) >> 26
+	q = (h[7] + q) >> 25
+	q = (h[8] + q) >> 26
+	q = (h[9] + q) >> 25
+
+	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+	h[0] += 19 * q
+	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
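+	// Each carry step below keeps the low 26 bits of an even-indexed limb
+	// (25 bits for an odd-indexed one) and adds the excess into the next
+	// limb, so every h[i] ends up in canonical range before the
+	// little-endian byte packing at the end of the function.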
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +func FeIsNegative(f *FieldElement) byte { + var s [32]byte + FeToBytes(&s, f) + return s[0] & 1 +} + +func FeIsNonZero(f *FieldElement) int32 { + var s [32]byte + FeToBytes(&s, f) + var x uint8 + for _, b := range s { + x |= b + } + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + return int32(x & 1) +} + +// FeNeg sets h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeNeg(h, f *FieldElement) { + h[0] = -f[0] + h[1] = -f[1] + h[2] = -f[2] + h[3] = -f[3] + h[4] = -f[4] + h[5] = -f[5] + h[6] = -f[6] + h[7] = -f[7] + h[8] = -f[8] + h[9] = -f[9] +} + +func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 + + /* + |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + c1 = (h1 + (1 << 24)) >> 25 + h2 += c1 + h1 -= c1 << 25 + c5 = (h5 + (1 << 24)) >> 25 + h6 += c5 + h5 -= c5 << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + c2 = (h2 + (1 << 25)) >> 26 + h3 += c2 + h2 -= c2 << 26 + c6 = (h6 + (1 << 25)) >> 26 + h7 += c6 + h6 -= c6 << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + c3 = (h3 + (1 << 24)) >> 25 + h4 += c3 + h3 -= c3 << 25 + c7 = (h7 + (1 << 24)) >> 25 + h8 += c7 + h7 -= c7 << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 1.52*2^33 */ + + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + c8 = (h8 + (1 << 25)) >> 26 + h9 += c8 + h8 -= c8 << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + c9 = (h9 + (1 << 24)) >> 25 + h0 += c9 * 19 + h9 -= c9 << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs, can squeeze carries into int32. 
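+//
+// Concretely, output limb h_k sums every product f_i*g_j with i+j = k,
+// plus 19 times every product with i+j = k+10, since weight above 2^255
+// wraps around with 2^255 = 19 (mod 2^255-19); products of two
+// odd-indexed limbs also pick up a factor of 2 from the mixed 26/25-bit
+// radix, which is where the f*_2 and g*_19 precomputations below come from.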
+func FeMul(h, f, g *FieldElement) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + + f1_2 := int64(2 * f[1]) + f3_2 := int64(2 * f[3]) + f5_2 := int64(2 * f[5]) + f7_2 := int64(2 * f[7]) + f9_2 := int64(2 * f[9]) + + g0 := int64(g[0]) + g1 := int64(g[1]) + g2 := int64(g[2]) + g3 := int64(g[3]) + g4 := int64(g[4]) + g5 := int64(g[5]) + g6 := int64(g[6]) + g7 := int64(g[7]) + g8 := int64(g[8]) + g9 := int64(g[9]) + + g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ + g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ + g3_19 := int64(19 * g[3]) + g4_19 := int64(19 * g[4]) + g5_19 := int64(19 * g[5]) + g6_19 := int64(19 * g[6]) + g7_19 := int64(19 * g[7]) + g8_19 := int64(19 * g[8]) + g9_19 := int64(19 * g[9]) + + h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 + h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 + h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 + h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 + h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 + h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 + h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 + h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 + h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 + h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + f0_2 := int64(2 * f[0]) + f1_2 := int64(2 * f[1]) + f2_2 := int64(2 * f[2]) + f3_2 := int64(2 * f[3]) + f4_2 := int64(2 * f[4]) + f5_2 := int64(2 * f[5]) + f6_2 := int64(2 * f[6]) + f7_2 := int64(2 * f[7]) + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + + h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 + h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 + h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 + h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 + h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 + h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 + h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 + h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 + h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 + h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 + + return +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
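+//
+// FeSquare is the f = g case of FeMul: feSquare merges the symmetric
+// cross terms (via the f*_2 and *_38 precomputations), roughly halving
+// the number of 64-bit multiplications compared to a general FeMul call.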
+func FeSquare(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. +func FeSquare2(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. 
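+//
+// The extended representation below, with its auxiliary coordinate
+// T = XY/Z, supports the Hisil-Wong-Carter-Dawson addition formulas;
+// a CompletedGroupElement is the intermediate result of an addition or
+// doubling before it is converted back to projective or extended form.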
+// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) != (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) {
+	var t0 FieldElement
+
+	FeAdd(&r.X, &p.Y, &p.X)
+	FeSub(&r.Y, &p.Y, &p.X)
+	FeMul(&r.Z, &r.X, &q.yPlusX)
+	FeMul(&r.Y, &r.Y, &q.yMinusX)
+	FeMul(&r.T, &q.T2d, &p.T)
+	FeMul(&r.X, &p.Z, &q.Z)
+	FeAdd(&t0, &r.X, &r.X)
+	FeSub(&r.X, &r.Z, &r.Y)
+	FeAdd(&r.Y, &r.Z, &r.Y)
+	FeAdd(&r.Z, &t0, &r.T)
+	FeSub(&r.T, &t0, &r.T)
+}
+
+func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
+	var t0 FieldElement
+
+	FeAdd(&r.X, &p.Y, &p.X)
+	FeSub(&r.Y, &p.Y, &p.X)
+	FeMul(&r.Z, &r.X, &q.yMinusX)
+	FeMul(&r.Y, &r.Y, &q.yPlusX)
+	FeMul(&r.T, &q.T2d, &p.T)
+	FeMul(&r.X, &p.Z, &q.Z)
+	FeAdd(&t0, &r.X, &r.X)
+	FeSub(&r.X, &r.Z, &r.Y)
+	FeAdd(&r.Y, &r.Z, &r.Y)
+	FeSub(&r.Z, &t0, &r.T)
+	FeAdd(&r.T, &t0, &r.T)
+}
+
+func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
+	var t0 FieldElement
+
+	FeAdd(&r.X, &p.Y, &p.X)
+	FeSub(&r.Y, &p.Y, &p.X)
+	FeMul(&r.Z, &r.X, &q.yPlusX)
+	FeMul(&r.Y, &r.Y, &q.yMinusX)
+	FeMul(&r.T, &q.xy2d, &p.T)
+	FeAdd(&t0, &p.Z, &p.Z)
+	FeSub(&r.X, &r.Z, &r.Y)
+	FeAdd(&r.Y, &r.Z, &r.Y)
+	FeAdd(&r.Z, &t0, &r.T)
+	FeSub(&r.T, &t0, &r.T)
+}
+
+func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
+	var t0 FieldElement
+
+	FeAdd(&r.X, &p.Y, &p.X)
+	FeSub(&r.Y, &p.Y, &p.X)
+	FeMul(&r.Z, &r.X, &q.yMinusX)
+	FeMul(&r.Y, &r.Y, &q.yPlusX)
+	FeMul(&r.T, &q.xy2d, &p.T)
+	FeAdd(&t0, &p.Z, &p.Z)
+	FeSub(&r.X, &r.Z, &r.Y)
+	FeAdd(&r.Y, &r.Z, &r.Y)
+	FeSub(&r.Z, &t0, &r.T)
+	FeAdd(&r.T, &t0, &r.T)
+}
+
+func slide(r *[256]int8, a *[32]byte) {
+	for i := range r {
+		r[i] = int8(1 & (a[i>>3] >> uint(i&7)))
+	}
+
+	for i := range r {
+		if r[i] != 0 {
+			for b := 1; b <= 6 && i+b < 256; b++ {
+				if r[i+b] != 0 {
+					if r[i]+(r[i+b]<<uint(b)) <= 15 {
+						r[i] += r[i+b] << uint(b)
+						r[i+b] = 0
+					} else if r[i]-(r[i+b]<<uint(b)) >= -15 {
+						r[i] -= r[i+b] << uint(b)
+						for k := i + b; k < 256; k++ {
+							if r[k] == 0 {
+								r[k] = 1
+								break
+							}
+							r[k] = 0
+						}
+					} else {
+						break
+					}
+				}
+			}
+		}
+	}
+}
+
+// GeDoubleScalarMultVartime sets r = a*A + b*B
+// where a = a[0]+256*a[1]+...+256^31 a[31].
+// and b = b[0]+256*b[1]+...+256^31 b[31].
+// B is the Ed25519 base point (x,4/5) with x positive.
+func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) {
+	var aSlide, bSlide [256]int8
+	var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A
+	var t CompletedGroupElement
+	var u, A2 ExtendedGroupElement
+	var i int
+
+	slide(&aSlide, a)
+	slide(&bSlide, b)
+
+	A.ToCached(&Ai[0])
+	A.Double(&t)
+	t.ToExtended(&A2)
+
+	for i := 0; i < 7; i++ {
+		geAdd(&t, &A2, &Ai[i])
+		t.ToExtended(&u)
+		u.ToCached(&Ai[i+1])
+	}
+
+	r.Zero()
+
+	for i = 255; i >= 0; i-- {
+		if aSlide[i] != 0 || bSlide[i] != 0 {
+			break
+		}
+	}
+
+	for ; i >= 0; i-- {
+		r.Double(&t)
+
+		if aSlide[i] > 0 {
+			t.ToExtended(&u)
+			geAdd(&t, &u, &Ai[aSlide[i]/2])
+		} else if aSlide[i] < 0 {
+			t.ToExtended(&u)
+			geSub(&t, &u, &Ai[(-aSlide[i])/2])
+		}
+
+		if bSlide[i] > 0 {
+			t.ToExtended(&u)
+			geMixedAdd(&t, &u, &bi[bSlide[i]/2])
+		} else if bSlide[i] < 0 {
+			t.ToExtended(&u)
+			geMixedSub(&t, &u, &bi[(-bSlide[i])/2])
+		}
+
+		t.ToProjective(r)
+	}
+}
+
+// equal returns 1 if b == c and 0 otherwise, assuming that b and c are
+// non-negative.
+func equal(b, c int32) int32 {
+	x := uint32(b ^ c)
+	x--
+	return int32(x >> 31)
+}
+
+// negative returns 1 if b < 0 and 0 otherwise.
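+// It is branch-free: Go's >> on an int32 is an arithmetic shift, so
+// b >> 31 is -1 for negative b and 0 otherwise, and masking with 1
+// yields the result in constant time.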
+func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
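+//
+// The operands are split into twelve 21-bit limbs (12*21 = 252 bits) and
+// multiplied schoolbook-style into partial limbs s0..s22; the high limbs
+// are then folded down using 2^252 = -27742317777372353535851937790883648493
+// (mod l), whose signed base-2^21 digits are the constants 666643, 470296,
+// 654183, -997805, 136657, -683901 that appear in the reduction below.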
+func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 
<< 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 
+= s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] 
= byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 
+ s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 
3))
+	out[19] = byte(s7 >> 5)
+	out[20] = byte(s7 >> 13)
+	out[21] = byte(s8 >> 0)
+	out[22] = byte(s8 >> 8)
+	out[23] = byte((s8 >> 16) | (s9 << 5))
+	out[24] = byte(s9 >> 3)
+	out[25] = byte(s9 >> 11)
+	out[26] = byte((s9 >> 19) | (s10 << 2))
+	out[27] = byte(s10 >> 6)
+	out[28] = byte((s10 >> 14) | (s11 << 7))
+	out[29] = byte(s11 >> 1)
+	out[30] = byte(s11 >> 9)
+	out[31] = byte(s11 >> 17)
+}
diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go
new file mode 100644
index 0000000000..6931b5114f
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/buffer.go
@@ -0,0 +1,98 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"io"
+	"sync"
+)
+
+// buffer provides a linked list buffer for data exchange
+// between producer and consumer. Theoretically the buffer is
+// of unlimited capacity as it does no allocation of its own.
+type buffer struct {
+	// protects concurrent access to head, tail and closed
+	*sync.Cond
+
+	head *element // the buffer that will be read first
+	tail *element // the buffer that will be read last
+
+	closed bool
+}
+
+// An element represents a single link in a linked list.
+type element struct {
+	buf  []byte
+	next *element
+}
+
+// newBuffer returns an empty buffer that is not closed.
+func newBuffer() *buffer {
+	e := new(element)
+	b := &buffer{
+		Cond: newCond(),
+		head: e,
+		tail: e,
+	}
+	return b
+}
+
+// write makes buf available for Read to receive.
+// buf must not be modified after the call to write.
+func (b *buffer) write(buf []byte) {
+	b.Cond.L.Lock()
+	e := &element{buf: buf}
+	b.tail.next = e
+	b.tail = e
+	b.Cond.Signal()
+	b.Cond.L.Unlock()
+}
+
+// eof closes the buffer. Reads from the buffer once all
+// the data has been consumed will receive io.EOF.
+func (b *buffer) eof() error {
+	b.Cond.L.Lock()
+	b.closed = true
+	b.Cond.Signal()
+	b.Cond.L.Unlock()
+	return nil
+}
+
+// Read reads data from the internal buffer into buf. Reads will block
+// if no data is available, or until the buffer is closed.
+func (b *buffer) Read(buf []byte) (n int, err error) {
+	b.Cond.L.Lock()
+	defer b.Cond.L.Unlock()
+
+	for len(buf) > 0 {
+		// if there is data in b.head, copy it
+		if len(b.head.buf) > 0 {
+			r := copy(buf, b.head.buf)
+			buf, b.head.buf = buf[r:], b.head.buf[r:]
+			n += r
+			continue
+		}
+		// if there is a next buffer, make it the head
+		if len(b.head.buf) == 0 && b.head != b.tail {
+			b.head = b.head.next
+			continue
+		}
+
+		// if at least one byte has been copied, return
+		if n > 0 {
+			break
+		}
+
+		// if nothing was read, and there is nothing outstanding,
+		// check to see if the buffer is closed.
+		if b.closed {
+			err = io.EOF
+			break
+		}
+		// out of buffers, wait for producer
+		b.Cond.Wait()
+	}
+	return
+}
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
new file mode 100644
index 0000000000..6331c94d53
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/certs.go
@@ -0,0 +1,503 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sort"
+	"time"
+)
+
+// These constants from [PROTOCOL.certkeys] represent the algorithm names
+// for certificate types supported by this package.
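+//
+// Editorial note: each name below is the corresponding plain key algorithm
+// name with a "-cert-v01@openssh.com" suffix; the certAlgoNames map and
+// certToPrivAlgo later in this file convert between the two forms.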
+const (
+	CertAlgoRSAv01      = "ssh-rsa-cert-v01@openssh.com"
+	CertAlgoDSAv01      = "ssh-dss-cert-v01@openssh.com"
+	CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
+	CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
+	CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
+	CertAlgoED25519v01  = "ssh-ed25519-cert-v01@openssh.com"
+)
+
+// Certificate types distinguish between host and user
+// certificates. The values can be set in the CertType field of
+// Certificate.
+const (
+	UserCert = 1
+	HostCert = 2
+)
+
+// Signature represents a cryptographic signature.
+type Signature struct {
+	Format string
+	Blob   []byte
+}
+
+// CertTimeInfinity can be used for Certificate.ValidBefore to indicate that
+// a certificate does not expire.
+const CertTimeInfinity = 1<<64 - 1
+
+// A Certificate represents an OpenSSH certificate as defined in
+// [PROTOCOL.certkeys]?rev=1.8.
+type Certificate struct {
+	Nonce           []byte
+	Key             PublicKey
+	Serial          uint64
+	CertType        uint32
+	KeyId           string
+	ValidPrincipals []string
+	ValidAfter      uint64
+	ValidBefore     uint64
+	Permissions
+	Reserved     []byte
+	SignatureKey PublicKey
+	Signature    *Signature
+}
+
+// genericCertData holds the key-independent part of the certificate data.
+// Overall, certificates contain a nonce, public key fields and
+// key-independent fields.
+type genericCertData struct {
+	Serial          uint64
+	CertType        uint32
+	KeyId           string
+	ValidPrincipals []byte
+	ValidAfter      uint64
+	ValidBefore     uint64
+	CriticalOptions []byte
+	Extensions      []byte
+	Reserved        []byte
+	SignatureKey    []byte
+	Signature       []byte
+}
+
+func marshalStringList(namelist []string) []byte {
+	var to []byte
+	for _, name := range namelist {
+		s := struct{ N string }{name}
+		to = append(to, Marshal(&s)...)
+	}
+	return to
+}
+
+type optionsTuple struct {
+	Key   string
+	Value []byte
+}
+
+type optionsTupleValue struct {
+	Value string
+}
+
+// serialize a map of critical options or extensions.
+// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
+// we need two length prefixes for a non-empty string value.
+func marshalTuples(tups map[string]string) []byte {
+	keys := make([]string, 0, len(tups))
+	for key := range tups {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	var ret []byte
+	for _, key := range keys {
+		s := optionsTuple{Key: key}
+		if value := tups[key]; len(value) > 0 {
+			s.Value = Marshal(&optionsTupleValue{value})
+		}
+		ret = append(ret, Marshal(&s)...)
+	}
+	return ret
+}
+
+// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
+// we need two length prefixes for a non-empty option value.
+func parseTuples(in []byte) (map[string]string, error) {
+	tups := map[string]string{}
+	var lastKey string
+	var haveLastKey bool
+
+	for len(in) > 0 {
+		var key, val, extra []byte
+		var ok bool
+
+		if key, in, ok = parseString(in); !ok {
+			return nil, errShortRead
+		}
+		keyStr := string(key)
+		// according to [PROTOCOL.certkeys], the names must be in
+		// lexical order.
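+		// Editorial sketch (added for clarity): per [PROTOCOL.certkeys], each
+		// tuple on the wire is
+		//
+		//	string  name
+		//	string  data    // empty, or itself a string: the value
+		//
+		// so a non-empty value carries two length prefixes (issue #10569),
+		// which is why val is unwrapped from the data field a few lines below.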
+ if haveLastKey && keyStr <= lastKey { + return nil, fmt.Errorf("ssh: certificate options are not in lexical order") + } + lastKey, haveLastKey = keyStr, true + // the next field is a data field, which if non-empty has a string embedded + if val, in, ok = parseString(in); !ok { + return nil, errShortRead + } + if len(val) > 0 { + val, extra, ok = parseString(val) + if !ok { + return nil, errShortRead + } + if len(extra) > 0 { + return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") + } + tups[keyStr] = string(val) + } else { + tups[keyStr] = "" + } + } + return tups, nil +} + +func parseCert(in []byte, privAlgo string) (*Certificate, error) { + nonce, rest, ok := parseString(in) + if !ok { + return nil, errShortRead + } + + key, rest, err := parsePubKey(rest, privAlgo) + if err != nil { + return nil, err + } + + var g genericCertData + if err := Unmarshal(rest, &g); err != nil { + return nil, err + } + + c := &Certificate{ + Nonce: nonce, + Key: key, + Serial: g.Serial, + CertType: g.CertType, + KeyId: g.KeyId, + ValidAfter: g.ValidAfter, + ValidBefore: g.ValidBefore, + } + + for principals := g.ValidPrincipals; len(principals) > 0; { + principal, rest, ok := parseString(principals) + if !ok { + return nil, errShortRead + } + c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) + principals = rest + } + + c.CriticalOptions, err = parseTuples(g.CriticalOptions) + if err != nil { + return nil, err + } + c.Extensions, err = parseTuples(g.Extensions) + if err != nil { + return nil, err + } + c.Reserved = g.Reserved + k, err := ParsePublicKey(g.SignatureKey) + if err != nil { + return nil, err + } + + c.SignatureKey = k + c.Signature, rest, ok = parseSignatureBody(g.Signature) + if !ok || len(rest) > 0 { + return nil, errors.New("ssh: signature parse error") + } + + return c, nil +} + +type openSSHCertSigner struct { + pub *Certificate + signer Signer +} + +// NewCertSigner returns a Signer that signs with the given Certificate, whose +// private key is held by signer. It returns an error if the public key in cert +// doesn't match the key used by signer. +func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { + if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + return nil, errors.New("ssh: signer and cert have different public key") + } + + return &openSSHCertSigner{cert, signer}, nil +} + +func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.signer.Sign(rand, data) +} + +func (s *openSSHCertSigner) PublicKey() PublicKey { + return s.pub +} + +const sourceAddressCriticalOption = "source-address" + +// CertChecker does the work of verifying a certificate. Its methods +// can be plugged into ClientConfig.HostKeyCallback and +// ServerConfig.PublicKeyCallback. For the CertChecker to work, +// minimally, the IsAuthority callback should be set. +type CertChecker struct { + // SupportedCriticalOptions lists the CriticalOptions that the + // server application layer understands. These are only used + // for user certificates. + SupportedCriticalOptions []string + + // IsAuthority should return true if the key is recognized as + // an authority. This allows for certificates to be signed by other + // certificates. + IsAuthority func(auth PublicKey) bool + + // Clock is used for verifying time stamps. If nil, time.Now + // is used. + Clock func() time.Time + + // UserKeyFallback is called when CertChecker.Authenticate encounters a + // public key that is not a certificate. 
It must implement validation
+	// of user keys or else, if nil, all such keys are rejected.
+	UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
+
+	// HostKeyFallback is called when CertChecker.CheckHostKey encounters a
+	// public key that is not a certificate. It must implement host key
+	// validation or else, if nil, all such keys are rejected.
+	HostKeyFallback func(addr string, remote net.Addr, key PublicKey) error
+
+	// IsRevoked is called for each certificate so that revocation checking
+	// can be implemented. It should return true if the given certificate
+	// is revoked and false otherwise. If nil, no certificates are
+	// considered to have been revoked.
+	IsRevoked func(cert *Certificate) bool
+}
+
+// CheckHostKey checks a host key certificate. This method can be
+// plugged into ClientConfig.HostKeyCallback.
+func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error {
+	cert, ok := key.(*Certificate)
+	if !ok {
+		if c.HostKeyFallback != nil {
+			return c.HostKeyFallback(addr, remote, key)
+		}
+		return errors.New("ssh: non-certificate host key")
+	}
+	if cert.CertType != HostCert {
+		return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType)
+	}
+
+	return c.CheckCert(addr, cert)
+}
+
+// Authenticate checks a user certificate. Authenticate can be used as
+// a value for ServerConfig.PublicKeyCallback.
+func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) {
+	cert, ok := pubKey.(*Certificate)
+	if !ok {
+		if c.UserKeyFallback != nil {
+			return c.UserKeyFallback(conn, pubKey)
+		}
+		return nil, errors.New("ssh: normal key pairs not accepted")
+	}
+
+	if cert.CertType != UserCert {
+		return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType)
+	}
+
+	if err := c.CheckCert(conn.User(), cert); err != nil {
+		return nil, err
+	}
+
+	return &cert.Permissions, nil
+}
+
+// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and
+// the signature of the certificate.
+func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
+	if c.IsRevoked != nil && c.IsRevoked(cert) {
+		return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial)
+	}
+
+	for opt := range cert.CriticalOptions {
+		// sourceAddressCriticalOption will be enforced by
+		// serverAuthenticate
+		if opt == sourceAddressCriticalOption {
+			continue
+		}
+
+		found := false
+		for _, supp := range c.SupportedCriticalOptions {
+			if supp == opt {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt)
+		}
+	}
+
+	if len(cert.ValidPrincipals) > 0 {
+		// By default, certs are valid for all users/hosts.
+ found := false + for _, p := range cert.ValidPrincipals { + if p == principal { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) + } + } + + if !c.IsAuthority(cert.SignatureKey) { + return fmt.Errorf("ssh: certificate signed by unrecognized authority") + } + + clock := c.Clock + if clock == nil { + clock = time.Now + } + + unixNow := clock().Unix() + if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { + return fmt.Errorf("ssh: cert is not yet valid") + } + if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { + return fmt.Errorf("ssh: cert has expired") + } + if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { + return fmt.Errorf("ssh: certificate signature does not verify") + } + + return nil +} + +// SignCert sets c.SignatureKey to the authority's public key and stores a +// Signature, by authority, in the certificate. +func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { + c.Nonce = make([]byte, 32) + if _, err := io.ReadFull(rand, c.Nonce); err != nil { + return err + } + c.SignatureKey = authority.PublicKey() + + sig, err := authority.Sign(rand, c.bytesForSigning()) + if err != nil { + return err + } + c.Signature = sig + return nil +} + +var certAlgoNames = map[string]string{ + KeyAlgoRSA: CertAlgoRSAv01, + KeyAlgoDSA: CertAlgoDSAv01, + KeyAlgoECDSA256: CertAlgoECDSA256v01, + KeyAlgoECDSA384: CertAlgoECDSA384v01, + KeyAlgoECDSA521: CertAlgoECDSA521v01, + KeyAlgoED25519: CertAlgoED25519v01, +} + +// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. +// Panics if a non-certificate algorithm is passed. +func certToPrivAlgo(algo string) string { + for privAlgo, pubAlgo := range certAlgoNames { + if pubAlgo == algo { + return privAlgo + } + } + panic("unknown cert algorithm") +} + +func (cert *Certificate) bytesForSigning() []byte { + c2 := *cert + c2.Signature = nil + out := c2.Marshal() + // Drop trailing signature length. + return out[:len(out)-4] +} + +// Marshal serializes c into OpenSSH's wire format. It is part of the +// PublicKey interface. +func (c *Certificate) Marshal() []byte { + generic := genericCertData{ + Serial: c.Serial, + CertType: c.CertType, + KeyId: c.KeyId, + ValidPrincipals: marshalStringList(c.ValidPrincipals), + ValidAfter: uint64(c.ValidAfter), + ValidBefore: uint64(c.ValidBefore), + CriticalOptions: marshalTuples(c.CriticalOptions), + Extensions: marshalTuples(c.Extensions), + Reserved: c.Reserved, + SignatureKey: c.SignatureKey.Marshal(), + } + if c.Signature != nil { + generic.Signature = Marshal(c.Signature) + } + genericBytes := Marshal(&generic) + keyBytes := c.Key.Marshal() + _, keyBytes, _ = parseString(keyBytes) + prefix := Marshal(&struct { + Name string + Nonce []byte + Key []byte `ssh:"rest"` + }{c.Type(), c.Nonce, keyBytes}) + + result := make([]byte, 0, len(prefix)+len(genericBytes)) + result = append(result, prefix...) + result = append(result, genericBytes...) + return result +} + +// Type returns the key name. It is part of the PublicKey interface. +func (c *Certificate) Type() string { + algo, ok := certAlgoNames[c.Key.Type()] + if !ok { + panic("unknown cert key type " + c.Key.Type()) + } + return algo +} + +// Verify verifies a signature against the certificate's public +// key. It is part of the PublicKey interface. 
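+// Editorial example (illustrative; caSigner and userKey are hypothetical
+// values obtained elsewhere): minting a user certificate with SignCert and
+// validating it with a CertChecker:
+//
+//	cert := &Certificate{
+//		Key:             userKey,
+//		CertType:        UserCert,
+//		KeyId:           "alice",
+//		ValidPrincipals: []string{"alice"},
+//		ValidBefore:     CertTimeInfinity,
+//	}
+//	if err := cert.SignCert(rand.Reader, caSigner); err != nil {
+//		// handle error
+//	}
+//	checker := &CertChecker{
+//		IsAuthority: func(auth PublicKey) bool {
+//			return bytes.Equal(auth.Marshal(), caSigner.PublicKey().Marshal())
+//		},
+//	}
+//	err := checker.CheckCert("alice", cert)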
+func (c *Certificate) Verify(data []byte, sig *Signature) error { + return c.Key.Verify(data, sig) +} + +func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { + format, in, ok := parseString(in) + if !ok { + return + } + + out = &Signature{ + Format: string(format), + } + + if out.Blob, in, ok = parseString(in); !ok { + return + } + + return out, in, ok +} + +func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { + sigBytes, rest, ok := parseString(in) + if !ok { + return + } + + out, trailing, ok := parseSignatureBody(sigBytes) + if !ok || len(trailing) > 0 { + return nil, nil, false + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go new file mode 100644 index 0000000000..195530ea0d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/channel.go @@ -0,0 +1,633 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "sync" +) + +const ( + minPacketLength = 9 + // channelMaxPacket contains the maximum number of bytes that will be + // sent in a single packet. As per RFC 4253, section 6.1, 32k is also + // the minimum. + channelMaxPacket = 1 << 15 + // We follow OpenSSH here. + channelWindowSize = 64 * channelMaxPacket +) + +// NewChannel represents an incoming request to a channel. It must either be +// accepted for use by calling Accept, or rejected by calling Reject. +type NewChannel interface { + // Accept accepts the channel creation request. It returns the Channel + // and a Go channel containing SSH requests. The Go channel must be + // serviced otherwise the Channel will hang. + Accept() (Channel, <-chan *Request, error) + + // Reject rejects the channel creation request. After calling + // this, no other methods on the Channel may be called. + Reject(reason RejectionReason, message string) error + + // ChannelType returns the type of the channel, as supplied by the + // client. + ChannelType() string + + // ExtraData returns the arbitrary payload for this channel, as supplied + // by the client. This data is specific to the channel type. + ExtraData() []byte +} + +// A Channel is an ordered, reliable, flow-controlled, duplex stream +// that is multiplexed over an SSH connection. +type Channel interface { + // Read reads up to len(data) bytes from the channel. + Read(data []byte) (int, error) + + // Write writes len(data) bytes to the channel. + Write(data []byte) (int, error) + + // Close signals end of channel use. No data may be sent after this + // call. + Close() error + + // CloseWrite signals the end of sending in-band + // data. Requests may still be sent, and the other side may + // still send data + CloseWrite() error + + // SendRequest sends a channel request. If wantReply is true, + // it will wait for a reply and return the result as a + // boolean, otherwise the return value will be false. Channel + // requests are out-of-band messages so they may be sent even + // if the data stream is closed or blocked by flow control. + // If the channel is closed before a reply is returned, io.EOF + // is returned. + SendRequest(name string, wantReply bool, payload []byte) (bool, error) + + // Stderr returns an io.ReadWriter that writes to this channel + // with the extended data type set to stderr. 
Stderr may + // safely be read and written from a different goroutine than + // Read and Write respectively. + Stderr() io.ReadWriter +} + +// Request is a request sent outside of the normal stream of +// data. Requests can either be specific to an SSH channel, or they +// can be global. +type Request struct { + Type string + WantReply bool + Payload []byte + + ch *channel + mux *mux +} + +// Reply sends a response to a request. It must be called for all requests +// where WantReply is true and is a no-op otherwise. The payload argument is +// ignored for replies to channel-specific requests. +func (r *Request) Reply(ok bool, payload []byte) error { + if !r.WantReply { + return nil + } + + if r.ch == nil { + return r.mux.ackRequest(ok, payload) + } + + return r.ch.ackRequest(ok) +} + +// RejectionReason is an enumeration used when rejecting channel creation +// requests. See RFC 4254, section 5.1. +type RejectionReason uint32 + +const ( + Prohibited RejectionReason = iota + 1 + ConnectionFailed + UnknownChannelType + ResourceShortage +) + +// String converts the rejection reason to human readable form. +func (r RejectionReason) String() string { + switch r { + case Prohibited: + return "administratively prohibited" + case ConnectionFailed: + return "connect failed" + case UnknownChannelType: + return "unknown channel type" + case ResourceShortage: + return "resource shortage" + } + return fmt.Sprintf("unknown reason %d", int(r)) +} + +func min(a uint32, b int) uint32 { + if a < uint32(b) { + return a + } + return uint32(b) +} + +type channelDirection uint8 + +const ( + channelInbound channelDirection = iota + channelOutbound +) + +// channel is an implementation of the Channel interface that works +// with the mux class. +type channel struct { + // R/O after creation + chanType string + extraData []byte + localId, remoteId uint32 + + // maxIncomingPayload and maxRemotePayload are the maximum + // payload sizes of normal and extended data packets for + // receiving and sending, respectively. The wire packet will + // be 9 or 13 bytes larger (excluding encryption overhead). + maxIncomingPayload uint32 + maxRemotePayload uint32 + + mux *mux + + // decided is set to true if an accept or reject message has been sent + // (for outbound channels) or received (for inbound channels). + decided bool + + // direction contains either channelOutbound, for channels created + // locally, or channelInbound, for channels created by the peer. + direction channelDirection + + // Pending internal channel messages. + msg chan interface{} + + // Since requests have no ID, there can be only one request + // with WantReply=true outstanding. This lock is held by a + // goroutine that has such an outgoing request pending. + sentRequestMu sync.Mutex + + incomingRequests chan *Request + + sentEOF bool + + // thread-safe data + remoteWin window + pending *buffer + extPending *buffer + + // windowMu protects myWindow, the flow-control window. + windowMu sync.Mutex + myWindow uint32 + + // writeMu serializes calls to mux.conn.writePacket() and + // protects sentClose and packetPool. This mutex must be + // different from windowMu, as writePacket can block if there + // is a key exchange pending. + writeMu sync.Mutex + sentClose bool + + // packetPool has a buffer for each extended channel ID to + // save allocations during writes. + packetPool map[uint32][]byte +} + +// writePacket sends a packet. If the packet is a channel close, it updates +// sentClose. This method takes the lock c.writeMu. 
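+// Editorial example (illustrative): a typical loop draining the *Request
+// channel returned by NewChannel.Accept; Reply is a no-op when the peer did
+// not ask for one, so it can be called unconditionally:
+//
+//	for req := range requests {
+//		req.Reply(req.Type == "shell", nil)
+//	}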
+func (c *channel) writePacket(packet []byte) error { + c.writeMu.Lock() + if c.sentClose { + c.writeMu.Unlock() + return io.EOF + } + c.sentClose = (packet[0] == msgChannelClose) + err := c.mux.conn.writePacket(packet) + c.writeMu.Unlock() + return err +} + +func (c *channel) sendMessage(msg interface{}) error { + if debugMux { + log.Printf("send(%d): %#v", c.mux.chanList.offset, msg) + } + + p := Marshal(msg) + binary.BigEndian.PutUint32(p[1:], c.remoteId) + return c.writePacket(p) +} + +// WriteExtended writes data to a specific extended stream. These streams are +// used, for example, for stderr. +func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { + if c.sentEOF { + return 0, io.EOF + } + // 1 byte message type, 4 bytes remoteId, 4 bytes data length + opCode := byte(msgChannelData) + headerLength := uint32(9) + if extendedCode > 0 { + headerLength += 4 + opCode = msgChannelExtendedData + } + + c.writeMu.Lock() + packet := c.packetPool[extendedCode] + // We don't remove the buffer from packetPool, so + // WriteExtended calls from different goroutines will be + // flagged as errors by the race detector. + c.writeMu.Unlock() + + for len(data) > 0 { + space := min(c.maxRemotePayload, len(data)) + if space, err = c.remoteWin.reserve(space); err != nil { + return n, err + } + if want := headerLength + space; uint32(cap(packet)) < want { + packet = make([]byte, want) + } else { + packet = packet[:want] + } + + todo := data[:space] + + packet[0] = opCode + binary.BigEndian.PutUint32(packet[1:], c.remoteId) + if extendedCode > 0 { + binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) + } + binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) + copy(packet[headerLength:], todo) + if err = c.writePacket(packet); err != nil { + return n, err + } + + n += len(todo) + data = data[len(todo):] + } + + c.writeMu.Lock() + c.packetPool[extendedCode] = packet + c.writeMu.Unlock() + + return n, err +} + +func (c *channel) handleData(packet []byte) error { + headerLen := 9 + isExtendedData := packet[0] == msgChannelExtendedData + if isExtendedData { + headerLen = 13 + } + if len(packet) < headerLen { + // malformed data packet + return parseError(packet[0]) + } + + var extended uint32 + if isExtendedData { + extended = binary.BigEndian.Uint32(packet[5:]) + } + + length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) + if length == 0 { + return nil + } + if length > c.maxIncomingPayload { + // TODO(hanwen): should send Disconnect? + return errors.New("ssh: incoming packet exceeds maximum payload size") + } + + data := packet[headerLen:] + if length != uint32(len(data)) { + return errors.New("ssh: wrong packet length") + } + + c.windowMu.Lock() + if c.myWindow < length { + c.windowMu.Unlock() + // TODO(hanwen): should send Disconnect with reason? + return errors.New("ssh: remote side wrote too much") + } + c.myWindow -= length + c.windowMu.Unlock() + + if extended == 1 { + c.extPending.write(data) + } else if extended > 0 { + // discard other extended data. + } else { + c.pending.write(data) + } + return nil +} + +func (c *channel) adjustWindow(n uint32) error { + c.windowMu.Lock() + // Since myWindow is managed on our side, and can never exceed + // the initial window setting, we don't worry about overflow. 
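+	// Editorial note: with the constants at the top of this file the initial
+	// window is channelWindowSize = 64 * channelMaxPacket = 64 * 32768 bytes
+	// = 2 MiB, and every successful Read returns the consumed byte count to
+	// the peer via the windowAdjustMsg sent below.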
+ c.myWindow += uint32(n) + c.windowMu.Unlock() + return c.sendMessage(windowAdjustMsg{ + AdditionalBytes: uint32(n), + }) +} + +func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { + switch extended { + case 1: + n, err = c.extPending.Read(data) + case 0: + n, err = c.pending.Read(data) + default: + return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) + } + + if n > 0 { + err = c.adjustWindow(uint32(n)) + // sendWindowAdjust can return io.EOF if the remote + // peer has closed the connection, however we want to + // defer forwarding io.EOF to the caller of Read until + // the buffer has been drained. + if n > 0 && err == io.EOF { + err = nil + } + } + + return n, err +} + +func (c *channel) close() { + c.pending.eof() + c.extPending.eof() + close(c.msg) + close(c.incomingRequests) + c.writeMu.Lock() + // This is not necessary for a normal channel teardown, but if + // there was another error, it is. + c.sentClose = true + c.writeMu.Unlock() + // Unblock writers. + c.remoteWin.close() +} + +// responseMessageReceived is called when a success or failure message is +// received on a channel to check that such a message is reasonable for the +// given channel. +func (c *channel) responseMessageReceived() error { + if c.direction == channelInbound { + return errors.New("ssh: channel response message received on inbound channel") + } + if c.decided { + return errors.New("ssh: duplicate response received for channel") + } + c.decided = true + return nil +} + +func (c *channel) handlePacket(packet []byte) error { + switch packet[0] { + case msgChannelData, msgChannelExtendedData: + return c.handleData(packet) + case msgChannelClose: + c.sendMessage(channelCloseMsg{PeersId: c.remoteId}) + c.mux.chanList.remove(c.localId) + c.close() + return nil + case msgChannelEOF: + // RFC 4254 is mute on how EOF affects dataExt messages but + // it is logical to signal EOF at the same time. 
+ c.extPending.eof() + c.pending.eof() + return nil + } + + decoded, err := decode(packet) + if err != nil { + return err + } + + switch msg := decoded.(type) { + case *channelOpenFailureMsg: + if err := c.responseMessageReceived(); err != nil { + return err + } + c.mux.chanList.remove(msg.PeersId) + c.msg <- msg + case *channelOpenConfirmMsg: + if err := c.responseMessageReceived(); err != nil { + return err + } + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) + } + c.remoteId = msg.MyId + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.MyWindow) + c.msg <- msg + case *windowAdjustMsg: + if !c.remoteWin.add(msg.AdditionalBytes) { + return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) + } + case *channelRequestMsg: + req := Request{ + Type: msg.Request, + WantReply: msg.WantReply, + Payload: msg.RequestSpecificData, + ch: c, + } + + c.incomingRequests <- &req + default: + c.msg <- msg + } + return nil +} + +func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { + ch := &channel{ + remoteWin: window{Cond: newCond()}, + myWindow: channelWindowSize, + pending: newBuffer(), + extPending: newBuffer(), + direction: direction, + incomingRequests: make(chan *Request, chanSize), + msg: make(chan interface{}, chanSize), + chanType: chanType, + extraData: extraData, + mux: m, + packetPool: make(map[uint32][]byte), + } + ch.localId = m.chanList.add(ch) + return ch +} + +var errUndecided = errors.New("ssh: must Accept or Reject channel") +var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") + +type extChannel struct { + code uint32 + ch *channel +} + +func (e *extChannel) Write(data []byte) (n int, err error) { + return e.ch.WriteExtended(data, e.code) +} + +func (e *extChannel) Read(data []byte) (n int, err error) { + return e.ch.ReadExtended(data, e.code) +} + +func (c *channel) Accept() (Channel, <-chan *Request, error) { + if c.decided { + return nil, nil, errDecidedAlready + } + c.maxIncomingPayload = channelMaxPacket + confirm := channelOpenConfirmMsg{ + PeersId: c.remoteId, + MyId: c.localId, + MyWindow: c.myWindow, + MaxPacketSize: c.maxIncomingPayload, + } + c.decided = true + if err := c.sendMessage(confirm); err != nil { + return nil, nil, err + } + + return c, c.incomingRequests, nil +} + +func (ch *channel) Reject(reason RejectionReason, message string) error { + if ch.decided { + return errDecidedAlready + } + reject := channelOpenFailureMsg{ + PeersId: ch.remoteId, + Reason: reason, + Message: message, + Language: "en", + } + ch.decided = true + return ch.sendMessage(reject) +} + +func (ch *channel) Read(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.ReadExtended(data, 0) +} + +func (ch *channel) Write(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.WriteExtended(data, 0) +} + +func (ch *channel) CloseWrite() error { + if !ch.decided { + return errUndecided + } + ch.sentEOF = true + return ch.sendMessage(channelEOFMsg{ + PeersId: ch.remoteId}) +} + +func (ch *channel) Close() error { + if !ch.decided { + return errUndecided + } + + return ch.sendMessage(channelCloseMsg{ + PeersId: ch.remoteId}) +} + +// Extended returns an io.ReadWriter that sends and receives data on the given, +// SSH extended stream. Such streams are used, for example, for stderr. 
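+// Editorial example (illustrative): stderr is commonly drained in a separate
+// goroutine while the main stream is read directly:
+//
+//	go io.Copy(os.Stderr, ch.Stderr())
+//	io.Copy(os.Stdout, ch)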
+func (ch *channel) Extended(code uint32) io.ReadWriter { + if !ch.decided { + return nil + } + return &extChannel{code, ch} +} + +func (ch *channel) Stderr() io.ReadWriter { + return ch.Extended(1) +} + +func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + if !ch.decided { + return false, errUndecided + } + + if wantReply { + ch.sentRequestMu.Lock() + defer ch.sentRequestMu.Unlock() + } + + msg := channelRequestMsg{ + PeersId: ch.remoteId, + Request: name, + WantReply: wantReply, + RequestSpecificData: payload, + } + + if err := ch.sendMessage(msg); err != nil { + return false, err + } + + if wantReply { + m, ok := (<-ch.msg) + if !ok { + return false, io.EOF + } + switch m.(type) { + case *channelRequestFailureMsg: + return false, nil + case *channelRequestSuccessMsg: + return true, nil + default: + return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) + } + } + + return false, nil +} + +// ackRequest either sends an ack or nack to the channel request. +func (ch *channel) ackRequest(ok bool) error { + if !ch.decided { + return errUndecided + } + + var msg interface{} + if !ok { + msg = channelRequestFailureMsg{ + PeersId: ch.remoteId, + } + } else { + msg = channelRequestSuccessMsg{ + PeersId: ch.remoteId, + } + } + return ch.sendMessage(msg) +} + +func (ch *channel) ChannelType() string { + return ch.chanType +} + +func (ch *channel) ExtraData() []byte { + return ch.extraData +} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go new file mode 100644 index 0000000000..13484ab4b3 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -0,0 +1,627 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rc4" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" +) + +const ( + packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. + + // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations + // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC + // indicates implementations SHOULD be able to handle larger packet sizes, but then + // waffles on about reasonable limits. + // + // OpenSSH caps their maxPacket at 256kB so we choose to do + // the same. maxPacket is also used to ensure that uint32 + // length fields do not overflow, so it should remain well + // below 4G. + maxPacket = 256 * 1024 +) + +// noneCipher implements cipher.Stream and provides no encryption. It is used +// by the transport before the first key-exchange. 
+type noneCipher struct{}
+
+func (c noneCipher) XORKeyStream(dst, src []byte) {
+	copy(dst, src)
+}
+
+func newAESCTR(key, iv []byte) (cipher.Stream, error) {
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+	return cipher.NewCTR(c, iv), nil
+}
+
+func newRC4(key, iv []byte) (cipher.Stream, error) {
+	return rc4.NewCipher(key)
+}
+
+type streamCipherMode struct {
+	keySize    int
+	ivSize     int
+	skip       int
+	createFunc func(key, iv []byte) (cipher.Stream, error)
+}
+
+func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
+	if len(key) < c.keySize {
+		panic("ssh: key length too small for cipher")
+	}
+	if len(iv) < c.ivSize {
+		panic("ssh: iv too small for cipher")
+	}
+
+	stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
+	if err != nil {
+		return nil, err
+	}
+
+	var streamDump []byte
+	if c.skip > 0 {
+		streamDump = make([]byte, 512)
+	}
+
+	for remainingToDump := c.skip; remainingToDump > 0; {
+		dumpThisTime := remainingToDump
+		if dumpThisTime > len(streamDump) {
+			dumpThisTime = len(streamDump)
+		}
+		stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
+		remainingToDump -= dumpThisTime
+	}
+
+	return stream, nil
+}
+
+// cipherModes documents properties of supported ciphers. Ciphers not included
+// are not supported and will not be negotiated, even if explicitly requested in
+// ClientConfig.Crypto.Ciphers.
+var cipherModes = map[string]*streamCipherMode{
+	// Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
+	// are defined in the order specified in the RFC.
+	"aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
+	"aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
+	"aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
+
+	// Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
+	// They are defined in the order specified in the RFC.
+	"arcfour128": {16, 0, 1536, newRC4},
+	"arcfour256": {32, 0, 1536, newRC4},
+
+	// Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
+	// Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
+	// RC4) has problems with weak keys, and should be used with caution."
+	// RFC4345 introduces improved versions of Arcfour.
+	"arcfour": {16, 0, 0, newRC4},
+
+	// AES-GCM is not a stream cipher, so it is constructed with a
+	// special case. If we add any more non-stream ciphers, we
+	// should invest in a cleaner way to do this.
+	gcmCipherID: {16, 12, 0, nil},
+
+	// CBC mode is insecure and so is not included in the default config.
+	// (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
+	// needed, it's possible to specify a custom Config to enable it.
+	// You should expect that an active attacker can recover plaintext if
+	// you do.
+	aes128cbcID: {16, aes.BlockSize, 0, nil},
+
+	// 3des-cbc is insecure and is disabled by default.
+	tripledescbcID: {24, des.BlockSize, 0, nil},
+}
+
+// prefixLen is the length of the packet prefix that contains the packet length
+// and number of padding bytes.
+const prefixLen = 5
+
+// streamPacketCipher is a packetCipher using a stream cipher.
+type streamPacketCipher struct {
+	mac    hash.Hash
+	cipher cipher.Stream
+	etm    bool
+
+	// The following members are to avoid per-packet allocations.
+	prefix      [prefixLen]byte
+	seqNumBytes [4]byte
+	padding     [2 * packetSizeMultiple]byte
+	packetData  []byte
+	macResult   []byte
+}
+
+// readPacket reads and decrypts a single packet from the reader argument.
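+// Editorial sketch of the RFC 4253, section 6 wire layout handled below:
+//
+//	uint32    packet_length  // everything after this field, except the MAC
+//	byte      padding_length
+//	byte[n1]  payload
+//	byte[n2]  random padding (at least 4 bytes)
+//	byte[m]   MAC over the sequence number and the plaintext packet
+//
+// In encrypt-then-MAC (etm) mode the packet_length stays unencrypted and the
+// MAC covers the length field plus the ciphertext instead, which is why the
+// code below treats s.prefix differently when s.etm is set.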
+func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, s.prefix[:]); err != nil { + return nil, err + } + + var encryptedPaddingLength [1]byte + if s.mac != nil && s.etm { + copy(encryptedPaddingLength[:], s.prefix[4:5]) + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } else { + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + length := binary.BigEndian.Uint32(s.prefix[0:4]) + paddingLength := uint32(s.prefix[4]) + + var macSize uint32 + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + if s.etm { + s.mac.Write(s.prefix[:4]) + s.mac.Write(encryptedPaddingLength[:]) + } else { + s.mac.Write(s.prefix[:]) + } + macSize = uint32(s.mac.Size()) + } + + if length <= paddingLength+1 { + return nil, errors.New("ssh: invalid packet length, packet too small") + } + + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + // the maxPacket check above ensures that length-1+macSize + // does not overflow. + if uint32(cap(s.packetData)) < length-1+macSize { + s.packetData = make([]byte, length-1+macSize) + } else { + s.packetData = s.packetData[:length-1+macSize] + } + + if _, err := io.ReadFull(r, s.packetData); err != nil { + return nil, err + } + mac := s.packetData[length-1:] + data := s.packetData[:length-1] + + if s.mac != nil && s.etm { + s.mac.Write(data) + } + + s.cipher.XORKeyStream(data, data) + + if s.mac != nil { + if !s.etm { + s.mac.Write(data) + } + s.macResult = s.mac.Sum(s.macResult[:0]) + if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { + return nil, errors.New("ssh: MAC failure") + } + } + + return s.packetData[:length-paddingLength-1], nil +} + +// writePacket encrypts and sends a packet of data to the writer argument +func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + if len(packet) > maxPacket { + return errors.New("ssh: packet too large") + } + + aadlen := 0 + if s.mac != nil && s.etm { + // packet length is not encrypted for EtM modes + aadlen = 4 + } + + paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple + if paddingLength < 4 { + paddingLength += packetSizeMultiple + } + + length := len(packet) + 1 + paddingLength + binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) + s.prefix[4] = byte(paddingLength) + padding := s.padding[:paddingLength] + if _, err := io.ReadFull(rand, padding); err != nil { + return err + } + + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + + if s.etm { + // For EtM algorithms, the packet length must stay unencrypted, + // but the following data (padding length) must be encrypted + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } + + s.mac.Write(s.prefix[:]) + + if !s.etm { + // For non-EtM algorithms, the algorithm is applied on unencrypted data + s.mac.Write(packet) + s.mac.Write(padding) + } + } + + if !(s.mac != nil && s.etm) { + // For EtM algorithms, the padding length has already been encrypted + // and the packet length must remain unencrypted + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + s.cipher.XORKeyStream(packet, packet) + s.cipher.XORKeyStream(padding, padding) + + if s.mac != nil && s.etm { + // For EtM algorithms, packet and padding must be encrypted + s.mac.Write(packet) + s.mac.Write(padding) + } + + if _, err := w.Write(s.prefix[:]); err != nil 
{ + return err + } + if _, err := w.Write(packet); err != nil { + return err + } + if _, err := w.Write(padding); err != nil { + return err + } + + if s.mac != nil { + s.macResult = s.mac.Sum(s.macResult[:0]) + if _, err := w.Write(s.macResult); err != nil { + return err + } + } + + return nil +} + +type gcmCipher struct { + aead cipher.AEAD + prefix [4]byte + iv []byte + buf []byte +} + +func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + aead, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + return &gcmCipher{ + aead: aead, + iv: iv, + }, nil +} + +const gcmTagSize = 16 + +func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + // Pad out to multiple of 16 bytes. This is different from the + // stream cipher because that encrypts the length too. + padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) + if padding < 4 { + padding += packetSizeMultiple + } + + length := uint32(len(packet) + int(padding) + 1) + binary.BigEndian.PutUint32(c.prefix[:], length) + if _, err := w.Write(c.prefix[:]); err != nil { + return err + } + + if cap(c.buf) < int(length) { + c.buf = make([]byte, length) + } else { + c.buf = c.buf[:length] + } + + c.buf[0] = padding + copy(c.buf[1:], packet) + if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { + return err + } + c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if _, err := w.Write(c.buf); err != nil { + return err + } + c.incIV() + + return nil +} + +func (c *gcmCipher) incIV() { + for i := 4 + 7; i >= 4; i-- { + c.iv[i]++ + if c.iv[i] != 0 { + break + } + } +} + +func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, c.prefix[:]); err != nil { + return nil, err + } + length := binary.BigEndian.Uint32(c.prefix[:]) + if length > maxPacket { + return nil, errors.New("ssh: max packet length exceeded.") + } + + if cap(c.buf) < int(length+gcmTagSize) { + c.buf = make([]byte, length+gcmTagSize) + } else { + c.buf = c.buf[:length+gcmTagSize] + } + + if _, err := io.ReadFull(r, c.buf); err != nil { + return nil, err + } + + plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if err != nil { + return nil, err + } + c.incIV() + + padding := plain[0] + if padding < 4 || padding >= 20 { + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding+1) >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + plain = plain[1 : length-uint32(padding)] + return plain, nil +} + +// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 +type cbcCipher struct { + mac hash.Hash + macSize uint32 + decrypter cipher.BlockMode + encrypter cipher.BlockMode + + // The following members are to avoid per-packet allocations. + seqNumBytes [4]byte + packetData []byte + macResult []byte + + // Amount of data we should still read to hide which + // verification error triggered. 
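+	// Editorial note: readPacketLeaky sets this so that, on any cbcError,
+	// readPacket drains a fixed total amount of input whether the length
+	// check or the MAC check failed, blunting the CBC plaintext-recovery
+	// attacks referenced in the cipherModes comment.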
+ oracleCamouflage uint32 +} + +func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + cbc := &cbcCipher{ + mac: macModes[algs.MAC].new(macKey), + decrypter: cipher.NewCBCDecrypter(c, iv), + encrypter: cipher.NewCBCEncrypter(c, iv), + packetData: make([]byte, 1024), + } + if cbc.mac != nil { + cbc.macSize = uint32(cbc.mac.Size()) + } + + return cbc, nil +} + +func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, iv, key, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, iv, key, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func maxUInt32(a, b int) uint32 { + if a > b { + return uint32(a) + } + return uint32(b) +} + +const ( + cbcMinPacketSizeMultiple = 8 + cbcMinPacketSize = 16 + cbcMinPaddingSize = 4 +) + +// cbcError represents a verification error that may leak information. +type cbcError string + +func (e cbcError) Error() string { return string(e) } + +func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + p, err := c.readPacketLeaky(seqNum, r) + if err != nil { + if _, ok := err.(cbcError); ok { + // Verification error: read a fixed amount of + // data, to make distinguishing between + // failing MAC and failing length check more + // difficult. + io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) + } + } + return p, err +} + +func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { + blockSize := c.decrypter.BlockSize() + + // Read the header, which will include some of the subsequent data in the + // case of block ciphers - this is copied back to the payload later. + // How many bytes of payload/padding will be read with this first read. + firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) + firstBlock := c.packetData[:firstBlockLength] + if _, err := io.ReadFull(r, firstBlock); err != nil { + return nil, err + } + + c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength + + c.decrypter.CryptBlocks(firstBlock, firstBlock) + length := binary.BigEndian.Uint32(firstBlock[:4]) + if length > maxPacket { + return nil, cbcError("ssh: packet too large") + } + if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { + // The minimum size of a packet is 16 (or the cipher block size, whichever + // is larger) bytes. + return nil, cbcError("ssh: packet too small") + } + // The length of the packet (including the length field but not the MAC) must + // be a multiple of the block size or 8, whichever is larger. + if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { + return nil, cbcError("ssh: invalid packet length multiple") + } + + paddingLength := uint32(firstBlock[4]) + if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { + return nil, cbcError("ssh: invalid packet length") + } + + // Positions within the c.packetData buffer: + macStart := 4 + length + paddingStart := macStart - paddingLength + + // Entire packet size, starting before length, ending at end of mac. + entirePacketSize := macStart + c.macSize + + // Ensure c.packetData is large enough for the entire packet data. 
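+	// The buffer is grown only when it is too small and is reused
+	// otherwise, keeping the read path free of per-packet allocations.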
+ if uint32(cap(c.packetData)) < entirePacketSize { + // Still need to upsize and copy, but this should be rare at runtime, only + // on upsizing the packetData buffer. + c.packetData = make([]byte, entirePacketSize) + copy(c.packetData, firstBlock) + } else { + c.packetData = c.packetData[:entirePacketSize] + } + + if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil { + return nil, err + } else { + c.oracleCamouflage -= uint32(n) + } + + remainingCrypted := c.packetData[firstBlockLength:macStart] + c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) + + mac := c.packetData[macStart:] + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData[:macStart]) + c.macResult = c.mac.Sum(c.macResult[:0]) + if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { + return nil, cbcError("ssh: MAC failure") + } + } + + return c.packetData[prefixLen:paddingStart], nil +} + +func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) + + // Length of encrypted portion of the packet (header, payload, padding). + // Enforce minimum padding and packet size. + encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) + // Enforce block size. + encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize + + length := encLength - 4 + paddingLength := int(length) - (1 + len(packet)) + + // Overall buffer contains: header, payload, padding, mac. + // Space for the MAC is reserved in the capacity but not the slice length. + bufferSize := encLength + c.macSize + if uint32(cap(c.packetData)) < bufferSize { + c.packetData = make([]byte, encLength, bufferSize) + } else { + c.packetData = c.packetData[:encLength] + } + + p := c.packetData + + // Packet header. + binary.BigEndian.PutUint32(p, length) + p = p[4:] + p[0] = byte(paddingLength) + + // Payload. + p = p[1:] + copy(p, packet) + + // Padding. + p = p[len(packet):] + if _, err := io.ReadFull(rand, p); err != nil { + return err + } + + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData) + // The MAC is now appended into the capacity reserved for it earlier. + c.packetData = c.mac.Sum(c.packetData) + } + + c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) + + if _, err := w.Write(c.packetData); err != nil { + return err + } + + return nil +} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go new file mode 100644 index 0000000000..c97f2978e8 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -0,0 +1,211 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "net" + "sync" + "time" +) + +// Client implements a traditional SSH client that supports shells, +// subprocesses, port forwarding and tunneled dialing. +type Client struct { + Conn + + forwards forwardList // forwarded tcpip connections from the remote side + mu sync.Mutex + channelHandlers map[string]chan NewChannel +} + +// HandleChannelOpen returns a channel on which NewChannel requests +// for the given type are sent. If the type already is being handled, +// nil is returned. 
The channel is closed when the connection is closed.
+func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.channelHandlers == nil {
+		// The SSH channel has been closed.
+		c := make(chan NewChannel)
+		close(c)
+		return c
+	}
+
+	ch := c.channelHandlers[channelType]
+	if ch != nil {
+		return nil
+	}
+
+	ch = make(chan NewChannel, chanSize)
+	c.channelHandlers[channelType] = ch
+	return ch
+}
+
+// NewClient creates a Client on top of the given connection.
+func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
+	conn := &Client{
+		Conn:            c,
+		channelHandlers: make(map[string]chan NewChannel, 1),
+	}
+
+	go conn.handleGlobalRequests(reqs)
+	go conn.handleChannelOpens(chans)
+	go func() {
+		conn.Wait()
+		conn.forwards.closeAll()
+	}()
+	go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
+	return conn
+}
+
+// NewClientConn establishes an authenticated SSH connection using c
+// as the underlying transport. The Request and NewChannel channels
+// must be serviced or the connection will hang.
+func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) {
+	fullConf := *config
+	fullConf.SetDefaults()
+	conn := &connection{
+		sshConn: sshConn{conn: c},
+	}
+
+	if err := conn.clientHandshake(addr, &fullConf); err != nil {
+		c.Close()
+		return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err)
+	}
+	conn.mux = newMux(conn.transport)
+	return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil
+}
+
+// clientHandshake performs the client side key exchange. See RFC 4253 Section
+// 7.
+func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error {
+	if config.ClientVersion != "" {
+		c.clientVersion = []byte(config.ClientVersion)
+	} else {
+		c.clientVersion = []byte(packageVersion)
+	}
+	var err error
+	c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion)
+	if err != nil {
+		return err
+	}
+
+	c.transport = newClientTransport(
+		newTransport(c.sshConn.conn, config.Rand, true /* is client */),
+		c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
+	if err := c.transport.waitSession(); err != nil {
+		return err
+	}
+
+	c.sessionID = c.transport.getSessionID()
+	return c.clientAuthenticate(config)
+}
+
+// verifyHostKeySignature verifies the host key obtained in the key
+// exchange.
+func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
+	sig, rest, ok := parseSignatureBody(result.Signature)
+	if len(rest) > 0 || !ok {
+		return errors.New("ssh: signature parse error")
+	}
+
+	return hostKey.Verify(result.H, sig)
+}
+
+// NewSession opens a new Session for this client. (A session is a remote
+// execution of a program.)
+func (c *Client) NewSession() (*Session, error) {
+	ch, in, err := c.OpenChannel("session", nil)
+	if err != nil {
+		return nil, err
+	}
+	return newSession(ch, in)
+}
+
+func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
+	for r := range incoming {
+		// This handles keepalive messages and matches
+		// the behaviour of OpenSSH.
+		r.Reply(false, nil)
+	}
+}
+
+// handleChannelOpens handles channel open messages from the remote side.
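+// Incoming NewChannel requests are dispatched to the handler registered
+// via HandleChannelOpen; unregistered types are rejected with
+// UnknownChannelType.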
+func (c *Client) handleChannelOpens(in <-chan NewChannel) { + for ch := range in { + c.mu.Lock() + handler := c.channelHandlers[ch.ChannelType()] + c.mu.Unlock() + + if handler != nil { + handler <- ch + } else { + ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) + } + } + + c.mu.Lock() + for _, ch := range c.channelHandlers { + close(ch) + } + c.channelHandlers = nil + c.mu.Unlock() +} + +// Dial starts a client connection to the given SSH server. It is a +// convenience function that connects to the given network address, +// initiates the SSH handshake, and then sets up a Client. For access +// to incoming channels and requests, use net.Dial with NewClientConn +// instead. +func Dial(network, addr string, config *ClientConfig) (*Client, error) { + conn, err := net.DialTimeout(network, addr, config.Timeout) + if err != nil { + return nil, err + } + c, chans, reqs, err := NewClientConn(conn, addr, config) + if err != nil { + return nil, err + } + return NewClient(c, chans, reqs), nil +} + +// A ClientConfig structure is used to configure a Client. It must not be +// modified after having been passed to an SSH function. +type ClientConfig struct { + // Config contains configuration that is shared between clients and + // servers. + Config + + // User contains the username to authenticate as. + User string + + // Auth contains possible authentication methods to use with the + // server. Only the first instance of a particular RFC 4252 method will + // be used during authentication. + Auth []AuthMethod + + // HostKeyCallback, if not nil, is called during the cryptographic + // handshake to validate the server's host key. A nil HostKeyCallback + // implies that all host keys are accepted. + HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error + + // ClientVersion contains the version identification string that will + // be used for the connection. If empty, a reasonable default is used. + ClientVersion string + + // HostKeyAlgorithms lists the key types that the client will + // accept from the server as host key, in order of + // preference. If empty, a reasonable default is used. Any + // string returned from PublicKey.Type method may be used, or + // any of the CertAlgoXxxx and KeyAlgoXxxx constants. + HostKeyAlgorithms []string + + // Timeout is the maximum amount of time for the TCP connection to establish. + // + // A Timeout of zero means no timeout. + Timeout time.Duration +} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go new file mode 100644 index 0000000000..fd1ec5dda6 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -0,0 +1,475 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +// clientAuthenticate authenticates with the remote server. See RFC 4252. 
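+// The "none" method is tried first to learn which methods the server
+// will accept; each AuthMethod in config.Auth is then attempted at most
+// once, guided by the method lists in the server's failure responses.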
+func (c *connection) clientAuthenticate(config *ClientConfig) error { + // initiate user auth session + if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { + return err + } + packet, err := c.transport.readPacket() + if err != nil { + return err + } + var serviceAccept serviceAcceptMsg + if err := Unmarshal(packet, &serviceAccept); err != nil { + return err + } + + // during the authentication phase the client first attempts the "none" method + // then any untried methods suggested by the server. + tried := make(map[string]bool) + var lastMethods []string + + sessionID := c.transport.getSessionID() + for auth := AuthMethod(new(noneAuth)); auth != nil; { + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) + if err != nil { + return err + } + if ok { + // success + return nil + } + tried[auth.method()] = true + if methods == nil { + methods = lastMethods + } + lastMethods = methods + + auth = nil + + findNext: + for _, a := range config.Auth { + candidateMethod := a.method() + if tried[candidateMethod] { + continue + } + for _, meth := range methods { + if meth == candidateMethod { + auth = a + break findNext + } + } + } + } + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) +} + +func keys(m map[string]bool) []string { + s := make([]string, 0, len(m)) + + for key := range m { + s = append(s, key) + } + return s +} + +// An AuthMethod represents an instance of an RFC 4252 authentication method. +type AuthMethod interface { + // auth authenticates user over transport t. + // Returns true if authentication is successful. + // If authentication is not successful, a []string of alternative + // method names is returned. If the slice is nil, it will be ignored + // and the previous set of possible methods will be reused. + auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error) + + // method returns the RFC 4252 method name. + method() string +} + +// "none" authentication, RFC 4252 section 5.2. +type noneAuth int + +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + if err := c.writePacket(Marshal(&userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: "none", + })); err != nil { + return false, nil, err + } + + return handleAuthResponse(c) +} + +func (n *noneAuth) method() string { + return "none" +} + +// passwordCallback is an AuthMethod that fetches the password through +// a function call, e.g. by prompting the user. +type passwordCallback func() (password string, err error) + +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + type passwordAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + Reply bool + Password string + } + + pw, err := cb() + // REVIEW NOTE: is there a need to support skipping a password attempt? + // The program may only find out that the user doesn't have a password + // when prompting. + if err != nil { + return false, nil, err + } + + if err := c.writePacket(Marshal(&passwordAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + Reply: false, + Password: pw, + })); err != nil { + return false, nil, err + } + + return handleAuthResponse(c) +} + +func (cb passwordCallback) method() string { + return "password" +} + +// Password returns an AuthMethod using the given password. 
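+//
+// A minimal usage sketch (the address, user, and password here are
+// placeholders, not part of the library):
+//
+//	config := &ClientConfig{
+//		User: "user",
+//		Auth: []AuthMethod{Password("secret")},
+//	}
+//	client, err := Dial("tcp", "example.com:22", config)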
+func Password(secret string) AuthMethod { + return passwordCallback(func() (string, error) { return secret, nil }) +} + +// PasswordCallback returns an AuthMethod that uses a callback for +// fetching a password. +func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { + return passwordCallback(prompt) +} + +type publickeyAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + // HasSig indicates to the receiver packet that the auth request is signed and + // should be used for authentication of the request. + HasSig bool + Algoname string + PubKey []byte + // Sig is tagged with "rest" so Marshal will exclude it during + // validateKey + Sig []byte `ssh:"rest"` +} + +// publicKeyCallback is an AuthMethod that uses a set of key +// pairs for authentication. +type publicKeyCallback func() ([]Signer, error) + +func (cb publicKeyCallback) method() string { + return "publickey" +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + // Authentication is performed in two stages. The first stage sends an + // enquiry to test if each key is acceptable to the remote. The second + // stage attempts to authenticate with the valid keys obtained in the + // first stage. + + signers, err := cb() + if err != nil { + return false, nil, err + } + var validKeys []Signer + for _, signer := range signers { + if ok, err := validateKey(signer.PublicKey(), user, c); ok { + validKeys = append(validKeys, signer) + } else { + if err != nil { + return false, nil, err + } + } + } + + // methods that may continue if this auth is not successful. + var methods []string + for _, signer := range validKeys { + pub := signer.PublicKey() + + pubKey := pub.Marshal() + sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + }, []byte(pub.Type()), pubKey)) + if err != nil { + return false, nil, err + } + + // manually wrap the serialized signature in a string + s := Marshal(sign) + sig := make([]byte, stringLength(len(s))) + marshalString(sig, s) + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + HasSig: true, + Algoname: pub.Type(), + PubKey: pubKey, + Sig: sig, + } + p := Marshal(&msg) + if err := c.writePacket(p); err != nil { + return false, nil, err + } + var success bool + success, methods, err = handleAuthResponse(c) + if err != nil { + return false, nil, err + } + if success { + return success, methods, err + } + } + return false, methods, nil +} + +// validateKey validates the key provided is acceptable to the server. 
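+// It sends a publickey request without a signature and reports whether
+// the server replies with SSH_MSG_USERAUTH_PK_OK (RFC 4252 section 7).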
+func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
+	pubKey := key.Marshal()
+	msg := publickeyAuthMsg{
+		User:     user,
+		Service:  serviceSSH,
+		Method:   "publickey",
+		HasSig:   false,
+		Algoname: key.Type(),
+		PubKey:   pubKey,
+	}
+	if err := c.writePacket(Marshal(&msg)); err != nil {
+		return false, err
+	}
+
+	return confirmKeyAck(key, c)
+}
+
+func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
+	pubKey := key.Marshal()
+	algoname := key.Type()
+
+	for {
+		packet, err := c.readPacket()
+		if err != nil {
+			return false, err
+		}
+		switch packet[0] {
+		case msgUserAuthBanner:
+			// TODO(gpaul): add callback to present the banner to the user
+		case msgUserAuthPubKeyOk:
+			var msg userAuthPubKeyOkMsg
+			if err := Unmarshal(packet, &msg); err != nil {
+				return false, err
+			}
+			if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
+				return false, nil
+			}
+			return true, nil
+		case msgUserAuthFailure:
+			return false, nil
+		default:
+			return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+		}
+	}
+}
+
+// PublicKeys returns an AuthMethod that uses the given key
+// pairs.
+func PublicKeys(signers ...Signer) AuthMethod {
+	return publicKeyCallback(func() ([]Signer, error) { return signers, nil })
+}
+
+// PublicKeysCallback returns an AuthMethod that runs the given
+// function to obtain a list of key pairs.
+func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod {
+	return publicKeyCallback(getSigners)
+}
+
+// handleAuthResponse returns whether the preceding authentication request succeeded
+// along with a list of remaining authentication methods to try next and
+// an error if an unexpected response was received.
+func handleAuthResponse(c packetConn) (bool, []string, error) {
+	for {
+		packet, err := c.readPacket()
+		if err != nil {
+			return false, nil, err
+		}
+
+		switch packet[0] {
+		case msgUserAuthBanner:
+			// TODO: add callback to present the banner to the user
+		case msgUserAuthFailure:
+			var msg userAuthFailureMsg
+			if err := Unmarshal(packet, &msg); err != nil {
+				return false, nil, err
+			}
+			return false, msg.Methods, nil
+		case msgUserAuthSuccess:
+			return true, nil, nil
+		default:
+			return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+		}
+	}
+}
+
+// KeyboardInteractiveChallenge should print questions, optionally
+// disabling echoing (e.g. for passwords), and return all the answers.
+// Challenge may be called multiple times in a single session. After
+// successful authentication, the server may send a challenge with no
+// questions, for which the user and instruction messages should be
+// printed. RFC 4256 section 3.3 details how the UI should behave for
+// both CLI and GUI environments.
+type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
+
+// KeyboardInteractive returns an AuthMethod using a prompt/response
+// sequence controlled by the server.
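+//
+// A minimal sketch of a challenge callback (illustration only; a real
+// client would prompt the user and honor the echos flags):
+//
+//	auth := KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
+//		return make([]string, len(questions)), nil // empty answers
+//	})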
+func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { + return challenge +} + +func (cb KeyboardInteractiveChallenge) method() string { + return "keyboard-interactive" +} + +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + type initiateMsg struct { + User string `sshtype:"50"` + Service string + Method string + Language string + Submethods string + } + + if err := c.writePacket(Marshal(&initiateMsg{ + User: user, + Service: serviceSSH, + Method: "keyboard-interactive", + })); err != nil { + return false, nil, err + } + + for { + packet, err := c.readPacket() + if err != nil { + return false, nil, err + } + + // like handleAuthResponse, but with less options. + switch packet[0] { + case msgUserAuthBanner: + // TODO: Print banners during userauth. + continue + case msgUserAuthInfoRequest: + // OK + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, nil, err + } + return false, msg.Methods, nil + case msgUserAuthSuccess: + return true, nil, nil + default: + return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + + var msg userAuthInfoRequestMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, nil, err + } + + // Manually unpack the prompt/echo pairs. + rest := msg.Prompts + var prompts []string + var echos []bool + for i := 0; i < int(msg.NumPrompts); i++ { + prompt, r, ok := parseString(rest) + if !ok || len(r) == 0 { + return false, nil, errors.New("ssh: prompt format error") + } + prompts = append(prompts, string(prompt)) + echos = append(echos, r[0] != 0) + rest = r[1:] + } + + if len(rest) != 0 { + return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs") + } + + answers, err := cb(msg.User, msg.Instruction, prompts, echos) + if err != nil { + return false, nil, err + } + + if len(answers) != len(prompts) { + return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") + } + responseLength := 1 + 4 + for _, a := range answers { + responseLength += stringLength(len(a)) + } + serialized := make([]byte, responseLength) + p := serialized + p[0] = msgUserAuthInfoResponse + p = p[1:] + p = marshalUint32(p, uint32(len(answers))) + for _, a := range answers { + p = marshalString(p, []byte(a)) + } + + if err := c.writePacket(serialized); err != nil { + return false, nil, err + } + } +} + +type retryableAuthMethod struct { + authMethod AuthMethod + maxTries int +} + +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) { + for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { + ok, methods, err = r.authMethod.auth(session, user, c, rand) + if ok || err != nil { // either success or error terminate + return ok, methods, err + } + } + return ok, methods, err +} + +func (r *retryableAuthMethod) method() string { + return r.authMethod.method() +} + +// RetryableAuthMethod is a decorator for other auth methods enabling them to +// be retried up to maxTries before considering that AuthMethod itself failed. +// If maxTries is <= 0, will retry indefinitely +// +// This is useful for interactive clients using challenge/response type +// authentication (e.g. 
Keyboard-Interactive, Password, etc) where the user +// could mistype their response resulting in the server issuing a +// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 +// [keyboard-interactive]); Without this decorator, the non-retryable +// AuthMethod would be removed from future consideration, and never tried again +// (and so the user would never be able to retry their entry). +func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { + return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} +} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go new file mode 100644 index 0000000000..8656d0f85d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" + "sync" + + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +// These are string constants in the SSH protocol. +const ( + compressionNone = "none" + serviceUserAuth = "ssh-userauth" + serviceSSH = "ssh-connection" +) + +// supportedCiphers specifies the supported ciphers in preference order. +var supportedCiphers = []string{ + "aes128-ctr", "aes192-ctr", "aes256-ctr", + "aes128-gcm@openssh.com", + "arcfour256", "arcfour128", +} + +// supportedKexAlgos specifies the supported key-exchange algorithms in +// preference order. +var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, + // P384 and P521 are not constant-time yet, but since we don't + // reuse ephemeral keys, using them for ECDH should be OK. + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA1, kexAlgoDH1SHA1, +} + +// supportedKexAlgos specifies the supported host-key algorithms (i.e. methods +// of authenticating servers) in preference order. +var supportedHostKeyAlgos = []string{ + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, + CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, + + KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSA, KeyAlgoDSA, + + KeyAlgoED25519, +} + +// supportedMACs specifies a default set of MAC algorithms in preference order. +// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed +// because they have reached the end of their useful life. +var supportedMACs = []string{ + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", +} + +var supportedCompressions = []string{compressionNone} + +// hashFuncs keeps the mapping of supported algorithms to their respective +// hashes needed for signature verification. +var hashFuncs = map[string]crypto.Hash{ + KeyAlgoRSA: crypto.SHA1, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + CertAlgoRSAv01: crypto.SHA1, + CertAlgoDSAv01: crypto.SHA1, + CertAlgoECDSA256v01: crypto.SHA256, + CertAlgoECDSA384v01: crypto.SHA384, + CertAlgoECDSA521v01: crypto.SHA512, +} + +// unexpectedMessageError results when the SSH message that we received didn't +// match what we wanted. +func unexpectedMessageError(expected, got uint8) error { + return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) +} + +// parseError results from a malformed SSH message. 
+func parseError(tag uint8) error { + return fmt.Errorf("ssh: parse error in message type %d", tag) +} + +func findCommon(what string, client []string, server []string) (common string, err error) { + for _, c := range client { + for _, s := range server { + if c == s { + return c, nil + } + } + } + return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) +} + +type directionAlgorithms struct { + Cipher string + MAC string + Compression string +} + +// rekeyBytes returns a rekeying intervals in bytes. +func (a *directionAlgorithms) rekeyBytes() int64 { + // According to RFC4344 block ciphers should rekey after + // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is + // 128. + switch a.Cipher { + case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: + return 16 * (1 << 32) + + } + + // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data. + return 1 << 30 +} + +type algorithms struct { + kex string + hostKey string + w directionAlgorithms + r directionAlgorithms +} + +func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { + result := &algorithms{} + + result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) + if err != nil { + return + } + + result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) + if err != nil { + return + } + + result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) + if err != nil { + return + } + + result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) + if err != nil { + return + } + + result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + if err != nil { + return + } + + result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + if err != nil { + return + } + + result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) + if err != nil { + return + } + + result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) + if err != nil { + return + } + + return result, nil +} + +// If rekeythreshold is too small, we can't make any progress sending +// stuff. +const minRekeyThreshold uint64 = 256 + +// Config contains configuration data common to both ServerConfig and +// ClientConfig. +type Config struct { + // Rand provides the source of entropy for cryptographic + // primitives. If Rand is nil, the cryptographic random reader + // in package crypto/rand will be used. + Rand io.Reader + + // The maximum number of bytes sent or received after which a + // new key is negotiated. It must be at least 256. If + // unspecified, 1 gigabyte is used. + RekeyThreshold uint64 + + // The allowed key exchanges algorithms. If unspecified then a + // default set of algorithms is used. + KeyExchanges []string + + // The allowed cipher algorithms. If unspecified then a sensible + // default is used. + Ciphers []string + + // The allowed MAC algorithms. If unspecified then a sensible default + // is used. + MACs []string +} + +// SetDefaults sets sensible values for unset fields in config. 
This is +// exported for testing: Configs passed to SSH functions are copied and have +// default values set automatically. +func (c *Config) SetDefaults() { + if c.Rand == nil { + c.Rand = rand.Reader + } + if c.Ciphers == nil { + c.Ciphers = supportedCiphers + } + var ciphers []string + for _, c := range c.Ciphers { + if cipherModes[c] != nil { + // reject the cipher if we have no cipherModes definition + ciphers = append(ciphers, c) + } + } + c.Ciphers = ciphers + + if c.KeyExchanges == nil { + c.KeyExchanges = supportedKexAlgos + } + + if c.MACs == nil { + c.MACs = supportedMACs + } + + if c.RekeyThreshold == 0 { + // RFC 4253, section 9 suggests rekeying after 1G. + c.RekeyThreshold = 1 << 30 + } + if c.RekeyThreshold < minRekeyThreshold { + c.RekeyThreshold = minRekeyThreshold + } +} + +// buildDataSignedForAuth returns the data that is signed in order to prove +// possession of a private key. See RFC 4252, section 7. +func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { + data := struct { + Session []byte + Type byte + User string + Service string + Method string + Sign bool + Algo []byte + PubKey []byte + }{ + sessionId, + msgUserAuthRequest, + req.User, + req.Service, + req.Method, + true, + algo, + pubKey, + } + return Marshal(data) +} + +func appendU16(buf []byte, n uint16) []byte { + return append(buf, byte(n>>8), byte(n)) +} + +func appendU32(buf []byte, n uint32) []byte { + return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendU64(buf []byte, n uint64) []byte { + return append(buf, + byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), + byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendInt(buf []byte, n int) []byte { + return appendU32(buf, uint32(n)) +} + +func appendString(buf []byte, s string) []byte { + buf = appendU32(buf, uint32(len(s))) + buf = append(buf, s...) + return buf +} + +func appendBool(buf []byte, b bool) []byte { + if b { + return append(buf, 1) + } + return append(buf, 0) +} + +// newCond is a helper to hide the fact that there is no usable zero +// value for sync.Cond. +func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } + +// window represents the buffer available to clients +// wishing to write to a channel. +type window struct { + *sync.Cond + win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 + writeWaiters int + closed bool +} + +// add adds win to the amount of window available +// for consumers. +func (w *window) add(win uint32) bool { + // a zero sized window adjust is a noop. + if win == 0 { + return true + } + w.L.Lock() + if w.win+win < win { + w.L.Unlock() + return false + } + w.win += win + // It is unusual that multiple goroutines would be attempting to reserve + // window space, but not guaranteed. Use broadcast to notify all waiters + // that additional window is available. + w.Broadcast() + w.L.Unlock() + return true +} + +// close sets the window to closed, so all reservations fail +// immediately. +func (w *window) close() { + w.L.Lock() + w.closed = true + w.Broadcast() + w.L.Unlock() +} + +// reserve reserves win from the available window capacity. +// If no capacity remains, reserve will block. reserve may +// return less than requested. 
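+// If the window has been closed, reserve returns io.EOF.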
+func (w *window) reserve(win uint32) (uint32, error) {
+	var err error
+	w.L.Lock()
+	w.writeWaiters++
+	w.Broadcast()
+	for w.win == 0 && !w.closed {
+		w.Wait()
+	}
+	w.writeWaiters--
+	if w.win < win {
+		win = w.win
+	}
+	w.win -= win
+	if w.closed {
+		err = io.EOF
+	}
+	w.L.Unlock()
+	return win, err
+}
+
+// waitWriterBlocked waits until some goroutine is blocked for further
+// writes. It is used in tests only.
+func (w *window) waitWriterBlocked() {
+	w.Cond.L.Lock()
+	for w.writeWaiters == 0 {
+		w.Cond.Wait()
+	}
+	w.Cond.L.Unlock()
+}
diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go
new file mode 100644
index 0000000000..e786f2f9a2
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/connection.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+	"fmt"
+	"net"
+)
+
+// OpenChannelError is returned if the other side rejects an
+// OpenChannel request.
+type OpenChannelError struct {
+	Reason  RejectionReason
+	Message string
+}
+
+func (e *OpenChannelError) Error() string {
+	return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
+}
+
+// ConnMetadata holds metadata for the connection.
+type ConnMetadata interface {
+	// User returns the user ID for this connection.
+	User() string
+
+	// SessionID returns the session hash, also denoted by H.
+	SessionID() []byte
+
+	// ClientVersion returns the client's version string as hashed
+	// into the session ID.
+	ClientVersion() []byte
+
+	// ServerVersion returns the server's version string as hashed
+	// into the session ID.
+	ServerVersion() []byte
+
+	// RemoteAddr returns the remote address for this connection.
+	RemoteAddr() net.Addr
+
+	// LocalAddr returns the local address for this connection.
+	LocalAddr() net.Addr
+}
+
+// Conn represents an SSH connection for both server and client roles.
+// Conn is the basis for implementing an application layer, such
+// as ClientConn, which implements the traditional shell access for
+// clients.
+type Conn interface {
+	ConnMetadata
+
+	// SendRequest sends a global request, and returns the
+	// reply. If wantReply is true, it returns the response status
+	// and payload. See also RFC4254, section 4.
+	SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
+
+	// OpenChannel tries to open a channel. If the request is
+	// rejected, it returns *OpenChannelError. On success it returns
+	// the SSH Channel and a Go channel for incoming, out-of-band
+	// requests. The Go channel must be serviced, or the
+	// connection will hang.
+	OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
+
+	// Close closes the underlying network connection.
+	Close() error
+
+	// Wait blocks until the connection has shut down, and returns the
+	// error causing the shutdown.
+	Wait() error
+
+	// TODO(hanwen): consider exposing:
+	//   RequestKeyChange
+	//   Disconnect
+}
+
+// DiscardRequests consumes and rejects all requests from the
+// passed-in channel.
+func DiscardRequests(in <-chan *Request) {
+	for req := range in {
+		if req.WantReply {
+			req.Reply(false, nil)
+		}
+	}
+}
+
+// A connection represents an incoming connection.
+type connection struct {
+	transport *handshakeTransport
+	sshConn
+
+	// The connection protocol.
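+	// The embedded mux demultiplexes channels and global requests on
+	// top of the transport once the handshake completes.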
+ *mux +} + +func (c *connection) Close() error { + return c.sshConn.conn.Close() +} + +// sshconn provides net.Conn metadata, but disallows direct reads and +// writes. +type sshConn struct { + conn net.Conn + + user string + sessionID []byte + clientVersion []byte + serverVersion []byte +} + +func dup(src []byte) []byte { + dst := make([]byte, len(src)) + copy(dst, src) + return dst +} + +func (c *sshConn) User() string { + return c.user +} + +func (c *sshConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *sshConn) Close() error { + return c.conn.Close() +} + +func (c *sshConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *sshConn) SessionID() []byte { + return dup(c.sessionID) +} + +func (c *sshConn) ClientVersion() []byte { + return dup(c.clientVersion) +} + +func (c *sshConn) ServerVersion() []byte { + return dup(c.serverVersion) +} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go new file mode 100644 index 0000000000..d6be894662 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package ssh implements an SSH client and server. + +SSH is a transport security protocol, an authentication protocol and a +family of application protocols. The most typical application level +protocol is a remote shell and this is specifically implemented. However, +the multiplexed nature of SSH is exposed to users that wish to support +others. + +References: + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD + [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 +*/ +package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go new file mode 100644 index 0000000000..8de650644a --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -0,0 +1,625 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "log" + "net" + "sync" +) + +// debugHandshake, if set, prints messages sent and received. Key +// exchange messages are printed as if DH were used, so the debug +// messages are wrong when using ECDH. +const debugHandshake = false + +// chanSize sets the amount of buffering SSH connections. This is +// primarily for testing: setting chanSize=0 uncovers deadlocks more +// quickly. +const chanSize = 16 + +// keyingTransport is a packet based transport that supports key +// changes. It need not be thread-safe. It should pass through +// msgNewKeys in both directions. +type keyingTransport interface { + packetConn + + // prepareKeyChange sets up a key change. The key change for a + // direction will be effected if a msgNewKeys message is sent + // or received. + prepareKeyChange(*algorithms, *kexResult) error +} + +// handshakeTransport implements rekeying on top of a keyingTransport +// and offers a thread-safe writePacket() interface. +type handshakeTransport struct { + conn keyingTransport + config *Config + + serverVersion []byte + clientVersion []byte + + // hostKeys is non-empty if we are the server. 
In that case, + // it contains all host keys that can be used to sign the + // connection. + hostKeys []Signer + + // hostKeyAlgorithms is non-empty if we are the client. In that case, + // we accept these key types from the server as host key. + hostKeyAlgorithms []string + + // On read error, incoming is closed, and readError is set. + incoming chan []byte + readError error + + mu sync.Mutex + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + pendingPackets [][]byte // Used when a key exchange is in progress. + + // If the read loop wants to schedule a kex, it pings this + // channel, and the write loop will send out a kex + // message. + requestKex chan struct{} + + // If the other side requests or confirms a kex, its kexInit + // packet is sent here for the write loop to find it. + startKex chan *pendingKex + + // data for host key checking + hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error + dialAddress string + remoteAddr net.Addr + + // Algorithms agreed in the last key exchange. + algorithms *algorithms + + readPacketsLeft uint32 + readBytesLeft int64 + + writePacketsLeft uint32 + writeBytesLeft int64 + + // The session ID or nil if first kex did not complete yet. + sessionID []byte +} + +type pendingKex struct { + otherInit []byte + done chan error +} + +func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { + t := &handshakeTransport{ + conn: conn, + serverVersion: serverVersion, + clientVersion: clientVersion, + incoming: make(chan []byte, chanSize), + requestKex: make(chan struct{}, 1), + startKex: make(chan *pendingKex, 1), + + config: config, + } + + // We always start with a mandatory key exchange. + t.requestKex <- struct{}{} + return t +} + +func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.dialAddress = dialAddr + t.remoteAddr = addr + t.hostKeyCallback = config.HostKeyCallback + if config.HostKeyAlgorithms != nil { + t.hostKeyAlgorithms = config.HostKeyAlgorithms + } else { + t.hostKeyAlgorithms = supportedHostKeyAlgos + } + go t.readLoop() + go t.kexLoop() + return t +} + +func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.hostKeys = config.hostKeys + go t.readLoop() + go t.kexLoop() + return t +} + +func (t *handshakeTransport) getSessionID() []byte { + return t.sessionID +} + +// waitSession waits for the session to be established. This should be +// the first thing to call after instantiating handshakeTransport. 
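+// It fails unless the first packet read is msgNewKeys, i.e. the initial
+// key exchange has completed.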
+func (t *handshakeTransport) waitSession() error { + p, err := t.readPacket() + if err != nil { + return err + } + if p[0] != msgNewKeys { + return fmt.Errorf("ssh: first packet should be msgNewKeys") + } + + return nil +} + +func (t *handshakeTransport) id() string { + if len(t.hostKeys) > 0 { + return "server" + } + return "client" +} + +func (t *handshakeTransport) printPacket(p []byte, write bool) { + action := "got" + if write { + action = "sent" + } + + if p[0] == msgChannelData || p[0] == msgChannelExtendedData { + log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) + } else { + msg, err := decode(p) + log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) + } +} + +func (t *handshakeTransport) readPacket() ([]byte, error) { + p, ok := <-t.incoming + if !ok { + return nil, t.readError + } + return p, nil +} + +func (t *handshakeTransport) readLoop() { + first := true + for { + p, err := t.readOnePacket(first) + first = false + if err != nil { + t.readError = err + close(t.incoming) + break + } + if p[0] == msgIgnore || p[0] == msgDebug { + continue + } + t.incoming <- p + } + + // Stop writers too. + t.recordWriteError(t.readError) + + // Unblock the writer should it wait for this. + close(t.startKex) + + // Don't close t.requestKex; it's also written to from writePacket. +} + +func (t *handshakeTransport) pushPacket(p []byte) error { + if debugHandshake { + t.printPacket(p, true) + } + return t.conn.writePacket(p) +} + +func (t *handshakeTransport) getWriteError() error { + t.mu.Lock() + defer t.mu.Unlock() + return t.writeError +} + +func (t *handshakeTransport) recordWriteError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError == nil && err != nil { + t.writeError = err + } +} + +func (t *handshakeTransport) requestKeyExchange() { + select { + case t.requestKex <- struct{}{}: + default: + // something already requested a kex, so do nothing. + } +} + +func (t *handshakeTransport) kexLoop() { + +write: + for t.getWriteError() == nil { + var request *pendingKex + var sent bool + + for request == nil || !sent { + var ok bool + select { + case request, ok = <-t.startKex: + if !ok { + break write + } + case <-t.requestKex: + break + } + + if !sent { + if err := t.sendKexInit(); err != nil { + t.recordWriteError(err) + break + } + sent = true + } + } + + if err := t.getWriteError(); err != nil { + if request != nil { + request.done <- err + } + break + } + + // We're not servicing t.requestKex, but that is OK: + // we never block on sending to t.requestKex. + + // We're not servicing t.startKex, but the remote end + // has just sent us a kexInitMsg, so it can't send + // another key change request, until we close the done + // channel on the pendingKex request. + + err := t.enterKeyExchange(request.otherInit) + + t.mu.Lock() + t.writeError = err + t.sentInitPacket = nil + t.sentInitMsg = nil + t.writePacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.writeBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.writeBytesLeft = t.algorithms.w.rekeyBytes() + } + + // we have completed the key exchange. Since the + // reader is still blocked, it is safe to clear out + // the requestKex channel. 
This avoids the situation + // where: 1) we consumed our own request for the + // initial kex, and 2) the kex from the remote side + // caused another send on the requestKex channel, + clear: + for { + select { + case <-t.requestKex: + // + default: + break clear + } + } + + request.done <- t.writeError + + // kex finished. Push packets that we received while + // the kex was in progress. Don't look at t.startKex + // and don't increment writtenSinceKex: if we trigger + // another kex while we are still busy with the last + // one, things will become very confusing. + for _, p := range t.pendingPackets { + t.writeError = t.pushPacket(p) + if t.writeError != nil { + break + } + } + t.pendingPackets = t.pendingPackets[:0] + t.mu.Unlock() + } + + // drain startKex channel. We don't service t.requestKex + // because nobody does blocking sends there. + go func() { + for init := range t.startKex { + init.done <- t.writeError + } + }() + + // Unblock reader. + t.conn.Close() +} + +// The protocol uses uint32 for packet counters, so we can't let them +// reach 1<<32. We will actually read and write more packets than +// this, though: the other side may send more packets, and after we +// hit this limit on writing we will send a few more packets for the +// key exchange itself. +const packetRekeyThreshold = (1 << 31) + +func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { + p, err := t.conn.readPacket() + if err != nil { + return nil, err + } + + if t.readPacketsLeft > 0 { + t.readPacketsLeft-- + } else { + t.requestKeyExchange() + } + + if t.readBytesLeft > 0 { + t.readBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if debugHandshake { + t.printPacket(p, false) + } + + if first && p[0] != msgKexInit { + return nil, fmt.Errorf("ssh: first packet should be msgKexInit") + } + + if p[0] != msgKexInit { + return p, nil + } + + firstKex := t.sessionID == nil + + kex := pendingKex{ + done: make(chan error, 1), + otherInit: p, + } + t.startKex <- &kex + err = <-kex.done + + if debugHandshake { + log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) + } + + if err != nil { + return nil, err + } + + t.readPacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.readBytesLeft = int64(t.config.RekeyThreshold) + } else { + t.readBytesLeft = t.algorithms.r.rekeyBytes() + } + + // By default, a key exchange is hidden from higher layers by + // translating it into msgIgnore. + successPacket := []byte{msgIgnore} + if firstKex { + // sendKexInit() for the first kex waits for + // msgNewKeys so the authentication process is + // guaranteed to happen over an encrypted transport. + successPacket = []byte{msgNewKeys} + } + + return successPacket, nil +} + +// sendKexInit sends a key change message. +func (t *handshakeTransport) sendKexInit() error { + t.mu.Lock() + defer t.mu.Unlock() + if t.sentInitMsg != nil { + // kexInits may be sent either in response to the other side, + // or because our side wants to initiate a key change, so we + // may have already sent a kexInit. In that case, don't send a + // second kexInit. 
+ return nil + } + + msg := &kexInitMsg{ + KexAlgos: t.config.KeyExchanges, + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, + MACsServerClient: t.config.MACs, + CompressionClientServer: supportedCompressions, + CompressionServerClient: supportedCompressions, + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + + if len(t.hostKeys) > 0 { + for _, k := range t.hostKeys { + msg.ServerHostKeyAlgos = append( + msg.ServerHostKeyAlgos, k.PublicKey().Type()) + } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + } + packet := Marshal(msg) + + // writePacket destroys the contents, so save a copy. + packetCopy := make([]byte, len(packet)) + copy(packetCopy, packet) + + if err := t.pushPacket(packetCopy); err != nil { + return err + } + + t.sentInitMsg = msg + t.sentInitPacket = packet + + return nil +} + +func (t *handshakeTransport) writePacket(p []byte) error { + switch p[0] { + case msgKexInit: + return errors.New("ssh: only handshakeTransport can send kexInit") + case msgNewKeys: + return errors.New("ssh: only handshakeTransport can send newKeys") + } + + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError != nil { + return t.writeError + } + + if t.sentInitMsg != nil { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + + if t.writeBytesLeft > 0 { + t.writeBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if t.writePacketsLeft > 0 { + t.writePacketsLeft-- + } else { + t.requestKeyExchange() + } + + if err := t.pushPacket(p); err != nil { + t.writeError = err + } + + return nil +} + +func (t *handshakeTransport) Close() error { + return t.conn.Close() +} + +func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + if debugHandshake { + log.Printf("%s entered key exchange", t.id()) + } + + otherInit := &kexInitMsg{} + if err := Unmarshal(otherInitPacket, otherInit); err != nil { + return err + } + + magics := handshakeMagics{ + clientVersion: t.clientVersion, + serverVersion: t.serverVersion, + clientKexInit: otherInitPacket, + serverKexInit: t.sentInitPacket, + } + + clientInit := otherInit + serverInit := t.sentInitMsg + if len(t.hostKeys) == 0 { + clientInit, serverInit = serverInit, clientInit + + magics.clientKexInit = t.sentInitPacket + magics.serverKexInit = otherInitPacket + } + + var err error + t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit) + if err != nil { + return err + } + + // We don't send FirstKexFollows, but we handle receiving it. + // + // RFC 4253 section 7 defines the kex and the agreement method for + // first_kex_packet_follows. It states that the guessed packet + // should be ignored if the "kex algorithm and/or the host + // key algorithm is guessed wrong (server and client have + // different preferred algorithm), or if any of the other + // algorithms cannot be agreed upon". The other algorithms have + // already been checked above so the kex algorithm and host key + // algorithm are checked here. + if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { + // other side sent a kex message for the wrong algorithm, + // which we have to ignore. 
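+		// Read and discard exactly one packet: the peer's guessed first
+		// kex packet (first_kex_packet_follows, RFC 4253 section 7).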
+ if _, err := t.conn.readPacket(); err != nil { + return err + } + } + + kex, ok := kexAlgoMap[t.algorithms.kex] + if !ok { + return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) + } + + var result *kexResult + if len(t.hostKeys) > 0 { + result, err = t.server(kex, t.algorithms, &magics) + } else { + result, err = t.client(kex, t.algorithms, &magics) + } + + if err != nil { + return err + } + + if t.sessionID == nil { + t.sessionID = result.H + } + result.SessionID = t.sessionID + + t.conn.prepareKeyChange(t.algorithms, result) + if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { + return err + } + if packet, err := t.conn.readPacket(); err != nil { + return err + } else if packet[0] != msgNewKeys { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + var hostKey Signer + for _, k := range t.hostKeys { + if algs.hostKey == k.PublicKey().Type() { + hostKey = k + } + } + + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) + return r, err +} + +func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + result, err := kex.Client(t.conn, t.config.Rand, magics) + if err != nil { + return nil, err + } + + hostKey, err := ParsePublicKey(result.HostKey) + if err != nil { + return nil, err + } + + if err := verifyHostKeySignature(hostKey, result); err != nil { + return nil, err + } + + if t.hostKeyCallback != nil { + err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) + if err != nil { + return nil, err + } + } + + return result, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go new file mode 100644 index 0000000000..c87fbebfde --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -0,0 +1,540 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" + + "golang.org/x/crypto/curve25519" +) + +const ( + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" +) + +// kexResult captures the outcome of a key exchange. +type kexResult struct { + // Session hash. See also RFC 4253, section 8. + H []byte + + // Shared secret. See also RFC 4253, section 8. + K []byte + + // Host key as hashed into H. + HostKey []byte + + // Signature of H. + Signature []byte + + // A cryptographic hash function that matches the security + // level of the key exchange algorithm. It is used for + // calculating H, and for deriving keys from H and K. + Hash crypto.Hash + + // The session ID, which is the first H computed. This is used + // to derive key material inside the transport. + SessionID []byte +} + +// handshakeMagics contains data that is always included in the +// session hash. 
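+// The two version strings and both KEXINIT payloads are hashed, in this
+// order, into the exchange hash H (RFC 4253 section 8).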
+type handshakeMagics struct { + clientVersion, serverVersion []byte + clientKexInit, serverKexInit []byte +} + +func (m *handshakeMagics) write(w io.Writer) { + writeString(w, m.clientVersion) + writeString(w, m.serverVersion) + writeString(w, m.clientKexInit) + writeString(w, m.serverKexInit) +} + +// kexAlgorithm abstracts different key exchange algorithms. +type kexAlgorithm interface { + // Server runs server-side key agreement, signing the result + // with a hostkey. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + + // Client runs the client-side key agreement. Caller is + // responsible for verifying the host key signature. + Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) +} + +// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. +type dhGroup struct { + g, p, pMinus1 *big.Int +} + +func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil +} + +func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { + hashFunc := crypto.SHA1 + + var x *big.Int + for { + var err error + if x, err = rand.Int(randSource, group.pMinus1); err != nil { + return nil, err + } + if x.Sign() > 0 { + break + } + } + + X := new(big.Int).Exp(group.g, x, group.p) + kexDHInit := kexDHInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHInit)); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexDHReply kexDHReplyMsg + if err = Unmarshal(packet, &kexDHReply); err != nil { + return nil, err + } + + kInt, err := group.diffieHellman(kexDHReply.Y, x) + if err != nil { + return nil, err + } + + h := hashFunc.New() + magics.write(h) + writeString(h, kexDHReply.HostKey) + writeInt(h, X) + writeInt(h, kexDHReply.Y) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHReply.HostKey, + Signature: kexDHReply.Signature, + Hash: crypto.SHA1, + }, nil +} + +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + hashFunc := crypto.SHA1 + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHInit kexDHInitMsg + if err = Unmarshal(packet, &kexDHInit); err != nil { + return + } + + var y *big.Int + for { + if y, err = rand.Int(randSource, group.pMinus1); err != nil { + return + } + if y.Sign() > 0 { + break + } + } + + Y := new(big.Int).Exp(group.g, y, group.p) + kInt, err := group.diffieHellman(kexDHInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeInt(h, kexDHInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. 
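+	// For example, an "ssh-rsa" host key hashes H once more with SHA-1
+	// before producing the RSASSA-PKCS1-v1_5 signature; see
+	// rsaPublicKey.Verify in keys.go for the matching verification.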
+ sig, err := signAndMarshal(priv, randSource, H) + if err != nil { + return nil, err + } + + kexDHReply := kexDHReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHReply) + + err = c.writePacket(packet) + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA1, + }, nil +} + +// ecdh performs Elliptic Curve Diffie-Hellman key exchange as +// described in RFC 5656, section 4. +type ecdh struct { + curve elliptic.Curve +} + +func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + kexInit := kexECDHInitMsg{ + ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), + } + + serialized := Marshal(&kexInit) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + + x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) + if err != nil { + return nil, err + } + + // generate shared secret + secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kexInit.ClientPubKey) + writeString(h, reply.EphemeralPubKey) + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: ecHash(kex.curve), + }, nil +} + +// unmarshalECKey parses and checks an EC key. +func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { + x, y = elliptic.Unmarshal(curve, pubkey) + if x == nil { + return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") + } + if !validateECPublicKey(curve, x, y) { + return nil, nil, errors.New("ssh: public key not on curve") + } + return x, y, nil +} + +// validateECPublicKey checks that the point is a valid public key for +// the given curve. See [SEC1], 3.2.2 +func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { + if x.Sign() == 0 && y.Sign() == 0 { + return false + } + + if x.Cmp(curve.Params().P) >= 0 { + return false + } + + if y.Cmp(curve.Params().P) >= 0 { + return false + } + + if !curve.IsOnCurve(x, y) { + return false + } + + // We don't check if N * PubKey == 0, since + // + // - the NIST curves have cofactor = 1, so this is implicit. + // (We don't foresee an implementation that supports non NIST + // curves) + // + // - for ephemeral keys, we don't need to worry about small + // subgroup attacks. + return true +} + +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexECDHInit kexECDHInitMsg + if err = Unmarshal(packet, &kexECDHInit); err != nil { + return nil, err + } + + clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) + if err != nil { + return nil, err + } + + // We could cache this key across multiple users/multiple + // connection attempts, but the benefit is small. OpenSSH + // generates a new key for each incoming connection. 
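+	// Generating a fresh ephemeral key per connection also preserves
+	// forward secrecy: a later compromise of the long-lived host key
+	// does not expose the session keys derived here.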
+ ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) + + // generate shared secret + secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexECDHInit.ClientPubKey) + writeString(h, serializedEphKey) + + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: serializedEphKey, + HostKey: hostKeyBytes, + Signature: sig, + } + + serialized := Marshal(&reply) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + return &kexResult{ + H: H, + K: K, + HostKey: reply.HostKey, + Signature: sig, + Hash: ecHash(kex.curve), + }, nil +} + +var kexAlgoMap = map[string]kexAlgorithm{} + +func init() { + // This is the group called diffie-hellman-group1-sha1 in RFC + // 4253 and Oakley Group 2 in RFC 2409. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) + kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} + kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} + kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} + kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} +} + +// curve25519sha256 implements the curve25519-sha256@libssh.org key +// agreement protocol, as described in +// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +type curve25519sha256 struct{} + +type curve25519KeyPair struct { + priv [32]byte + pub [32]byte +} + +func (kp *curve25519KeyPair) generate(rand io.Reader) error { + if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { + return err + } + curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + return nil +} + +// curve25519Zeros is just an array of 32 zero bytes so that we have something +// convenient to compare against in order to reject curve25519 points with the +// wrong order. 
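+// If the peer sends a point of small order, the shared secret computed by
+// ScalarMult is all zeros, so a constant-time comparison against this
+// array (see Client and Server below) rejects such degenerate values.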
+var curve25519Zeros [32]byte + +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + if len(reply.EphemeralPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var servPub, secret [32]byte + copy(servPub[:], reply.EphemeralPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &servPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kp.pub[:]) + writeString(h, reply.EphemeralPubKey) + + kInt := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return + } + + if len(kexInit.ClientPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + + var clientPub, secret [32]byte + copy(clientPub[:], kexInit.ClientPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &clientPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, kp.pub[:]) + + kInt := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: kp.pub[:], + HostKey: hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go new file mode 100644 index 0000000000..f38de9898c --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -0,0 +1,905 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/md5" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "strings" + + "golang.org/x/crypto/ed25519" +) + +// These constants represent the algorithm names for key types supported by this +// package. +const ( + KeyAlgoRSA = "ssh-rsa" + KeyAlgoDSA = "ssh-dss" + KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" + KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" + KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" +) + +// parsePubKey parses a public key of the given algorithm. +// Use ParsePublicKey for keys with prepended algorithm. +func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { + switch algo { + case KeyAlgoRSA: + return parseRSA(in) + case KeyAlgoDSA: + return parseDSA(in) + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + return parseECDSA(in) + case KeyAlgoED25519: + return parseED25519(in) + case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: + cert, err := parseCert(in, certToPrivAlgo(algo)) + if err != nil { + return nil, nil, err + } + return cert, nil, nil + } + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) +} + +// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format +// (see sshd(8) manual page) once the options and key type fields have been +// removed. +func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { + in = bytes.TrimSpace(in) + + i := bytes.IndexAny(in, " \t") + if i == -1 { + i = len(in) + } + base64Key := in[:i] + + key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) + n, err := base64.StdEncoding.Decode(key, base64Key) + if err != nil { + return nil, "", err + } + key = key[:n] + out, err = ParsePublicKey(key) + if err != nil { + return nil, "", err + } + comment = string(bytes.TrimSpace(in[i:])) + return out, comment, nil +} + +// ParseKnownHosts parses an entry in the format of the known_hosts file. +// +// The known_hosts format is documented in the sshd(8) manual page. This +// function will parse a single entry from in. On successful return, marker +// will contain the optional marker value (i.e. "cert-authority" or "revoked") +// or else be empty, hosts will contain the hosts that this entry matches, +// pubKey will contain the public key and comment will contain any trailing +// comment at the end of the line. See the sshd(8) manual page for the various +// forms that a host string can take. +// +// The unparsed remainder of the input will be returned in rest. This function +// can be called repeatedly to parse multiple entries. +// +// If no entries were found in the input then err will be io.EOF. Otherwise a +// non-nil err value indicates a parse error. +func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + // Strip out the beginning of the known_host key. 
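+		// (A full entry has 3-5 whitespace-separated fields, e.g.
+		// "@cert-authority *.example.com ssh-rsa AAAAB3... comment",
+		// with the marker and trailing comment both optional.)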
+ // This is either an optional marker or a (set of) hostname(s). + keyFields := bytes.Fields(in) + if len(keyFields) < 3 || len(keyFields) > 5 { + return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") + } + + // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated + // list of hosts + marker := "" + if keyFields[0][0] == '@' { + marker = string(keyFields[0][1:]) + keyFields = keyFields[1:] + } + + hosts := string(keyFields[0]) + // keyFields[1] contains the key type (e.g. “ssh-rsa”). + // However, that information is duplicated inside the + // base64-encoded key and so is ignored here. + + key := bytes.Join(keyFields[2:], []byte(" ")) + if pubKey, comment, err = parseAuthorizedKey(key); err != nil { + return "", nil, nil, "", nil, err + } + + return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil + } + + return "", nil, nil, "", nil, io.EOF +} + +// ParseAuthorizedKeys parses a public key from an authorized_keys +// file used in OpenSSH according to the sshd(8) manual page. +func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + return out, comment, options, rest, nil + } + + // No key type recognised. Maybe there's an options field at + // the beginning. + var b byte + inQuote := false + var candidateOptions []string + optionStart := 0 + for i, b = range in { + isEnd := !inQuote && (b == ' ' || b == '\t') + if (b == ',' && !inQuote) || isEnd { + if i-optionStart > 0 { + candidateOptions = append(candidateOptions, string(in[optionStart:i])) + } + optionStart = i + 1 + } + if isEnd { + break + } + if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { + inQuote = !inQuote + } + } + for i < len(in) && (in[i] == ' ' || in[i] == '\t') { + i++ + } + if i == len(in) { + // Invalid line: unmatched quote + in = rest + continue + } + + in = in[i:] + i = bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + options = candidateOptions + return out, comment, options, rest, nil + } + + in = rest + continue + } + + return nil, "", nil, nil, errors.New("ssh: no key found") +} + +// ParsePublicKey parses an SSH public key formatted for use in +// the SSH wire protocol according to RFC 4253, section 6.6. +func ParsePublicKey(in []byte) (out PublicKey, err error) { + algo, in, ok := parseString(in) + if !ok { + return nil, errShortRead + } + var rest []byte + out, rest, err = parsePubKey(in, string(algo)) + if len(rest) > 0 { + return nil, errors.New("ssh: trailing junk in public key") + } + + return out, err +} + +// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH +// authorized_keys file. The return value ends with newline. +func MarshalAuthorizedKey(key PublicKey) []byte { + b := &bytes.Buffer{} + b.WriteString(key.Type()) + b.WriteByte(' ') + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(key.Marshal()) + e.Close() + b.WriteByte('\n') + return b.Bytes() +} + +// PublicKey is an abstraction of different types of public keys. 
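+// The concrete implementations in this package wrap the corresponding Go
+// key types (rsaPublicKey, dsaPublicKey, ecdsaPublicKey, ed25519PublicKey);
+// certificates parsed by parseCert satisfy the interface as well.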
+type PublicKey interface { + // Type returns the key's type, e.g. "ssh-rsa". + Type() string + + // Marshal returns the serialized key data in SSH wire format, + // with the name prefix. + Marshal() []byte + + // Verify that sig is a signature on the given data using this + // key. This function will hash the data appropriately first. + Verify(data []byte, sig *Signature) error +} + +// CryptoPublicKey, if implemented by a PublicKey, +// returns the underlying crypto.PublicKey form of the key. +type CryptoPublicKey interface { + CryptoPublicKey() crypto.PublicKey +} + +// A Signer can create signatures that verify against a public key. +type Signer interface { + // PublicKey returns an associated PublicKey instance. + PublicKey() PublicKey + + // Sign returns raw signature for the given data. This method + // will apply the hash specified for the keytype to the data. + Sign(rand io.Reader, data []byte) (*Signature, error) +} + +type rsaPublicKey rsa.PublicKey + +func (r *rsaPublicKey) Type() string { + return "ssh-rsa" +} + +// parseRSA parses an RSA key according to RFC 4253, section 6.6. +func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + E *big.Int + N *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if w.E.BitLen() > 24 { + return nil, nil, errors.New("ssh: exponent too large") + } + e := w.E.Int64() + if e < 3 || e&1 == 0 { + return nil, nil, errors.New("ssh: incorrect exponent") + } + + var key rsa.PublicKey + key.E = int(e) + key.N = w.N + return (*rsaPublicKey)(&key), w.Rest, nil +} + +func (r *rsaPublicKey) Marshal() []byte { + e := new(big.Int).SetInt64(int64(r.E)) + // RSA publickey struct layout should match the struct used by + // parseRSACert in the x/crypto/ssh/agent package. + wirekey := struct { + Name string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + e, + r.N, + } + return Marshal(&wirekey) +} + +func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != r.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) + } + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob) +} + +func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*rsa.PublicKey)(r) +} + +type dsaPublicKey dsa.PublicKey + +func (r *dsaPublicKey) Type() string { + return "ssh-dss" +} + +// parseDSA parses an DSA key according to RFC 4253, section 6.6. +func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + P, Q, G, Y *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := &dsaPublicKey{ + Parameters: dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + }, + Y: w.Y, + } + return key, w.Rest, nil +} + +func (k *dsaPublicKey) Marshal() []byte { + // DSA publickey struct layout should match the struct used by + // parseDSACert in the x/crypto/ssh/agent package. 
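+	// The result is the RFC 4253, section 6.6 "ssh-dss" wire encoding:
+	// the string "ssh-dss" followed by the mpints p, q, g and y, which
+	// is exactly the field order of w below.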
+ w := struct { + Name string + P, Q, G, Y *big.Int + }{ + k.Type(), + k.P, + k.Q, + k.G, + k.Y, + } + + return Marshal(&w) +} + +func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 4253, section 6.6, + // The value for 'dss_signature_blob' is encoded as a string containing + // r, followed by s (which are 160-bit integers, without lengths or + // padding, unsigned, and in network byte order). + // For DSS purposes, sig.Blob should be exactly 40 bytes in length. + if len(sig.Blob) != 40 { + return errors.New("ssh: DSA signature parse error") + } + r := new(big.Int).SetBytes(sig.Blob[:20]) + s := new(big.Int).SetBytes(sig.Blob[20:]) + if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*dsa.PublicKey)(k) +} + +type dsaPrivateKey struct { + *dsa.PrivateKey +} + +func (k *dsaPrivateKey) PublicKey() PublicKey { + return (*dsaPublicKey)(&k.PrivateKey.PublicKey) +} + +func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + r, s, err := dsa.Sign(rand, k.PrivateKey, digest) + if err != nil { + return nil, err + } + + sig := make([]byte, 40) + rb := r.Bytes() + sb := s.Bytes() + + copy(sig[20-len(rb):20], rb) + copy(sig[40-len(sb):], sb) + + return &Signature{ + Format: k.PublicKey().Type(), + Blob: sig, + }, nil +} + +type ecdsaPublicKey ecdsa.PublicKey + +func (key *ecdsaPublicKey) Type() string { + return "ecdsa-sha2-" + key.nistID() +} + +func (key *ecdsaPublicKey) nistID() string { + switch key.Params().BitSize { + case 256: + return "nistp256" + case 384: + return "nistp384" + case 521: + return "nistp521" + } + panic("ssh: unsupported ecdsa key size") +} + +type ed25519PublicKey ed25519.PublicKey + +func (key ed25519PublicKey) Type() string { + return KeyAlgoED25519 +} + +func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := ed25519.PublicKey(w.KeyBytes) + + return (ed25519PublicKey)(key), w.Rest, nil +} + +func (key ed25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + }{ + KeyAlgoED25519, + []byte(key), + } + return Marshal(&w) +} + +func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error { + if sig.Format != key.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type()) + } + + edKey := (ed25519.PublicKey)(key) + if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return ed25519.PublicKey(k) +} + +func supportedEllipticCurve(curve elliptic.Curve) bool { + return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() +} + +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + +// 
parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. +func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(ecdsa.PublicKey) + + switch w.Curve { + case "nistp256": + key.Curve = elliptic.P256() + case "nistp384": + key.Curve = elliptic.P384() + case "nistp521": + key.Curve = elliptic.P521() + default: + return nil, nil, errors.New("ssh: unsupported curve") + } + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + return (*ecdsaPublicKey)(key), w.Rest, nil +} + +func (key *ecdsaPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y) + // ECDSA publickey struct layout should match the struct used by + // parseECDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + ID string + Key []byte + }{ + key.Type(), + key.nistID(), + keyBytes, + } + + return Marshal(&w) +} + +func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != key.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type()) + } + + h := ecHash(key.Curve).New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 5656, section 3.1.2, + // The ecdsa_signature_blob value has the following specific encoding: + // mpint r + // mpint s + var ecSig struct { + R *big.Int + S *big.Int + } + + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*ecdsa.PublicKey)(k) +} + +// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, +// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding +// Signer instance. ECDSA keys must use P-256, P-384 or P-521. +func NewSignerFromKey(key interface{}) (Signer, error) { + switch key := key.(type) { + case crypto.Signer: + return NewSignerFromSigner(key) + case *dsa.PrivateKey: + return &dsaPrivateKey{key}, nil + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +type wrappedSigner struct { + signer crypto.Signer + pubKey PublicKey +} + +// NewSignerFromSigner takes any crypto.Signer implementation and +// returns a corresponding Signer interface. This can be used, for +// example, with keys kept in hardware modules. 
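+//
+// A sketch of that use, where hsmKey stands for some hypothetical
+// hardware-backed crypto.Signer:
+//
+//	signer, err := NewSignerFromSigner(hsmKey)
+//	if err != nil {
+//		// handle error
+//	}
+//	serverConfig.AddHostKey(signer)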
+func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { + pubKey, err := NewPublicKey(signer.Public()) + if err != nil { + return nil, err + } + + return &wrappedSigner{signer, pubKey}, nil +} + +func (s *wrappedSigner) PublicKey() PublicKey { + return s.pubKey +} + +func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + var hashFunc crypto.Hash + + switch key := s.pubKey.(type) { + case *rsaPublicKey, *dsaPublicKey: + hashFunc = crypto.SHA1 + case *ecdsaPublicKey: + hashFunc = ecHash(key.Curve) + case ed25519PublicKey: + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } + + var digest []byte + if hashFunc != 0 { + h := hashFunc.New() + h.Write(data) + digest = h.Sum(nil) + } else { + digest = data + } + + signature, err := s.signer.Sign(rand, digest, hashFunc) + if err != nil { + return nil, err + } + + // crypto.Signer.Sign is expected to return an ASN.1-encoded signature + // for ECDSA and DSA, but that's not the encoding expected by SSH, so + // re-encode. + switch s.pubKey.(type) { + case *ecdsaPublicKey, *dsaPublicKey: + type asn1Signature struct { + R, S *big.Int + } + asn1Sig := new(asn1Signature) + _, err := asn1.Unmarshal(signature, asn1Sig) + if err != nil { + return nil, err + } + + switch s.pubKey.(type) { + case *ecdsaPublicKey: + signature = Marshal(asn1Sig) + + case *dsaPublicKey: + signature = make([]byte, 40) + r := asn1Sig.R.Bytes() + s := asn1Sig.S.Bytes() + copy(signature[20-len(r):20], r) + copy(signature[40-len(s):40], s) + } + } + + return &Signature{ + Format: s.pubKey.Type(), + Blob: signature, + }, nil +} + +// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, +// or ed25519.PublicKey returns a corresponding PublicKey instance. +// ECDSA keys must use P-256, P-384 or P-521. +func NewPublicKey(key interface{}) (PublicKey, error) { + switch key := key.(type) { + case *rsa.PublicKey: + return (*rsaPublicKey)(key), nil + case *ecdsa.PublicKey: + if !supportedEllipticCurve(key.Curve) { + return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.") + } + return (*ecdsaPublicKey)(key), nil + case *dsa.PublicKey: + return (*dsaPublicKey)(key), nil + case ed25519.PublicKey: + return (ed25519PublicKey)(key), nil + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports +// the same keys as ParseRawPrivateKey. +func ParsePrivateKey(pemBytes []byte) (Signer, error) { + key, err := ParseRawPrivateKey(pemBytes) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// encryptedBlock tells whether a private key is +// encrypted by examining its Proc-Type header +// for a mention of ENCRYPTED +// according to RFC 1421 Section 4.6.1.1. +func encryptedBlock(block *pem.Block) bool { + return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") +} + +// ParseRawPrivateKey returns a private key from a PEM encoded private key. It +// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys. 
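+// The concrete type of the returned key depends on the PEM block type:
+// *rsa.PrivateKey, *ecdsa.PrivateKey or *dsa.PrivateKey for the classic
+// formats, and *ed25519.PrivateKey for "OPENSSH PRIVATE KEY" blocks, the
+// only key type parseOpenSSHPrivateKey below understands.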
+func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + + if encryptedBlock(block) { + return nil, errors.New("ssh: cannot decode encrypted private keys") + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(block.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(block.Bytes) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(block.Bytes) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as +// specified by the OpenSSL DSA man page. +func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { + var k struct { + Version int + P *big.Int + Q *big.Int + G *big.Int + Pub *big.Int + Priv *big.Int + } + rest, err := asn1.Unmarshal(der, &k) + if err != nil { + return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) + } + if len(rest) > 0 { + return nil, errors.New("ssh: garbage after DSA key") + } + + return &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Pub, + }, + X: k.Priv, + }, nil +} + +// Implemented based on the documentation at +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key +func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) { + magic := append([]byte("openssh-key-v1"), 0) + if !bytes.Equal(magic, key[0:len(magic)]) { + return nil, errors.New("ssh: invalid openssh private key format") + } + remaining := key[len(magic):] + + var w struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte + } + + if err := Unmarshal(remaining, &w); err != nil { + return nil, err + } + + pk1 := struct { + Check1 uint32 + Check2 uint32 + Keytype string + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { + return nil, err + } + + if pk1.Check1 != pk1.Check2 { + return nil, errors.New("ssh: checkint mismatch") + } + + // we only handle ed25519 keys currently + if pk1.Keytype != KeyAlgoED25519 { + return nil, errors.New("ssh: unhandled key type") + } + + for i, b := range pk1.Pad { + if int(b) != i+1 { + return nil, errors.New("ssh: padding not as expected") + } + } + + if len(pk1.Priv) != ed25519.PrivateKeySize { + return nil, errors.New("ssh: private key unexpected length") + } + + pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) + copy(pk, pk1.Priv) + return &pk, nil +} + +// FingerprintLegacyMD5 returns the user presentation of the key's +// fingerprint as described by RFC 4716 section 4. +func FingerprintLegacyMD5(pubKey PublicKey) string { + md5sum := md5.Sum(pubKey.Marshal()) + hexarray := make([]string, len(md5sum)) + for i, c := range md5sum { + hexarray[i] = hex.EncodeToString([]byte{c}) + } + return strings.Join(hexarray, ":") +} + +// FingerprintSHA256 returns the user presentation of the key's +// fingerprint as unpadded base64 encoded sha256 hash. +// This format was introduced from OpenSSH 6.8. 
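+// The result is the string "SHA256:" followed by 43 characters of
+// unpadded base64 (256 bits at 6 bits per character). See: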
+// https://www.openssh.com/txt/release-6.8 +// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) +func FingerprintSHA256(pubKey PublicKey) string { + sha256sum := sha256.Sum256(pubKey.Marshal()) + hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) + return "SHA256:" + hash +} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go new file mode 100644 index 0000000000..c07a06285e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -0,0 +1,61 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Message authentication support + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "hash" +) + +type macMode struct { + keySize int + etm bool + new func(key []byte) hash.Hash +} + +// truncatingMAC wraps around a hash.Hash and truncates the output digest to +// a given size. +type truncatingMAC struct { + length int + hmac hash.Hash +} + +func (t truncatingMAC) Write(data []byte) (int, error) { + return t.hmac.Write(data) +} + +func (t truncatingMAC) Sum(in []byte) []byte { + out := t.hmac.Sum(in) + return out[:len(in)+t.length] +} + +func (t truncatingMAC) Reset() { + t.hmac.Reset() +} + +func (t truncatingMAC) Size() int { + return t.length +} + +func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } + +var macModes = map[string]*macMode{ + "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha1": {20, false, func(key []byte) hash.Hash { + return hmac.New(sha1.New, key) + }}, + "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { + return truncatingMAC{12, hmac.New(sha1.New, key)} + }}, +} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go new file mode 100644 index 0000000000..e6ecd3afa5 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -0,0 +1,758 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "strconv" + "strings" +) + +// These are SSH message type numbers. They are scattered around several +// documents but many were taken from [SSH-PARAMETERS]. +const ( + msgIgnore = 2 + msgUnimplemented = 3 + msgDebug = 4 + msgNewKeys = 21 + + // Standard authentication messages + msgUserAuthSuccess = 52 + msgUserAuthBanner = 53 +) + +// SSH messages: +// +// These structures mirror the wire format of the corresponding SSH messages. +// They are marshaled using reflection with the marshal and unmarshal functions +// in this file. The only wrinkle is that a final member of type []byte with a +// ssh tag of "rest" receives the remainder of a packet when unmarshaling. + +// See RFC 4253, section 11.1. +const msgDisconnect = 1 + +// disconnectMsg is the message that signals a disconnect. It is also +// the error type returned from mux.Wait() +type disconnectMsg struct { + Reason uint32 `sshtype:"1"` + Message string + Language string +} + +func (d *disconnectMsg) Error() string { + return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) +} + +// See RFC 4253, section 7.1. 
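+// Each side lists its supported algorithms in preference order as
+// comma-separated name-lists; negotiation picks, for every slot, the
+// first client algorithm that also appears in the server's list.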
+const msgKexInit = 20 + +type kexInitMsg struct { + Cookie [16]byte `sshtype:"20"` + KexAlgos []string + ServerHostKeyAlgos []string + CiphersClientServer []string + CiphersServerClient []string + MACsClientServer []string + MACsServerClient []string + CompressionClientServer []string + CompressionServerClient []string + LanguagesClientServer []string + LanguagesServerClient []string + FirstKexFollows bool + Reserved uint32 +} + +// See RFC 4253, section 8. + +// Diffie-Helman +const msgKexDHInit = 30 + +type kexDHInitMsg struct { + X *big.Int `sshtype:"30"` +} + +const msgKexECDHInit = 30 + +type kexECDHInitMsg struct { + ClientPubKey []byte `sshtype:"30"` +} + +const msgKexECDHReply = 31 + +type kexECDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + EphemeralPubKey []byte + Signature []byte +} + +const msgKexDHReply = 31 + +type kexDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + Y *big.Int + Signature []byte +} + +// See RFC 4253, section 10. +const msgServiceRequest = 5 + +type serviceRequestMsg struct { + Service string `sshtype:"5"` +} + +// See RFC 4253, section 10. +const msgServiceAccept = 6 + +type serviceAcceptMsg struct { + Service string `sshtype:"6"` +} + +// See RFC 4252, section 5. +const msgUserAuthRequest = 50 + +type userAuthRequestMsg struct { + User string `sshtype:"50"` + Service string + Method string + Payload []byte `ssh:"rest"` +} + +// Used for debug printouts of packets. +type userAuthSuccessMsg struct { +} + +// See RFC 4252, section 5.1 +const msgUserAuthFailure = 51 + +type userAuthFailureMsg struct { + Methods []string `sshtype:"51"` + PartialSuccess bool +} + +// See RFC 4256, section 3.2 +const msgUserAuthInfoRequest = 60 +const msgUserAuthInfoResponse = 61 + +type userAuthInfoRequestMsg struct { + User string `sshtype:"60"` + Instruction string + DeprecatedLanguage string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpen = 90 + +type channelOpenMsg struct { + ChanType string `sshtype:"90"` + PeersId uint32 + PeersWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +const msgChannelExtendedData = 95 +const msgChannelData = 94 + +// Used for debug print outs of packets. +type channelDataMsg struct { + PeersId uint32 `sshtype:"94"` + Length uint32 + Rest []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenConfirm = 91 + +type channelOpenConfirmMsg struct { + PeersId uint32 `sshtype:"91"` + MyId uint32 + MyWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenFailure = 92 + +type channelOpenFailureMsg struct { + PeersId uint32 `sshtype:"92"` + Reason RejectionReason + Message string + Language string +} + +const msgChannelRequest = 98 + +type channelRequestMsg struct { + PeersId uint32 `sshtype:"98"` + Request string + WantReply bool + RequestSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.4. +const msgChannelSuccess = 99 + +type channelRequestSuccessMsg struct { + PeersId uint32 `sshtype:"99"` +} + +// See RFC 4254, section 5.4. 
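+// Like channelRequestSuccessMsg above, this reply is sent only when the
+// corresponding channel request had WantReply set.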
+const msgChannelFailure = 100 + +type channelRequestFailureMsg struct { + PeersId uint32 `sshtype:"100"` +} + +// See RFC 4254, section 5.3 +const msgChannelClose = 97 + +type channelCloseMsg struct { + PeersId uint32 `sshtype:"97"` +} + +// See RFC 4254, section 5.3 +const msgChannelEOF = 96 + +type channelEOFMsg struct { + PeersId uint32 `sshtype:"96"` +} + +// See RFC 4254, section 4 +const msgGlobalRequest = 80 + +type globalRequestMsg struct { + Type string `sshtype:"80"` + WantReply bool + Data []byte `ssh:"rest"` +} + +// See RFC 4254, section 4 +const msgRequestSuccess = 81 + +type globalRequestSuccessMsg struct { + Data []byte `ssh:"rest" sshtype:"81"` +} + +// See RFC 4254, section 4 +const msgRequestFailure = 82 + +type globalRequestFailureMsg struct { + Data []byte `ssh:"rest" sshtype:"82"` +} + +// See RFC 4254, section 5.2 +const msgChannelWindowAdjust = 93 + +type windowAdjustMsg struct { + PeersId uint32 `sshtype:"93"` + AdditionalBytes uint32 +} + +// See RFC 4252, section 7 +const msgUserAuthPubKeyOk = 60 + +type userAuthPubKeyOkMsg struct { + Algo string `sshtype:"60"` + PubKey []byte +} + +// typeTags returns the possible type bytes for the given reflect.Type, which +// should be a struct. The possible values are separated by a '|' character. +func typeTags(structType reflect.Type) (tags []byte) { + tagStr := structType.Field(0).Tag.Get("sshtype") + + for _, tag := range strings.Split(tagStr, "|") { + i, err := strconv.Atoi(tag) + if err == nil { + tags = append(tags, byte(i)) + } + } + + return tags +} + +func fieldError(t reflect.Type, field int, problem string) error { + if problem != "" { + problem = ": " + problem + } + return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) +} + +var errShortRead = errors.New("ssh: short read") + +// Unmarshal parses data in SSH wire format into a structure. The out +// argument should be a pointer to struct. If the first member of the +// struct has the "sshtype" tag set to a '|'-separated set of numbers +// in decimal, the packet must start with one of those numbers. In +// case of error, Unmarshal returns a ParseError or +// UnexpectedMessageError. 
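+//
+// For example (illustrative only), decoding a raw disconnect packet:
+//
+//	var msg disconnectMsg
+//	if err := Unmarshal(packet, &msg); err != nil {
+//		// packet was not a well-formed SSH_MSG_DISCONNECT
+//	}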
+func Unmarshal(data []byte, out interface{}) error { + v := reflect.ValueOf(out).Elem() + structType := v.Type() + expectedTypes := typeTags(structType) + + var expectedType byte + if len(expectedTypes) > 0 { + expectedType = expectedTypes[0] + } + + if len(data) == 0 { + return parseError(expectedType) + } + + if len(expectedTypes) > 0 { + goodType := false + for _, e := range expectedTypes { + if e > 0 && data[0] == e { + goodType = true + break + } + } + if !goodType { + return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) + } + data = data[1:] + } + + var ok bool + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + t := field.Type() + switch t.Kind() { + case reflect.Bool: + if len(data) < 1 { + return errShortRead + } + field.SetBool(data[0] != 0) + data = data[1:] + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + return fieldError(structType, i, "array of unsupported type") + } + if len(data) < t.Len() { + return errShortRead + } + for j, n := 0, t.Len(); j < n; j++ { + field.Index(j).Set(reflect.ValueOf(data[j])) + } + data = data[t.Len():] + case reflect.Uint64: + var u64 uint64 + if u64, data, ok = parseUint64(data); !ok { + return errShortRead + } + field.SetUint(u64) + case reflect.Uint32: + var u32 uint32 + if u32, data, ok = parseUint32(data); !ok { + return errShortRead + } + field.SetUint(uint64(u32)) + case reflect.Uint8: + if len(data) < 1 { + return errShortRead + } + field.SetUint(uint64(data[0])) + data = data[1:] + case reflect.String: + var s []byte + if s, data, ok = parseString(data); !ok { + return fieldError(structType, i, "") + } + field.SetString(string(s)) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if structType.Field(i).Tag.Get("ssh") == "rest" { + field.Set(reflect.ValueOf(data)) + data = nil + } else { + var s []byte + if s, data, ok = parseString(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(s)) + } + case reflect.String: + var nl []string + if nl, data, ok = parseNameList(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(nl)) + default: + return fieldError(structType, i, "slice of unsupported type") + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + if n, data, ok = parseInt(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(n)) + } else { + return fieldError(structType, i, "pointer to unsupported type") + } + default: + return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) + } + } + + if len(data) != 0 { + return parseError(expectedType) + } + + return nil +} + +// Marshal serializes the message in msg to SSH wire format. The msg +// argument should be a struct or pointer to struct. If the first +// member has the "sshtype" tag set to a number in decimal, that +// number is prepended to the result. If the last of member has the +// "ssh" tag set to "rest", its contents are appended to the output. 
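+//
+// For example (illustrative only):
+//
+//	packet := Marshal(&serviceRequestMsg{Service: "ssh-userauth"})
+//	// packet is the byte 5 (msgServiceRequest) followed by the
+//	// length-prefixed string "ssh-userauth".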
+func Marshal(msg interface{}) []byte { + out := make([]byte, 0, 64) + return marshalStruct(out, msg) +} + +func marshalStruct(out []byte, msg interface{}) []byte { + v := reflect.Indirect(reflect.ValueOf(msg)) + msgTypes := typeTags(v.Type()) + if len(msgTypes) > 0 { + out = append(out, msgTypes[0]) + } + + for i, n := 0, v.NumField(); i < n; i++ { + field := v.Field(i) + switch t := field.Type(); t.Kind() { + case reflect.Bool: + var v uint8 + if field.Bool() { + v = 1 + } + out = append(out, v) + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) + } + for j, l := 0, t.Len(); j < l; j++ { + out = append(out, uint8(field.Index(j).Uint())) + } + case reflect.Uint32: + out = appendU32(out, uint32(field.Uint())) + case reflect.Uint64: + out = appendU64(out, uint64(field.Uint())) + case reflect.Uint8: + out = append(out, uint8(field.Uint())) + case reflect.String: + s := field.String() + out = appendInt(out, len(s)) + out = append(out, s...) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if v.Type().Field(i).Tag.Get("ssh") != "rest" { + out = appendInt(out, field.Len()) + } + out = append(out, field.Bytes()...) + case reflect.String: + offset := len(out) + out = appendU32(out, 0) + if n := field.Len(); n > 0 { + for j := 0; j < n; j++ { + f := field.Index(j) + if j != 0 { + out = append(out, ',') + } + out = append(out, f.String()...) + } + // overwrite length value + binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) + } + default: + panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + nValue := reflect.ValueOf(&n) + nValue.Elem().Set(field) + needed := intLength(n) + oldLength := len(out) + + if cap(out)-len(out) < needed { + newOut := make([]byte, len(out), 2*(len(out)+needed)) + copy(newOut, out) + out = newOut + } + out = out[:oldLength+needed] + marshalInt(out[oldLength:], n) + } else { + panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) + } + } + } + + return out +} + +var bigOne = big.NewInt(1) + +func parseString(in []byte) (out, rest []byte, ok bool) { + if len(in) < 4 { + return + } + length := binary.BigEndian.Uint32(in) + in = in[4:] + if uint32(len(in)) < length { + return + } + out = in[:length] + rest = in[length:] + ok = true + return +} + +var ( + comma = []byte{','} + emptyNameList = []string{} +) + +func parseNameList(in []byte) (out []string, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + if len(contents) == 0 { + out = emptyNameList + return + } + parts := bytes.Split(contents, comma) + out = make([]string, len(parts)) + for i, part := range parts { + out[i] = string(part) + } + return +} + +func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + out = new(big.Int) + + if len(contents) > 0 && contents[0]&0x80 == 0x80 { + // This is a negative number + notBytes := make([]byte, len(contents)) + for i := range notBytes { + notBytes[i] = ^contents[i] + } + out.SetBytes(notBytes) + out.Add(out, bigOne) + out.Neg(out) + } else { + // Positive number + out.SetBytes(contents) + } + ok = true + return +} + +func parseUint32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, nil, false + } + return binary.BigEndian.Uint32(in), in[4:], true +} + +func parseUint64(in []byte) (uint64, []byte, 
bool) { + if len(in) < 8 { + return 0, nil, false + } + return binary.BigEndian.Uint64(in), in[8:], true +} + +func intLength(n *big.Int) int { + length := 4 /* length bytes */ + if n.Sign() < 0 { + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bitLen := nMinus1.BitLen() + if bitLen%8 == 0 { + // The number will need 0xff padding + length++ + } + length += (bitLen + 7) / 8 + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bitLen := n.BitLen() + if bitLen%8 == 0 { + // The number will need 0x00 padding + length++ + } + length += (bitLen + 7) / 8 + } + + return length +} + +func marshalUint32(to []byte, n uint32) []byte { + binary.BigEndian.PutUint32(to, n) + return to[4:] +} + +func marshalUint64(to []byte, n uint64) []byte { + binary.BigEndian.PutUint64(to, n) + return to[8:] +} + +func marshalInt(to []byte, n *big.Int) []byte { + lengthBytes := to + to = to[4:] + length := 0 + + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + to[0] = 0xff + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with a 0x00 in order to + // stop it looking like a negative number. + to[0] = 0 + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } + + lengthBytes[0] = byte(length >> 24) + lengthBytes[1] = byte(length >> 16) + lengthBytes[2] = byte(length >> 8) + lengthBytes[3] = byte(length) + return to +} + +func writeInt(w io.Writer, n *big.Int) { + length := intLength(n) + buf := make([]byte, length) + marshalInt(buf, n) + w.Write(buf) +} + +func writeString(w io.Writer, s []byte) { + var lengthBytes [4]byte + lengthBytes[0] = byte(len(s) >> 24) + lengthBytes[1] = byte(len(s) >> 16) + lengthBytes[2] = byte(len(s) >> 8) + lengthBytes[3] = byte(len(s)) + w.Write(lengthBytes[:]) + w.Write(s) +} + +func stringLength(n int) int { + return 4 + n +} + +func marshalString(to []byte, s []byte) []byte { + to[0] = byte(len(s) >> 24) + to[1] = byte(len(s) >> 16) + to[2] = byte(len(s) >> 8) + to[3] = byte(len(s)) + to = to[4:] + copy(to, s) + return to[len(s):] +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)) + +// Decode a packet into its corresponding message. 
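+// Only the messages listed in the switch below are understood; any other
+// leading byte yields an unexpectedMessageError.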
+func decode(packet []byte) (interface{}, error) { + var msg interface{} + switch packet[0] { + case msgDisconnect: + msg = new(disconnectMsg) + case msgServiceRequest: + msg = new(serviceRequestMsg) + case msgServiceAccept: + msg = new(serviceAcceptMsg) + case msgKexInit: + msg = new(kexInitMsg) + case msgKexDHInit: + msg = new(kexDHInitMsg) + case msgKexDHReply: + msg = new(kexDHReplyMsg) + case msgUserAuthRequest: + msg = new(userAuthRequestMsg) + case msgUserAuthSuccess: + return new(userAuthSuccessMsg), nil + case msgUserAuthFailure: + msg = new(userAuthFailureMsg) + case msgUserAuthPubKeyOk: + msg = new(userAuthPubKeyOkMsg) + case msgGlobalRequest: + msg = new(globalRequestMsg) + case msgRequestSuccess: + msg = new(globalRequestSuccessMsg) + case msgRequestFailure: + msg = new(globalRequestFailureMsg) + case msgChannelOpen: + msg = new(channelOpenMsg) + case msgChannelData: + msg = new(channelDataMsg) + case msgChannelOpenConfirm: + msg = new(channelOpenConfirmMsg) + case msgChannelOpenFailure: + msg = new(channelOpenFailureMsg) + case msgChannelWindowAdjust: + msg = new(windowAdjustMsg) + case msgChannelEOF: + msg = new(channelEOFMsg) + case msgChannelClose: + msg = new(channelCloseMsg) + case msgChannelRequest: + msg = new(channelRequestMsg) + case msgChannelSuccess: + msg = new(channelRequestSuccessMsg) + case msgChannelFailure: + msg = new(channelRequestFailureMsg) + default: + return nil, unexpectedMessageError(0, packet[0]) + } + if err := Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go new file mode 100644 index 0000000000..27a527c106 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -0,0 +1,330 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "fmt" + "io" + "log" + "sync" + "sync/atomic" +) + +// debugMux, if set, causes messages in the connection protocol to be +// logged. +const debugMux = false + +// chanList is a thread safe channel list. +type chanList struct { + // protects concurrent access to chans + sync.Mutex + + // chans are indexed by the local id of the channel, which the + // other side should send in the PeersId field. + chans []*channel + + // This is a debugging aid: it offsets all IDs by this + // amount. This helps distinguish otherwise identical + // server/client muxes + offset uint32 +} + +// Assigns a channel ID to the given channel. +func (c *chanList) add(ch *channel) uint32 { + c.Lock() + defer c.Unlock() + for i := range c.chans { + if c.chans[i] == nil { + c.chans[i] = ch + return uint32(i) + c.offset + } + } + c.chans = append(c.chans, ch) + return uint32(len(c.chans)-1) + c.offset +} + +// getChan returns the channel for the given ID. +func (c *chanList) getChan(id uint32) *channel { + id -= c.offset + + c.Lock() + defer c.Unlock() + if id < uint32(len(c.chans)) { + return c.chans[id] + } + return nil +} + +func (c *chanList) remove(id uint32) { + id -= c.offset + c.Lock() + if id < uint32(len(c.chans)) { + c.chans[id] = nil + } + c.Unlock() +} + +// dropAll forgets all channels it knows, returning them in a slice. 
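+// It is used by the mux read loop during shutdown, so that the returned
+// channels can be closed after chanList's lock has been released.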
+func (c *chanList) dropAll() []*channel { + c.Lock() + defer c.Unlock() + var r []*channel + + for _, ch := range c.chans { + if ch == nil { + continue + } + r = append(r, ch) + } + c.chans = nil + return r +} + +// mux represents the state for the SSH connection protocol, which +// multiplexes many channels onto a single packet transport. +type mux struct { + conn packetConn + chanList chanList + + incomingChannels chan NewChannel + + globalSentMu sync.Mutex + globalResponses chan interface{} + incomingRequests chan *Request + + errCond *sync.Cond + err error +} + +// When debugging, each new chanList instantiation has a different +// offset. +var globalOff uint32 + +func (m *mux) Wait() error { + m.errCond.L.Lock() + defer m.errCond.L.Unlock() + for m.err == nil { + m.errCond.Wait() + } + return m.err +} + +// newMux returns a mux that runs over the given connection. +func newMux(p packetConn) *mux { + m := &mux{ + conn: p, + incomingChannels: make(chan NewChannel, chanSize), + globalResponses: make(chan interface{}, 1), + incomingRequests: make(chan *Request, chanSize), + errCond: newCond(), + } + if debugMux { + m.chanList.offset = atomic.AddUint32(&globalOff, 1) + } + + go m.loop() + return m +} + +func (m *mux) sendMessage(msg interface{}) error { + p := Marshal(msg) + if debugMux { + log.Printf("send global(%d): %#v", m.chanList.offset, msg) + } + return m.conn.writePacket(p) +} + +func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { + if wantReply { + m.globalSentMu.Lock() + defer m.globalSentMu.Unlock() + } + + if err := m.sendMessage(globalRequestMsg{ + Type: name, + WantReply: wantReply, + Data: payload, + }); err != nil { + return false, nil, err + } + + if !wantReply { + return false, nil, nil + } + + msg, ok := <-m.globalResponses + if !ok { + return false, nil, io.EOF + } + switch msg := msg.(type) { + case *globalRequestFailureMsg: + return false, msg.Data, nil + case *globalRequestSuccessMsg: + return true, msg.Data, nil + default: + return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) + } +} + +// ackRequest must be called after processing a global request that +// has WantReply set. +func (m *mux) ackRequest(ok bool, data []byte) error { + if ok { + return m.sendMessage(globalRequestSuccessMsg{Data: data}) + } + return m.sendMessage(globalRequestFailureMsg{Data: data}) +} + +func (m *mux) Close() error { + return m.conn.Close() +} + +// loop runs the connection machine. It will process packets until an +// error is encountered. To synchronize on loop exit, use mux.Wait. +func (m *mux) loop() { + var err error + for err == nil { + err = m.onePacket() + } + + for _, ch := range m.chanList.dropAll() { + ch.close() + } + + close(m.incomingChannels) + close(m.incomingRequests) + close(m.globalResponses) + + m.conn.Close() + + m.errCond.L.Lock() + m.err = err + m.errCond.Broadcast() + m.errCond.L.Unlock() + + if debugMux { + log.Println("loop exit", err) + } +} + +// onePacket reads and processes one packet. 
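The `loop`/`SendRequest` pair above is what the exported client API rides on. A hedged sketch of a common use, assuming an already-connected `*ssh.Client` named `client` is passed in: a keepalive is just a global request whose reply, success or failure, proves the transport is still being serviced.

```go
package main

import "golang.org/x/crypto/ssh"

// keepalive sends a global request with wantReply set and blocks until
// the peer answers; any answer at all shows the connection is alive.
func keepalive(client *ssh.Client) error {
	_, _, err := client.SendRequest("keepalive@openssh.com", true, nil)
	return err
}
```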
+func (m *mux) onePacket() error { + packet, err := m.conn.readPacket() + if err != nil { + return err + } + + if debugMux { + if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { + log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) + } else { + p, _ := decode(packet) + log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) + } + } + + switch packet[0] { + case msgChannelOpen: + return m.handleChannelOpen(packet) + case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: + return m.handleGlobalPacket(packet) + } + + // assume a channel packet. + if len(packet) < 5 { + return parseError(packet[0]) + } + id := binary.BigEndian.Uint32(packet[1:]) + ch := m.chanList.getChan(id) + if ch == nil { + return fmt.Errorf("ssh: invalid channel %d", id) + } + + return ch.handlePacket(packet) +} + +func (m *mux) handleGlobalPacket(packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + case *globalRequestMsg: + m.incomingRequests <- &Request{ + Type: msg.Type, + WantReply: msg.WantReply, + Payload: msg.Data, + mux: m, + } + case *globalRequestSuccessMsg, *globalRequestFailureMsg: + m.globalResponses <- msg + default: + panic(fmt.Sprintf("not a global message %#v", msg)) + } + + return nil +} + +// handleChannelOpen schedules a channel to be Accept()ed. +func (m *mux) handleChannelOpen(packet []byte) error { + var msg channelOpenMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + failMsg := channelOpenFailureMsg{ + PeersId: msg.PeersId, + Reason: ConnectionFailed, + Message: "invalid request", + Language: "en_US.UTF-8", + } + return m.sendMessage(failMsg) + } + + c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) + c.remoteId = msg.PeersId + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.PeersWindow) + m.incomingChannels <- c + return nil +} + +func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { + ch, err := m.openChannel(chanType, extra) + if err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { + ch := m.newChannel(chanType, channelOutbound, extra) + + ch.maxIncomingPayload = channelMaxPacket + + open := channelOpenMsg{ + ChanType: chanType, + PeersWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + TypeSpecificData: extra, + PeersId: ch.localId, + } + if err := m.sendMessage(open); err != nil { + return nil, err + } + + switch msg := (<-ch.msg).(type) { + case *channelOpenConfirmMsg: + return ch, nil + case *channelOpenFailureMsg: + return nil, &OpenChannelError{msg.Reason, msg.Message} + default: + return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go new file mode 100644 index 0000000000..77c84d165c --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -0,0 +1,491 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "strings" +) + +// The Permissions type holds fine-grained permissions that are +// specific to a user or a specific authentication method for a +// user. Permissions, except for "source-address", must be enforced in +// the server application layer, after successful authentication. The +// Permissions are passed on in ServerConn so a server implementation +// can honor them. +type Permissions struct { + // Critical options restrict default permissions. Common + // restrictions are "source-address" and "force-command". If + // the server cannot enforce the restriction, or does not + // recognize it, the user should not authenticate. + CriticalOptions map[string]string + + // Extensions are extra functionality that the server may + // offer on authenticated connections. Common extensions are + // "permit-agent-forwarding", "permit-X11-forwarding". Lack of + // support for an extension does not preclude authenticating a + // user. + Extensions map[string]string +} + +// ServerConfig holds server specific configuration data. +type ServerConfig struct { + // Config contains configuration shared between client and server. + Config + + hostKeys []Signer + + // NoClientAuth is true if clients are allowed to connect without + // authenticating. + NoClientAuth bool + + // PasswordCallback, if non-nil, is called when a user + // attempts to authenticate using a password. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback, if non-nil, is called when a client attempts public + // key authentication. It must return true if the given public key is + // valid for the given user. For example, see CertChecker.Authenticate. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback, if non-nil, is called when + // keyboard-interactive authentication is selected (RFC + // 4256). The client object's Challenge function should be + // used to query the user. The callback may offer multiple + // Challenge rounds. To avoid information leaks, the client + // should be presented a challenge even if the user is + // unknown. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // AuthLogCallback, if non-nil, is called to log all authentication + // attempts. + AuthLogCallback func(conn ConnMetadata, method string, err error) + + // ServerVersion is the version identification string to announce in + // the public handshake. + // If empty, a reasonable default is used. + // Note that RFC 4253 section 4.2 requires that this string start with + // "SSH-2.0-". + ServerVersion string +} + +// AddHostKey adds a private key as a host key. If an existing host +// key exists with the same algorithm, it is overwritten. Each server +// config must have at least one host key. +func (s *ServerConfig) AddHostKey(key Signer) { + for i, k := range s.hostKeys { + if k.PublicKey().Type() == key.PublicKey().Type() { + s.hostKeys[i] = key + return + } + } + + s.hostKeys = append(s.hostKeys, key) +} + +// cachedPubKey contains the results of querying whether a public key is +// acceptable for a user. +type cachedPubKey struct { + user string + pubKeyData []byte + result error + perms *Permissions +} + +const maxCachedPubKeys = 16 + +// pubKeyCache caches tests for public keys. 
Since SSH clients +// will query whether a public key is acceptable before attempting to +// authenticate with it, we end up with duplicate queries for public +// key validity. The cache only applies to a single ServerConn. +type pubKeyCache struct { + keys []cachedPubKey +} + +// get returns the result for a given user/algo/key tuple. +func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { + for _, k := range c.keys { + if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { + return k, true + } + } + return cachedPubKey{}, false +} + +// add adds the given tuple to the cache. +func (c *pubKeyCache) add(candidate cachedPubKey) { + if len(c.keys) < maxCachedPubKeys { + c.keys = append(c.keys, candidate) + } +} + +// ServerConn is an authenticated SSH connection, as seen from the +// server +type ServerConn struct { + Conn + + // If the succeeding authentication callback returned a + // non-nil Permissions pointer, it is stored here. + Permissions *Permissions +} + +// NewServerConn starts a new SSH server with c as the underlying +// transport. It starts with a handshake and, if the handshake is +// unsuccessful, it closes the connection and returns an error. The +// Request and NewChannel channels must be serviced, or the connection +// will hang. +func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + s := &connection{ + sshConn: sshConn{conn: c}, + } + perms, err := s.serverHandshake(&fullConf) + if err != nil { + c.Close() + return nil, nil, nil, err + } + return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil +} + +// signAndMarshal signs the data with the appropriate algorithm, +// and serializes the result in SSH wire format. +func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { + sig, err := k.Sign(rand, data) + if err != nil { + return nil, err + } + + return Marshal(sig), nil +} + +// handshake performs key exchange and user authentication. +func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { + if len(config.hostKeys) == 0 { + return nil, errors.New("ssh: server has no host keys") + } + + if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if config.ServerVersion != "" { + s.serverVersion = []byte(config.ServerVersion) + } else { + s.serverVersion = []byte(packageVersion) + } + var err error + s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) + if err != nil { + return nil, err + } + + tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) + s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) + + if err := s.transport.waitSession(); err != nil { + return nil, err + } + + // We just did the key change, so the session ID is established. 
+	s.sessionID = s.transport.getSessionID()
+
+	var packet []byte
+	if packet, err = s.transport.readPacket(); err != nil {
+		return nil, err
+	}
+
+	var serviceRequest serviceRequestMsg
+	if err = Unmarshal(packet, &serviceRequest); err != nil {
+		return nil, err
+	}
+	if serviceRequest.Service != serviceUserAuth {
+		return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
+	}
+	serviceAccept := serviceAcceptMsg{
+		Service: serviceUserAuth,
+	}
+	if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil {
+		return nil, err
+	}
+
+	perms, err := s.serverAuthenticate(config)
+	if err != nil {
+		return nil, err
+	}
+	s.mux = newMux(s.transport)
+	return perms, err
+}
+
+func isAcceptableAlgo(algo string) bool {
+	switch algo {
+	case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519,
+		CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
+		return true
+	}
+	return false
+}
+
+func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
+	if addr == nil {
+		return errors.New("ssh: no address known for client, but source-address match required")
+	}
+
+	tcpAddr, ok := addr.(*net.TCPAddr)
+	if !ok {
+		return fmt.Errorf("ssh: remote address %v is not a TCP address when checking source-address match", addr)
+	}
+
+	for _, sourceAddr := range strings.Split(sourceAddrs, ",") {
+		if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
+			if allowedIP.Equal(tcpAddr.IP) {
+				return nil
+			}
+		} else {
+			_, ipNet, err := net.ParseCIDR(sourceAddr)
+			if err != nil {
+				return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
+			}
+
+			if ipNet.Contains(tcpAddr.IP) {
+				return nil
+			}
+		}
+	}
+
+	return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
+}
+
+func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
+	sessionID := s.transport.getSessionID()
+	var cache pubKeyCache
+	var perms *Permissions
+
+userAuthLoop:
+	for {
+		var userAuthReq userAuthRequestMsg
+		if packet, err := s.transport.readPacket(); err != nil {
+			return nil, err
+		} else if err = Unmarshal(packet, &userAuthReq); err != nil {
+			return nil, err
+		}
+
+		if userAuthReq.Service != serviceSSH {
+			return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
+		}
+
+		s.user = userAuthReq.User
+		perms = nil
+		authErr := errors.New("no auth passed yet")
+
+		switch userAuthReq.Method {
+		case "none":
+			if config.NoClientAuth {
+				authErr = nil
+			}
+		case "password":
+			if config.PasswordCallback == nil {
+				authErr = errors.New("ssh: password auth not configured")
+				break
+			}
+			payload := userAuthReq.Payload
+			if len(payload) < 1 || payload[0] != 0 {
+				return nil, parseError(msgUserAuthRequest)
+			}
+			payload = payload[1:]
+			password, payload, ok := parseString(payload)
+			if !ok || len(payload) > 0 {
+				return nil, parseError(msgUserAuthRequest)
+			}
+
+			perms, authErr = config.PasswordCallback(s, password)
+		case "keyboard-interactive":
+			if config.KeyboardInteractiveCallback == nil {
+				authErr = errors.New("ssh: keyboard-interactive auth not configured")
+				break
+			}
+
+			prompter := &sshClientKeyboardInteractive{s}
+			perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
+		case "publickey":
+			if config.PublicKeyCallback == nil {
+				authErr = errors.New("ssh: publickey auth not configured")
+				break
+			}
+			payload :=
userAuthReq.Payload + if len(payload) < 1 { + return nil, parseError(msgUserAuthRequest) + } + isQuery := payload[0] == 0 + payload = payload[1:] + algoBytes, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + algo := string(algoBytes) + if !isAcceptableAlgo(algo) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) + break + } + + pubKeyData, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + + pubKey, err := ParsePublicKey(pubKeyData) + if err != nil { + return nil, err + } + + candidate, ok := cache.get(s.user, pubKeyData) + if !ok { + candidate.user = s.user + candidate.pubKeyData = pubKeyData + candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) + if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + candidate.result = checkSourceAddress( + s.RemoteAddr(), + candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + } + cache.add(candidate) + } + + if isQuery { + // The client can query if the given public key + // would be okay. + if len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + if candidate.result == nil { + okMsg := userAuthPubKeyOkMsg{ + Algo: algo, + PubKey: pubKeyData, + } + if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { + return nil, err + } + continue userAuthLoop + } + authErr = candidate.result + } else { + sig, payload, ok := parseSignature(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + // Ensure the public key algo and signature algo + // are supported. Compare the private key + // algorithm name that corresponds to algo with + // sig.Format. This is usually the same, but + // for certs, the names differ. + if !isAcceptableAlgo(sig.Format) { + break + } + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) + + if err := pubKey.Verify(signedData, sig); err != nil { + return nil, err + } + + authErr = candidate.result + perms = candidate.perms + } + default: + authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) + } + + if config.AuthLogCallback != nil { + config.AuthLogCallback(s, userAuthReq.Method, authErr) + } + + if authErr == nil { + break userAuthLoop + } + + var failureMsg userAuthFailureMsg + if config.PasswordCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "password") + } + if config.PublicKeyCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "publickey") + } + if config.KeyboardInteractiveCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") + } + + if len(failureMsg.Methods) == 0 { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { + return nil, err + } + } + + if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { + return nil, err + } + return perms, nil +} + +// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by +// asking the client on the other side of a ServerConn. 
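The authentication loop above is driven entirely by the callbacks on `ServerConfig`. A minimal sketch of wiring it up, assuming a host key `Signer` has already been loaded; the `demo`/`demo` credentials and the reject-everything channel handling are purely illustrative:

```go
package main

import (
	"fmt"
	"net"

	"golang.org/x/crypto/ssh"
)

// serveOne accepts a single TCP connection and runs the handshake and
// authentication loop shown above.
func serveOne(l net.Listener, hostKey ssh.Signer) error {
	config := &ssh.ServerConfig{
		PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
			if c.User() == "demo" && string(pass) == "demo" {
				return nil, nil
			}
			return nil, fmt.Errorf("password rejected for %q", c.User())
		},
	}
	config.AddHostKey(hostKey)

	nConn, err := l.Accept()
	if err != nil {
		return err
	}
	_, chans, reqs, err := ssh.NewServerConn(nConn, config)
	if err != nil {
		return err
	}
	go ssh.DiscardRequests(reqs)
	for newCh := range chans {
		newCh.Reject(ssh.UnknownChannelType, "channels not supported in this sketch")
	}
	return nil
}
```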
+type sshClientKeyboardInteractive struct { + *connection +} + +func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + if len(questions) != len(echos) { + return nil, errors.New("ssh: echos and questions must have equal length") + } + + var prompts []byte + for i := range questions { + prompts = appendString(prompts, questions[i]) + prompts = appendBool(prompts, echos[i]) + } + + if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Instruction: instruction, + NumPrompts: uint32(len(questions)), + Prompts: prompts, + })); err != nil { + return nil, err + } + + packet, err := c.transport.readPacket() + if err != nil { + return nil, err + } + if packet[0] != msgUserAuthInfoResponse { + return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) + } + packet = packet[1:] + + n, packet, ok := parseUint32(packet) + if !ok || int(n) != len(questions) { + return nil, parseError(msgUserAuthInfoResponse) + } + + for i := uint32(0); i < n; i++ { + ans, rest, ok := parseString(packet) + if !ok { + return nil, parseError(msgUserAuthInfoResponse) + } + + answers = append(answers, string(ans)) + packet = rest + } + if len(packet) != 0 { + return nil, errors.New("ssh: junk at end of message") + } + + return answers, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go new file mode 100644 index 0000000000..17e2aa85c1 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -0,0 +1,627 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Session implements an interactive session described in +// "RFC 4254, section 6". + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "sync" +) + +type Signal string + +// POSIX signals as listed in RFC 4254 Section 6.10. +const ( + SIGABRT Signal = "ABRT" + SIGALRM Signal = "ALRM" + SIGFPE Signal = "FPE" + SIGHUP Signal = "HUP" + SIGILL Signal = "ILL" + SIGINT Signal = "INT" + SIGKILL Signal = "KILL" + SIGPIPE Signal = "PIPE" + SIGQUIT Signal = "QUIT" + SIGSEGV Signal = "SEGV" + SIGTERM Signal = "TERM" + SIGUSR1 Signal = "USR1" + SIGUSR2 Signal = "USR2" +) + +var signals = map[Signal]int{ + SIGABRT: 6, + SIGALRM: 14, + SIGFPE: 8, + SIGHUP: 1, + SIGILL: 4, + SIGINT: 2, + SIGKILL: 9, + SIGPIPE: 13, + SIGQUIT: 3, + SIGSEGV: 11, + SIGTERM: 15, +} + +type TerminalModes map[uint8]uint32 + +// POSIX terminal mode flags as listed in RFC 4254 Section 8. +const ( + tty_OP_END = 0 + VINTR = 1 + VQUIT = 2 + VERASE = 3 + VKILL = 4 + VEOF = 5 + VEOL = 6 + VEOL2 = 7 + VSTART = 8 + VSTOP = 9 + VSUSP = 10 + VDSUSP = 11 + VREPRINT = 12 + VWERASE = 13 + VLNEXT = 14 + VFLUSH = 15 + VSWTCH = 16 + VSTATUS = 17 + VDISCARD = 18 + IGNPAR = 30 + PARMRK = 31 + INPCK = 32 + ISTRIP = 33 + INLCR = 34 + IGNCR = 35 + ICRNL = 36 + IUCLC = 37 + IXON = 38 + IXANY = 39 + IXOFF = 40 + IMAXBEL = 41 + ISIG = 50 + ICANON = 51 + XCASE = 52 + ECHO = 53 + ECHOE = 54 + ECHOK = 55 + ECHONL = 56 + NOFLSH = 57 + TOSTOP = 58 + IEXTEN = 59 + ECHOCTL = 60 + ECHOKE = 61 + PENDIN = 62 + OPOST = 70 + OLCUC = 71 + ONLCR = 72 + OCRNL = 73 + ONOCR = 74 + ONLRET = 75 + CS7 = 90 + CS8 = 91 + PARENB = 92 + PARODD = 93 + TTY_OP_ISPEED = 128 + TTY_OP_OSPEED = 129 +) + +// A Session represents a connection to a remote command or shell. 
+type Session struct { + // Stdin specifies the remote process's standard input. + // If Stdin is nil, the remote process reads from an empty + // bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr specify the remote process's standard + // output and error. + // + // If either is nil, Run connects the corresponding file + // descriptor to an instance of ioutil.Discard. There is a + // fixed amount of buffering that is shared for the two streams. + // If either blocks it may eventually cause the remote + // command to block. + Stdout io.Writer + Stderr io.Writer + + ch Channel // the channel backing this session + started bool // true once Start, Run or Shell is invoked. + copyFuncs []func() error + errors chan error // one send per copyFunc + + // true if pipe method is active + stdinpipe, stdoutpipe, stderrpipe bool + + // stdinPipeWriter is non-nil if StdinPipe has not been called + // and Stdin was specified by the user; it is the write end of + // a pipe connecting Session.Stdin to the stdin channel. + stdinPipeWriter io.WriteCloser + + exitStatus chan error +} + +// SendRequest sends an out-of-band channel request on the SSH channel +// underlying the session. +func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + return s.ch.SendRequest(name, wantReply, payload) +} + +func (s *Session) Close() error { + return s.ch.Close() +} + +// RFC 4254 Section 6.4. +type setenvRequest struct { + Name string + Value string +} + +// Setenv sets an environment variable that will be applied to any +// command executed by Shell or Run. +func (s *Session) Setenv(name, value string) error { + msg := setenvRequest{ + Name: name, + Value: value, + } + ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: setenv failed") + } + return err +} + +// RFC 4254 Section 6.2. +type ptyRequestMsg struct { + Term string + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 + Modelist string +} + +// RequestPty requests the association of a pty with the session on the remote host. +func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { + var tm []byte + for k, v := range termmodes { + kv := struct { + Key byte + Val uint32 + }{k, v} + + tm = append(tm, Marshal(&kv)...) + } + tm = append(tm, tty_OP_END) + req := ptyRequestMsg{ + Term: term, + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + Modelist: string(tm), + } + ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) + if err == nil && !ok { + err = errors.New("ssh: pty-req failed") + } + return err +} + +// RFC 4254 Section 6.5. +type subsystemRequestMsg struct { + Subsystem string +} + +// RequestSubsystem requests the association of a subsystem with the session on the remote host. +// A subsystem is a predefined command that runs in the background when the ssh session is initiated +func (s *Session) RequestSubsystem(subsystem string) error { + msg := subsystemRequestMsg{ + Subsystem: subsystem, + } + ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: subsystem request failed") + } + return err +} + +// RFC 4254 Section 6.9. +type signalMsg struct { + Signal string +} + +// Signal sends the given signal to the remote process. +// sig is one of the SIG* constants. 
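A short sketch of `RequestPty` using the mode table above, then starting a shell via `Shell` (defined just below); it assumes a live `*ssh.Session`, and the mode values are illustrative:

```go
package main

import "golang.org/x/crypto/ssh"

// requestShellPty asks for an xterm pty, then starts the login shell.
func requestShellPty(session *ssh.Session) error {
	modes := ssh.TerminalModes{
		ssh.ECHO:          1,     // enable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed: 14.4 kbaud
		ssh.TTY_OP_OSPEED: 14400, // output speed: 14.4 kbaud
	}
	if err := session.RequestPty("xterm", 40, 80, modes); err != nil {
		return err
	}
	return session.Shell()
}
```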
+func (s *Session) Signal(sig Signal) error { + msg := signalMsg{ + Signal: string(sig), + } + + _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) + return err +} + +// RFC 4254 Section 6.5. +type execMsg struct { + Command string +} + +// Start runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start or Shell. +func (s *Session) Start(cmd string) error { + if s.started { + return errors.New("ssh: session already started") + } + req := execMsg{ + Command: cmd, + } + + ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) + if err == nil && !ok { + err = fmt.Errorf("ssh: command %v failed", cmd) + } + if err != nil { + return err + } + return s.start() +} + +// Run runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start, Shell, Output, +// or CombinedOutput. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Run(cmd string) error { + err := s.Start(cmd) + if err != nil { + return err + } + return s.Wait() +} + +// Output runs cmd on the remote host and returns its standard output. +func (s *Session) Output(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + var b bytes.Buffer + s.Stdout = &b + err := s.Run(cmd) + return b.Bytes(), err +} + +type singleWriter struct { + b bytes.Buffer + mu sync.Mutex +} + +func (w *singleWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + return w.b.Write(p) +} + +// CombinedOutput runs cmd on the remote host and returns its combined +// standard output and standard error. +func (s *Session) CombinedOutput(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + var b singleWriter + s.Stdout = &b + s.Stderr = &b + err := s.Run(cmd) + return b.b.Bytes(), err +} + +// Shell starts a login shell on the remote host. A Session only +// accepts one call to Run, Start, Shell, Output, or CombinedOutput. +func (s *Session) Shell() error { + if s.started { + return errors.New("ssh: session already started") + } + + ok, err := s.ch.SendRequest("shell", true, nil) + if err == nil && !ok { + return errors.New("ssh: could not start shell") + } + if err != nil { + return err + } + return s.start() +} + +func (s *Session) start() error { + s.started = true + + type F func(*Session) + for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { + setupFd(s) + } + + s.errors = make(chan error, len(s.copyFuncs)) + for _, fn := range s.copyFuncs { + go func(fn func() error) { + s.errors <- fn() + }(fn) + } + return nil +} + +// Wait waits for the remote command to exit. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. 
If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Wait() error { + if !s.started { + return errors.New("ssh: session not started") + } + waitErr := <-s.exitStatus + + if s.stdinPipeWriter != nil { + s.stdinPipeWriter.Close() + } + var copyError error + for _ = range s.copyFuncs { + if err := <-s.errors; err != nil && copyError == nil { + copyError = err + } + } + if waitErr != nil { + return waitErr + } + return copyError +} + +func (s *Session) wait(reqs <-chan *Request) error { + wm := Waitmsg{status: -1} + // Wait for msg channel to be closed before returning. + for msg := range reqs { + switch msg.Type { + case "exit-status": + wm.status = int(binary.BigEndian.Uint32(msg.Payload)) + case "exit-signal": + var sigval struct { + Signal string + CoreDumped bool + Error string + Lang string + } + if err := Unmarshal(msg.Payload, &sigval); err != nil { + return err + } + + // Must sanitize strings? + wm.signal = sigval.Signal + wm.msg = sigval.Error + wm.lang = sigval.Lang + default: + // This handles keepalives and matches + // OpenSSH's behaviour. + if msg.WantReply { + msg.Reply(false, nil) + } + } + } + if wm.status == 0 { + return nil + } + if wm.status == -1 { + // exit-status was never sent from server + if wm.signal == "" { + // signal was not sent either. RFC 4254 + // section 6.10 recommends against this + // behavior, but it is allowed, so we let + // clients handle it. + return &ExitMissingError{} + } + wm.status = 128 + if _, ok := signals[Signal(wm.signal)]; ok { + wm.status += signals[Signal(wm.signal)] + } + } + + return &ExitError{wm} +} + +// ExitMissingError is returned if a session is torn down cleanly, but +// the server sends no confirmation of the exit status. +type ExitMissingError struct{} + +func (e *ExitMissingError) Error() string { + return "wait: remote command exited without exit status or exit signal" +} + +func (s *Session) stdin() { + if s.stdinpipe { + return + } + var stdin io.Reader + if s.Stdin == nil { + stdin = new(bytes.Buffer) + } else { + r, w := io.Pipe() + go func() { + _, err := io.Copy(w, s.Stdin) + w.CloseWithError(err) + }() + stdin, s.stdinPipeWriter = r, w + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.ch, stdin) + if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { + err = err1 + } + return err + }) +} + +func (s *Session) stdout() { + if s.stdoutpipe { + return + } + if s.Stdout == nil { + s.Stdout = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stdout, s.ch) + return err + }) +} + +func (s *Session) stderr() { + if s.stderrpipe { + return + } + if s.Stderr == nil { + s.Stderr = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stderr, s.ch.Stderr()) + return err + }) +} + +// sessionStdin reroutes Close to CloseWrite. +type sessionStdin struct { + io.Writer + ch Channel +} + +func (s *sessionStdin) Close() error { + return s.ch.CloseWrite() +} + +// StdinPipe returns a pipe that will be connected to the +// remote command's standard input when the command starts. 
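Given the error contract documented above, callers can branch on the concrete error type. A sketch, assuming a live `*ssh.Session` and an illustrative command:

```go
package main

import (
	"bytes"
	"log"

	"golang.org/x/crypto/ssh"
)

// runAndClassify shows the three error shapes Run/Wait can produce.
func runAndClassify(session *ssh.Session) {
	var out bytes.Buffer
	session.Stdout = &out
	err := session.Run("/usr/bin/whoami")
	switch e := err.(type) {
	case nil:
		log.Printf("ok: %q", out.String())
	case *ssh.ExitError:
		log.Printf("remote exit status %d", e.ExitStatus())
	case *ssh.ExitMissingError:
		log.Print("remote exited without reporting a status")
	default:
		log.Printf("transport or copy error: %v", err)
	}
}
```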
+func (s *Session) StdinPipe() (io.WriteCloser, error) { + if s.Stdin != nil { + return nil, errors.New("ssh: Stdin already set") + } + if s.started { + return nil, errors.New("ssh: StdinPipe after process started") + } + s.stdinpipe = true + return &sessionStdin{s.ch, s.ch}, nil +} + +// StdoutPipe returns a pipe that will be connected to the +// remote command's standard output when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StdoutPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StdoutPipe() (io.Reader, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.started { + return nil, errors.New("ssh: StdoutPipe after process started") + } + s.stdoutpipe = true + return s.ch, nil +} + +// StderrPipe returns a pipe that will be connected to the +// remote command's standard error when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StderrPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StderrPipe() (io.Reader, error) { + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + if s.started { + return nil, errors.New("ssh: StderrPipe after process started") + } + s.stderrpipe = true + return s.ch.Stderr(), nil +} + +// newSession returns a new interactive session on the remote host. +func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { + s := &Session{ + ch: ch, + } + s.exitStatus = make(chan error, 1) + go func() { + s.exitStatus <- s.wait(reqs) + }() + + return s, nil +} + +// An ExitError reports unsuccessful completion of a remote command. +type ExitError struct { + Waitmsg +} + +func (e *ExitError) Error() string { + return e.Waitmsg.String() +} + +// Waitmsg stores the information about an exited remote command +// as reported by Wait. +type Waitmsg struct { + status int + signal string + msg string + lang string +} + +// ExitStatus returns the exit status of the remote command. +func (w Waitmsg) ExitStatus() int { + return w.status +} + +// Signal returns the exit signal of the remote command if +// it was terminated violently. +func (w Waitmsg) Signal() string { + return w.signal +} + +// Msg returns the exit message given by the remote command +func (w Waitmsg) Msg() string { + return w.msg +} + +// Lang returns the language tag. See RFC 3066 +func (w Waitmsg) Lang() string { + return w.lang +} + +func (w Waitmsg) String() string { + str := fmt.Sprintf("Process exited with status %v", w.status) + if w.signal != "" { + str += fmt.Sprintf(" from signal %v", w.signal) + } + if w.msg != "" { + str += fmt.Sprintf(". Reason was: %v", w.msg) + } + return str +} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go new file mode 100644 index 0000000000..6151241ff0 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -0,0 +1,407 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Listen requests the remote peer open a listening socket on +// addr. Incoming connections will be available by calling Accept on +// the returned net.Listener. 
The listener must be serviced, or the +// SSH connection may hang. +func (c *Client) Listen(n, addr string) (net.Listener, error) { + laddr, err := net.ResolveTCPAddr(n, addr) + if err != nil { + return nil, err + } + return c.ListenTCP(laddr) +} + +// Automatic port allocation is broken with OpenSSH before 6.0. See +// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In +// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, +// rather than the actual port number. This means you can never open +// two different listeners with auto allocated ports. We work around +// this by trying explicit ports until we succeed. + +const openSSHPrefix = "OpenSSH_" + +var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) + +// isBrokenOpenSSHVersion returns true if the given version string +// specifies a version of OpenSSH that is known to have a bug in port +// forwarding. +func isBrokenOpenSSHVersion(versionStr string) bool { + i := strings.Index(versionStr, openSSHPrefix) + if i < 0 { + return false + } + i += len(openSSHPrefix) + j := i + for ; j < len(versionStr); j++ { + if versionStr[j] < '0' || versionStr[j] > '9' { + break + } + } + version, _ := strconv.Atoi(versionStr[i:j]) + return version < 6 +} + +// autoPortListenWorkaround simulates automatic port allocation by +// trying random ports repeatedly. +func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { + var sshListener net.Listener + var err error + const tries = 10 + for i := 0; i < tries; i++ { + addr := *laddr + addr.Port = 1024 + portRandomizer.Intn(60000) + sshListener, err = c.ListenTCP(&addr) + if err == nil { + laddr.Port = addr.Port + return sshListener, err + } + } + return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) +} + +// RFC 4254 7.1 +type channelForwardMsg struct { + addr string + rport uint32 +} + +// ListenTCP requests the remote peer open a listening socket +// on laddr. Incoming connections will be available by calling +// Accept on the returned net.Listener. +func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { + if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { + return c.autoPortListenWorkaround(laddr) + } + + m := channelForwardMsg{ + laddr.IP.String(), + uint32(laddr.Port), + } + // send message + ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: tcpip-forward request denied by peer") + } + + // If the original port was 0, then the remote side will + // supply a real port number in the response. + if laddr.Port == 0 { + var p struct { + Port uint32 + } + if err := Unmarshal(resp, &p); err != nil { + return nil, err + } + laddr.Port = int(p.Port) + } + + // Register this forward, using the port number we obtained. + ch := c.forwards.add(*laddr) + + return &tcpListener{laddr, c, ch}, nil +} + +// forwardList stores a mapping between remote +// forward requests and the tcpListeners. +type forwardList struct { + sync.Mutex + entries []forwardEntry +} + +// forwardEntry represents an established mapping of a laddr on a +// remote ssh server to a channel connected to a tcpListener. +type forwardEntry struct { + laddr net.TCPAddr + c chan forward +} + +// forward represents an incoming forwarded tcpip connection. The +// arguments to add/remove/lookup should be address as specified in +// the original forward-request. 
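A sketch of the remote-forwarding flow that `Listen`/`ListenTCP` above set up, assuming a connected `*ssh.Client`; the bind address and response text are illustrative:

```go
package main

import (
	"io"
	"net"

	"golang.org/x/crypto/ssh"
)

// remoteForward asks the server to listen on a port and answers each
// connection the server hands back over the SSH transport.
func remoteForward(client *ssh.Client) error {
	ln, err := client.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		return err
	}
	defer ln.Close()
	for {
		conn, err := ln.Accept()
		if err != nil {
			return err // io.EOF once the SSH connection goes away
		}
		go func(c net.Conn) {
			defer c.Close()
			io.WriteString(c, "hello from the forwarding client\n")
		}(conn)
	}
}
```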
+type forward struct { + newCh NewChannel // the ssh client channel underlying this forward + raddr *net.TCPAddr // the raddr of the incoming connection +} + +func (l *forwardList) add(addr net.TCPAddr) chan forward { + l.Lock() + defer l.Unlock() + f := forwardEntry{ + addr, + make(chan forward, 1), + } + l.entries = append(l.entries, f) + return f.c +} + +// See RFC 4254, section 7.2 +type forwardedTCPPayload struct { + Addr string + Port uint32 + OriginAddr string + OriginPort uint32 +} + +// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. +func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { + if port == 0 || port > 65535 { + return nil, fmt.Errorf("ssh: port number out of range: %d", port) + } + ip := net.ParseIP(string(addr)) + if ip == nil { + return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) + } + return &net.TCPAddr{IP: ip, Port: int(port)}, nil +} + +func (l *forwardList) handleChannels(in <-chan NewChannel) { + for ch := range in { + var payload forwardedTCPPayload + if err := Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) + continue + } + + // RFC 4254 section 7.2 specifies that incoming + // addresses should list the address, in string + // format. It is implied that this should be an IP + // address, as it would be impossible to connect to it + // otherwise. + laddr, err := parseTCPAddr(payload.Addr, payload.Port) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + + if ok := l.forward(*laddr, *raddr, ch); !ok { + // Section 7.2, implementations MUST reject spurious incoming + // connections. + ch.Reject(Prohibited, "no forward for address") + continue + } + } +} + +// remove removes the forward entry, and the channel feeding its +// listener. +func (l *forwardList) remove(addr net.TCPAddr) { + l.Lock() + defer l.Unlock() + for i, f := range l.entries { + if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port { + l.entries = append(l.entries[:i], l.entries[i+1:]...) + close(f.c) + return + } + } +} + +// closeAll closes and clears all forwards. +func (l *forwardList) closeAll() { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + close(f.c) + } + l.entries = nil +} + +func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port { + f.c <- forward{ch, &raddr} + return true + } + } + return false +} + +type tcpListener struct { + laddr *net.TCPAddr + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *tcpListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &tcpChanConn{ + Channel: ch, + laddr: l.laddr, + raddr: s.raddr, + }, nil +} + +// Close closes the listener. +func (l *tcpListener) Close() error { + m := channelForwardMsg{ + l.laddr.IP.String(), + uint32(l.laddr.Port), + } + + // this also closes the listener. 
+ l.conn.forwards.remove(*l.laddr) + ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-tcpip-forward failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *tcpListener) Addr() net.Addr { + return l.laddr +} + +// Dial initiates a connection to the addr from the remote host. +// The resulting connection has a zero LocalAddr() and RemoteAddr(). +func (c *Client) Dial(n, addr string) (net.Conn, error) { + // Parse the address into host and numeric port. + host, portString, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return nil, err + } + // Use a zero address for local and remote address. + zeroAddr := &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port)) + if err != nil { + return nil, err + } + return &tcpChanConn{ + Channel: ch, + laddr: zeroAddr, + raddr: zeroAddr, + }, nil +} + +// DialTCP connects to the remote address raddr on the network net, +// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used +// as the local address for the connection. +func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { + if laddr == nil { + laddr = &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + } + ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) + if err != nil { + return nil, err + } + return &tcpChanConn{ + Channel: ch, + laddr: laddr, + raddr: raddr, + }, nil +} + +// RFC 4254 7.2 +type channelOpenDirectMsg struct { + raddr string + rport uint32 + laddr string + lport uint32 +} + +func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { + msg := channelOpenDirectMsg{ + raddr: raddr, + rport: uint32(rport), + laddr: laddr, + lport: uint32(lport), + } + ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type tcpChan struct { + Channel // the backing channel +} + +// tcpChanConn fulfills the net.Conn interface without +// the tcpChan having to hold laddr or raddr directly. +type tcpChanConn struct { + Channel + laddr, raddr net.Addr +} + +// LocalAddr returns the local network address. +func (t *tcpChanConn) LocalAddr() net.Addr { + return t.laddr +} + +// RemoteAddr returns the remote network address. +func (t *tcpChanConn) RemoteAddr() net.Addr { + return t.raddr +} + +// SetDeadline sets the read and write deadlines associated +// with the connection. +func (t *tcpChanConn) SetDeadline(deadline time.Time) error { + if err := t.SetReadDeadline(deadline); err != nil { + return err + } + return t.SetWriteDeadline(deadline) +} + +// SetReadDeadline sets the read deadline. +// A zero value for t means Read will not time out. +// After the deadline, the error from Read will implement net.Error +// with Timeout() == true. +func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error { + return errors.New("ssh: tcpChan: deadline not supported") +} + +// SetWriteDeadline exists to satisfy the net.Conn interface +// but is not implemented by this type. It always returns an error. 
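Because `Client.Dial` returns an ordinary `net.Conn`, it can stand in for `net.Dial` wherever a dialer is pluggable. A sketch that tunnels HTTP over direct-tcpip channels, assuming a connected `*ssh.Client`; the URL is illustrative. Note the caveat just below: `tcpChanConn` rejects deadlines, so rely on something like `http.Client.Timeout` rather than `net.Conn` deadlines.

```go
package main

import (
	"context"
	"net"
	"net/http"

	"golang.org/x/crypto/ssh"
)

// tunneledHTTP routes an HTTP request through the SSH connection by
// substituting Client.Dial for the default dialer.
func tunneledHTTP(client *ssh.Client) (*http.Response, error) {
	httpClient := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				return client.Dial(network, addr)
			},
		},
	}
	return httpClient.Get("http://internal.example/")
}
```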
+func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error { + return errors.New("ssh: tcpChan: deadline not supported") +} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go new file mode 100644 index 0000000000..f9780e0ae7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -0,0 +1,375 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bufio" + "errors" + "io" + "log" +) + +// debugTransport if set, will print packet types as they go over the +// wire. No message decoding is done, to minimize the impact on timing. +const debugTransport = false + +const ( + gcmCipherID = "aes128-gcm@openssh.com" + aes128cbcID = "aes128-cbc" + tripledescbcID = "3des-cbc" +) + +// packetConn represents a transport that implements packet based +// operations. +type packetConn interface { + // Encrypt and send a packet of data to the remote peer. + writePacket(packet []byte) error + + // Read a packet from the connection. The read is blocking, + // i.e. if error is nil, then the returned byte slice is + // always non-empty. + readPacket() ([]byte, error) + + // Close closes the write-side of the connection. + Close() error +} + +// transport is the keyingTransport that implements the SSH packet +// protocol. +type transport struct { + reader connectionState + writer connectionState + + bufReader *bufio.Reader + bufWriter *bufio.Writer + rand io.Reader + isClient bool + io.Closer +} + +// packetCipher represents a combination of SSH encryption/MAC +// protocol. A single instance should be used for one direction only. +type packetCipher interface { + // writePacket encrypts the packet and writes it to w. The + // contents of the packet are generally scrambled. + writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error + + // readPacket reads and decrypts a packet of data. The + // returned packet may be overwritten by future calls of + // readPacket. + readPacket(seqnum uint32, r io.Reader) ([]byte, error) +} + +// connectionState represents one side (read or write) of the +// connection. This is necessary because each direction has its own +// keys, and can even have its own algorithms +type connectionState struct { + packetCipher + seqNum uint32 + dir direction + pendingKeyChange chan packetCipher +} + +// prepareKeyChange sets up key material for a keychange. The key changes in +// both directions are triggered by reading and writing a msgNewKey packet +// respectively. +func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { + if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil { + return err + } else { + t.reader.pendingKeyChange <- ciph + } + + if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil { + return err + } else { + t.writer.pendingKeyChange <- ciph + } + + return nil +} + +func (t *transport) printPacket(p []byte, write bool) { + if len(p) == 0 { + return + } + who := "server" + if t.isClient { + who = "client" + } + what := "read" + if write { + what = "write" + } + + log.Println(what, who, p[0]) +} + +// Read and decrypt next packet. 
+func (t *transport) readPacket() (p []byte, err error) { + for { + p, err = t.reader.readPacket(t.bufReader) + if err != nil { + break + } + if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { + break + } + } + if debugTransport { + t.printPacket(p, false) + } + + return p, err +} + +func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { + packet, err := s.packetCipher.readPacket(s.seqNum, r) + s.seqNum++ + if err == nil && len(packet) == 0 { + err = errors.New("ssh: zero length packet") + } + + if len(packet) > 0 { + switch packet[0] { + case msgNewKeys: + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + return nil, errors.New("ssh: got bogus newkeys message.") + } + + case msgDisconnect: + // Transform a disconnect message into an + // error. Since this is lowest level at which + // we interpret message types, doing it here + // ensures that we don't have to handle it + // elsewhere. + var msg disconnectMsg + if err := Unmarshal(packet, &msg); err != nil { + return nil, err + } + return nil, &msg + } + } + + // The packet may point to an internal buffer, so copy the + // packet out here. + fresh := make([]byte, len(packet)) + copy(fresh, packet) + + return fresh, err +} + +func (t *transport) writePacket(packet []byte) error { + if debugTransport { + t.printPacket(packet, true) + } + return t.writer.writePacket(t.bufWriter, t.rand, packet) +} + +func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { + changeKeys := len(packet) > 0 && packet[0] == msgNewKeys + + err := s.packetCipher.writePacket(s.seqNum, w, rand, packet) + if err != nil { + return err + } + if err = w.Flush(); err != nil { + return err + } + s.seqNum++ + if changeKeys { + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + panic("ssh: no key material for msgNewKeys") + } + } + return err +} + +func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { + t := &transport{ + bufReader: bufio.NewReader(rwc), + bufWriter: bufio.NewWriter(rwc), + rand: rand, + reader: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + writer: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + Closer: rwc, + } + t.isClient = isClient + + if isClient { + t.reader.dir = serverKeys + t.writer.dir = clientKeys + } else { + t.reader.dir = clientKeys + t.writer.dir = serverKeys + } + + return t +} + +type direction struct { + ivTag []byte + keyTag []byte + macKeyTag []byte +} + +var ( + serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} + clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} +) + +// generateKeys generates key material for IV, MAC and encryption. +func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) { + cipherMode := cipherModes[algs.Cipher] + macMode := macModes[algs.MAC] + + iv = make([]byte, cipherMode.ivSize) + key = make([]byte, cipherMode.keySize) + macKey = make([]byte, macMode.keySize) + + generateKeyMaterial(iv, d.ivTag, kex) + generateKeyMaterial(key, d.keyTag, kex) + generateKeyMaterial(macKey, d.macKeyTag, kex) + return +} + +// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as +// described in RFC 4253, section 6.4. 
direction should either be serverKeys +// (to setup server->client keys) or clientKeys (for client->server keys). +func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { + iv, key, macKey := generateKeys(d, algs, kex) + + if algs.Cipher == gcmCipherID { + return newGCMCipher(iv, key, macKey) + } + + if algs.Cipher == aes128cbcID { + return newAESCBCCipher(iv, key, macKey, algs) + } + + if algs.Cipher == tripledescbcID { + return newTripleDESCBCCipher(iv, key, macKey, algs) + } + + c := &streamPacketCipher{ + mac: macModes[algs.MAC].new(macKey), + etm: macModes[algs.MAC].etm, + } + c.macResult = make([]byte, c.mac.Size()) + + var err error + c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv) + if err != nil { + return nil, err + } + + return c, nil +} + +// generateKeyMaterial fills out with key material generated from tag, K, H +// and sessionId, as specified in RFC 4253, section 7.2. +func generateKeyMaterial(out, tag []byte, r *kexResult) { + var digestsSoFar []byte + + h := r.Hash.New() + for len(out) > 0 { + h.Reset() + h.Write(r.K) + h.Write(r.H) + + if len(digestsSoFar) == 0 { + h.Write(tag) + h.Write(r.SessionID) + } else { + h.Write(digestsSoFar) + } + + digest := h.Sum(nil) + n := copy(out, digest) + out = out[n:] + if len(out) > 0 { + digestsSoFar = append(digestsSoFar, digest...) + } + } +} + +const packageVersion = "SSH-2.0-Go" + +// Sends and receives a version line. The versionLine string should +// be US ASCII, start with "SSH-2.0-", and should not include a +// newline. exchangeVersions returns the other side's version line. +func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { + // Contrary to the RFC, we do not ignore lines that don't + // start with "SSH-2.0-" to make the library usable with + // nonconforming servers. + for _, c := range versionLine { + // The spec disallows non US-ASCII chars, and + // specifically forbids null chars. + if c < 32 { + return nil, errors.New("ssh: junk character in version line") + } + } + if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { + return + } + + them, err = readVersion(rw) + return them, err +} + +// maxVersionStringBytes is the maximum number of bytes that we'll +// accept as a version string. RFC 4253 section 4.2 limits this at 255 +// chars +const maxVersionStringBytes = 255 + +// Read version string as specified by RFC 4253, section 4.2. +func readVersion(r io.Reader) ([]byte, error) { + versionString := make([]byte, 0, 64) + var ok bool + var buf [1]byte + + for len(versionString) < maxVersionStringBytes { + _, err := io.ReadFull(r, buf[:]) + if err != nil { + return nil, err + } + // The RFC says that the version should be terminated with \r\n + // but several SSH servers actually only send a \n. + if buf[0] == '\n' { + ok = true + break + } + + // non ASCII chars are disallowed, but we are lenient, + // since Go doesn't use null-terminated strings. + + // The RFC allows a comment after a space, however, + // all of it (version and comments) goes into the + // session hash. + versionString = append(versionString, buf[0]) + } + + if !ok { + return nil, errors.New("ssh: overflow reading version string") + } + + // There might be a '\r' on the end which we should remove. 
+ if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { + versionString = versionString[:len(versionString)-1] + } + return versionString, nil +} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
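For orientation: the generateKeyMaterial loop in the ssh transport code above implements the key expansion of RFC 4253, section 7.2, where output longer than one digest is produced by re-hashing K || H || (all digests so far). Below is a minimal, self-contained sketch of that loop; the SHA-256 hash and the literal K, H, and session-ID values are illustrative stand-ins, not the package's actual kexResult fields.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// expandKey grows key material to the requested length, RFC 4253 §7.2 style:
// the first block hashes K || H || tag || sessionID, and every later block
// hashes K || H || (output so far).
func expandKey(K, H, sessionID, tag []byte, want int) []byte {
	out := make([]byte, 0, want)
	h := sha256.New()
	for len(out) < want {
		h.Reset()
		h.Write(K)
		h.Write(H)
		if len(out) == 0 {
			h.Write(tag)       // e.g. 'A' tags the client->server IV
			h.Write(sessionID) // only the first block binds the session ID
		} else {
			h.Write(out) // chain over the digests produced so far
		}
		out = append(out, h.Sum(nil)...)
	}
	return out[:want] // truncate to exactly the requested size
}

func main() {
	iv := expandKey([]byte("shared-secret-K"), []byte("exchange-hash-H"),
		[]byte("session-id"), []byte{'A'}, 48)
	fmt.Printf("derived %d bytes of IV material\n", len(iv))
}
```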
diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go new file mode 100644 index 0000000000..69a4ac7eef --- /dev/null +++ b/vendor/golang.org/x/net/websocket/client.go @@ -0,0 +1,106 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "io" + "net" + "net/http" + "net/url" +) + +// DialError is an error that occurs while dialling a websocket server. +type DialError struct { + *Config + Err error +} + +func (e *DialError) Error() string { + return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error() +} + +// NewConfig creates a new WebSocket config for client connection. +func NewConfig(server, origin string) (config *Config, err error) { + config = new(Config) + config.Version = ProtocolVersionHybi13 + config.Location, err = url.ParseRequestURI(server) + if err != nil { + return + } + config.Origin, err = url.ParseRequestURI(origin) + if err != nil { + return + } + config.Header = http.Header(make(map[string][]string)) + return +} + +// NewClient creates a new WebSocket client connection over rwc. +func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) { + br := bufio.NewReader(rwc) + bw := bufio.NewWriter(rwc) + err = hybiClientHandshake(config, br, bw) + if err != nil { + return + } + buf := bufio.NewReadWriter(br, bw) + ws = newHybiClientConn(config, buf, rwc) + return +} + +// Dial opens a new client connection to a WebSocket. +func Dial(url_, protocol, origin string) (ws *Conn, err error) { + config, err := NewConfig(url_, origin) + if err != nil { + return nil, err + } + if protocol != "" { + config.Protocol = []string{protocol} + } + return DialConfig(config) +} + +var portMap = map[string]string{ + "ws": "80", + "wss": "443", +} + +func parseAuthority(location *url.URL) string { + if _, ok := portMap[location.Scheme]; ok { + if _, _, err := net.SplitHostPort(location.Host); err != nil { + return net.JoinHostPort(location.Host, portMap[location.Scheme]) + } + } + return location.Host +} + +// DialConfig opens a new client connection to a WebSocket with a config. +func DialConfig(config *Config) (ws *Conn, err error) { + var client net.Conn + if config.Location == nil { + return nil, &DialError{config, ErrBadWebSocketLocation} + } + if config.Origin == nil { + return nil, &DialError{config, ErrBadWebSocketOrigin} + } + dialer := config.Dialer + if dialer == nil { + dialer = &net.Dialer{} + } + client, err = dialWithDialer(dialer, config) + if err != nil { + goto Error + } + ws, err = NewClient(config, client) + if err != nil { + client.Close() + goto Error + } + return + +Error: + return nil, &DialError{config, err} +} diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go new file mode 100644 index 0000000000..2dab943a48 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/dial.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "crypto/tls" + "net" +) + +func dialWithDialer(dialer *net.Dialer, config *Config) (conn net.Conn, err error) { + switch config.Location.Scheme { + case "ws": + conn, err = dialer.Dial("tcp", parseAuthority(config.Location)) + + case "wss": + conn, err = tls.DialWithDialer(dialer, "tcp", parseAuthority(config.Location), config.TlsConfig) + + default: + err = ErrBadScheme + } + return +} diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go new file mode 100644 index 0000000000..8cffdd16c9 --- /dev/null +++ b/vendor/golang.org/x/net/websocket/hybi.go @@ -0,0 +1,583 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +// This file implements a protocol of hybi draft. +// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17 + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" +) + +const ( + websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" + + closeStatusNormal = 1000 + closeStatusGoingAway = 1001 + closeStatusProtocolError = 1002 + closeStatusUnsupportedData = 1003 + closeStatusFrameTooLarge = 1004 + closeStatusNoStatusRcvd = 1005 + closeStatusAbnormalClosure = 1006 + closeStatusBadMessageData = 1007 + closeStatusPolicyViolation = 1008 + closeStatusTooBigData = 1009 + closeStatusExtensionMismatch = 1010 + + maxControlFramePayloadLength = 125 +) + +var ( + ErrBadMaskingKey = &ProtocolError{"bad masking key"} + ErrBadPongMessage = &ProtocolError{"bad pong message"} + ErrBadClosingStatus = &ProtocolError{"bad closing status"} + ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"} + ErrNotImplemented = &ProtocolError{"not implemented"} + + handshakeHeader = map[string]bool{ + "Host": true, + "Upgrade": true, + "Connection": true, + "Sec-Websocket-Key": true, + "Sec-Websocket-Origin": true, + "Sec-Websocket-Version": true, + "Sec-Websocket-Protocol": true, + "Sec-Websocket-Accept": true, + } +) + +// A hybiFrameHeader is a frame header as defined in hybi draft. +type hybiFrameHeader struct { + Fin bool + Rsv [3]bool + OpCode byte + Length int64 + MaskingKey []byte + + data *bytes.Buffer +} + +// A hybiFrameReader is a reader for hybi frame. +type hybiFrameReader struct { + reader io.Reader + + header hybiFrameHeader + pos int64 + length int +} + +func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) { + n, err = frame.reader.Read(msg) + if frame.header.MaskingKey != nil { + for i := 0; i < n; i++ { + msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4] + frame.pos++ + } + } + return n, err +} + +func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode } + +func (frame *hybiFrameReader) HeaderReader() io.Reader { + if frame.header.data == nil { + return nil + } + if frame.header.data.Len() == 0 { + return nil + } + return frame.header.data +} + +func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil } + +func (frame *hybiFrameReader) Len() (n int) { return frame.length } + +// A hybiFrameReaderFactory creates new frame reader based on its frame type. +type hybiFrameReaderFactory struct { + *bufio.Reader +} + +// NewFrameReader reads a frame header from the connection, and creates new reader for the frame. +// See Section 5.2 Base Framing protocol for detail. 
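+// A worked example (editor's addition, taken from the RFC 6455 framing
+// examples): the masked client frame 81 85 37 fa 21 3d 7f 9f 4d 51 58
+// carries the text "Hello" -- 0x81 is FIN plus the text opcode, 0x85 is
+// the mask bit plus a 5-byte payload length, 37 fa 21 3d is the masking
+// key, and the last five bytes are the payload XORed with that key.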
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2 +func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) { + hybiFrame := new(hybiFrameReader) + frame = hybiFrame + var header []byte + var b byte + // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0 + for i := 0; i < 3; i++ { + j := uint(6 - i) + hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0 + } + hybiFrame.header.OpCode = header[0] & 0x0f + + // Second byte. Mask/Payload len(7bits) + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + mask := (b & 0x80) != 0 + b &= 0x7f + lengthFields := 0 + switch { + case b <= 125: // Payload length 7bits. + hybiFrame.header.Length = int64(b) + case b == 126: // Payload length 7+16bits + lengthFields = 2 + case b == 127: // Payload length 7+64bits + lengthFields = 8 + } + for i := 0; i < lengthFields; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } + header = append(header, b) + hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) + } + if mask { + // Masking key. 4 bytes. + for i := 0; i < 4; i++ { + b, err = buf.ReadByte() + if err != nil { + return + } + header = append(header, b) + hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b) + } + } + hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length) + hybiFrame.header.data = bytes.NewBuffer(header) + hybiFrame.length = len(header) + int(hybiFrame.header.Length) + return +} + +// A HybiFrameWriter is a writer for hybi frame. +type hybiFrameWriter struct { + writer *bufio.Writer + + header *hybiFrameHeader +} + +func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) { + var header []byte + var b byte + if frame.header.Fin { + b |= 0x80 + } + for i := 0; i < 3; i++ { + if frame.header.Rsv[i] { + j := uint(6 - i) + b |= 1 << j + } + } + b |= frame.header.OpCode + header = append(header, b) + if frame.header.MaskingKey != nil { + b = 0x80 + } else { + b = 0 + } + lengthFields := 0 + length := len(msg) + switch { + case length <= 125: + b |= byte(length) + case length < 65536: + b |= 126 + lengthFields = 2 + default: + b |= 127 + lengthFields = 8 + } + header = append(header, b) + for i := 0; i < lengthFields; i++ { + j := uint((lengthFields - i - 1) * 8) + b = byte((length >> j) & 0xff) + header = append(header, b) + } + if frame.header.MaskingKey != nil { + if len(frame.header.MaskingKey) != 4 { + return 0, ErrBadMaskingKey + } + header = append(header, frame.header.MaskingKey...) 
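+		// Editor's note (illustrative): the payload written below is
+		// XORed byte-by-byte with the cycling 4-byte key appended above,
+		// mirroring the unmasking loop in hybiFrameReader.Read.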
+		frame.writer.Write(header)
+		data := make([]byte, length)
+		for i := range data {
+			data[i] = msg[i] ^ frame.header.MaskingKey[i%4]
+		}
+		frame.writer.Write(data)
+		err = frame.writer.Flush()
+		return length, err
+	}
+	frame.writer.Write(header)
+	frame.writer.Write(msg)
+	err = frame.writer.Flush()
+	return length, err
+}
+
+func (frame *hybiFrameWriter) Close() error { return nil }
+
+type hybiFrameWriterFactory struct {
+	*bufio.Writer
+	needMaskingKey bool
+}
+
+func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {
+	frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}
+	if buf.needMaskingKey {
+		frameHeader.MaskingKey, err = generateMaskingKey()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil
+}
+
+type hybiFrameHandler struct {
+	conn        *Conn
+	payloadType byte
+}
+
+func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) {
+	if handler.conn.IsServerConn() {
+		// The client MUST mask all frames sent to the server.
+		if frame.(*hybiFrameReader).header.MaskingKey == nil {
+			handler.WriteClose(closeStatusProtocolError)
+			return nil, io.EOF
+		}
+	} else {
+		// The server MUST NOT mask any frames sent to the client.
+		if frame.(*hybiFrameReader).header.MaskingKey != nil {
+			handler.WriteClose(closeStatusProtocolError)
+			return nil, io.EOF
+		}
+	}
+	if header := frame.HeaderReader(); header != nil {
+		io.Copy(ioutil.Discard, header)
+	}
+	switch frame.PayloadType() {
+	case ContinuationFrame:
+		frame.(*hybiFrameReader).header.OpCode = handler.payloadType
+	case TextFrame, BinaryFrame:
+		handler.payloadType = frame.PayloadType()
+	case CloseFrame:
+		return nil, io.EOF
+	case PingFrame, PongFrame:
+		b := make([]byte, maxControlFramePayloadLength)
+		n, err := io.ReadFull(frame, b)
+		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+			return nil, err
+		}
+		io.Copy(ioutil.Discard, frame)
+		if frame.PayloadType() == PingFrame {
+			if _, err := handler.WritePong(b[:n]); err != nil {
+				return nil, err
+			}
+		}
+		return nil, nil
+	}
+	return frame, nil
+}
+
+func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
+	handler.conn.wio.Lock()
+	defer handler.conn.wio.Unlock()
+	w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
+	if err != nil {
+		return err
+	}
+	msg := make([]byte, 2)
+	binary.BigEndian.PutUint16(msg, uint16(status))
+	_, err = w.Write(msg)
+	w.Close()
+	return err
+}
+
+func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
+	handler.conn.wio.Lock()
+	defer handler.conn.wio.Unlock()
+	w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
+	if err != nil {
+		return 0, err
+	}
+	n, err = w.Write(msg)
+	w.Close()
+	return n, err
+}
+
+// newHybiConn creates a new WebSocket connection speaking hybi draft protocol.
+func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+	if buf == nil {
+		br := bufio.NewReader(rwc)
+		bw := bufio.NewWriter(rwc)
+		buf = bufio.NewReadWriter(br, bw)
+	}
+	ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
+		frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
+		frameWriterFactory: hybiFrameWriterFactory{
+			buf.Writer, request == nil},
+		PayloadType:        TextFrame,
+		defaultCloseStatus: closeStatusNormal}
+	ws.frameHandler = &hybiFrameHandler{conn: ws}
+	return ws
+}
+
+// generateMaskingKey generates a masking key for a frame.
+func generateMaskingKey() (maskingKey []byte, err error) {
+	maskingKey = make([]byte, 4)
+	if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
+		return
+	}
+	return
+}
+
+// generateNonce generates a nonce consisting of a randomly selected 16-byte
+// value that has been base64-encoded.
+func generateNonce() (nonce []byte) {
+	key := make([]byte, 16)
+	if _, err := io.ReadFull(rand.Reader, key); err != nil {
+		panic(err)
+	}
+	nonce = make([]byte, 24)
+	base64.StdEncoding.Encode(nonce, key)
+	return
+}
+
+// removeZone removes the IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+	if !strings.HasPrefix(host, "[") {
+		return host
+	}
+	i := strings.LastIndex(host, "]")
+	if i < 0 {
+		return host
+	}
+	j := strings.LastIndex(host[:i], "%")
+	if j < 0 {
+		return host
+	}
+	return host[:j] + host[i:]
+}
+
+// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
+// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
+func getNonceAccept(nonce []byte) (expected []byte, err error) {
+	h := sha1.New()
+	if _, err = h.Write(nonce); err != nil {
+		return
+	}
+	if _, err = h.Write([]byte(websocketGUID)); err != nil {
+		return
+	}
+	expected = make([]byte, 28)
+	base64.StdEncoding.Encode(expected, h.Sum(nil))
+	return
+}
+
+// hybiClientHandshake performs the client handshake described in
+// draft-ietf-hybi-thewebsocketprotocol-17.
+func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
+	bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
+
+	// According to RFC 6874, an HTTP client, proxy, or other
+	// intermediary must remove any IPv6 zone identifier attached
+	// to an outgoing URI.
+	bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n")
+	bw.WriteString("Upgrade: websocket\r\n")
+	bw.WriteString("Connection: Upgrade\r\n")
+	nonce := generateNonce()
+	if config.handshakeData != nil {
+		nonce = []byte(config.handshakeData["key"])
+	}
+	bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
+	bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
+
+	if config.Version != ProtocolVersionHybi13 {
+		return ErrBadProtocolVersion
+	}
+
+	bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
+	if len(config.Protocol) > 0 {
+		bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
+	}
+	// TODO(ukai): send Sec-WebSocket-Extensions.
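+	// Editor's sketch of the resulting exchange (values taken from the
+	// standard RFC 6455 example, not produced by this code): the request
+	// written above looks like
+	//
+	//	GET /chat HTTP/1.1
+	//	Host: server.example.com
+	//	Upgrade: websocket
+	//	Connection: Upgrade
+	//	Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==
+	//	Origin: http://example.com
+	//	Sec-WebSocket-Version: 13
+	//
+	// and getNonceAccept maps that key to the expected
+	// "Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=" checked below.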
+	err = config.Header.WriteSubset(bw, handshakeHeader)
+	if err != nil {
+		return err
+	}
+
+	bw.WriteString("\r\n")
+	if err = bw.Flush(); err != nil {
+		return err
+	}
+
+	resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != 101 {
+		return ErrBadStatus
+	}
+	if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
+		strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
+		return ErrBadUpgrade
+	}
+	expectedAccept, err := getNonceAccept(nonce)
+	if err != nil {
+		return err
+	}
+	if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
+		return ErrChallengeResponse
+	}
+	if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
+		return ErrUnsupportedExtensions
+	}
+	offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
+	if offeredProtocol != "" {
+		protocolMatched := false
+		for i := 0; i < len(config.Protocol); i++ {
+			if config.Protocol[i] == offeredProtocol {
+				protocolMatched = true
+				break
+			}
+		}
+		if !protocolMatched {
+			return ErrBadWebSocketProtocol
+		}
+		config.Protocol = []string{offeredProtocol}
+	}
+
+	return nil
+}
+
+// newHybiClientConn creates a client WebSocket connection after handshake.
+func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
+	return newHybiConn(config, buf, rwc, nil)
+}
+
+// A hybiServerHandshaker performs a server handshake using hybi draft protocol.
+type hybiServerHandshaker struct {
+	*Config
+	accept []byte
+}
+
+func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
+	c.Version = ProtocolVersionHybi13
+	if req.Method != "GET" {
+		return http.StatusMethodNotAllowed, ErrBadRequestMethod
+	}
+	// HTTP version can be safely ignored.
+
+	if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
+		!strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
+		return http.StatusBadRequest, ErrNotWebSocket
+	}
+
+	key := req.Header.Get("Sec-Websocket-Key")
+	if key == "" {
+		return http.StatusBadRequest, ErrChallengeResponse
+	}
+	version := req.Header.Get("Sec-Websocket-Version")
+	switch version {
+	case "13":
+		c.Version = ProtocolVersionHybi13
+	default:
+		return http.StatusBadRequest, ErrBadWebSocketVersion
+	}
+	var scheme string
+	if req.TLS != nil {
+		scheme = "wss"
+	} else {
+		scheme = "ws"
+	}
+	c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
+	if err != nil {
+		return http.StatusBadRequest, err
+	}
+	protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
+	if protocol != "" {
+		protocols := strings.Split(protocol, ",")
+		for i := 0; i < len(protocols); i++ {
+			c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
+		}
+	}
+	c.accept, err = getNonceAccept([]byte(key))
+	if err != nil {
+		return http.StatusInternalServerError, err
+	}
+	return http.StatusSwitchingProtocols, nil
+}
+
+// Origin parses the Origin header in req.
+// If the Origin header is not set, it returns nil and nil.
+func Origin(config *Config, req *http.Request) (*url.URL, error) {
+	var origin string
+	switch config.Version {
+	case ProtocolVersionHybi13:
+		origin = req.Header.Get("Origin")
+	}
+	if origin == "" {
+		return nil, nil
+	}
+	return url.ParseRequestURI(origin)
+}
+
+func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
+	if len(c.Protocol) > 0 {
+		if len(c.Protocol) != 1 {
+			// You need to choose a single Protocol in your Server's
+			// Handshake func.
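+			// Editor's sketch (hypothetical, for illustration): a
+			// Server.Handshake func can narrow the negotiated list, e.g.
+			//	config.Protocol = config.Protocol[:1]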
+			return ErrBadWebSocketProtocol
+		}
+	}
+	buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n")
+	buf.WriteString("Upgrade: websocket\r\n")
+	buf.WriteString("Connection: Upgrade\r\n")
+	buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n")
+	if len(c.Protocol) > 0 {
+		buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n")
+	}
+	// TODO(ukai): send Sec-WebSocket-Extensions.
+	if c.Header != nil {
+		err := c.Header.WriteSubset(buf, handshakeHeader)
+		if err != nil {
+			return err
+		}
+	}
+	buf.WriteString("\r\n")
+	return buf.Flush()
+}
+
+func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+	return newHybiServerConn(c.Config, buf, rwc, request)
+}
+
+// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol.
+func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+	return newHybiConn(config, buf, rwc, request)
+}
diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go
new file mode 100644
index 0000000000..0895dea190
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/server.go
@@ -0,0 +1,113 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
+	var hs serverHandshaker = &hybiServerHandshaker{Config: config}
+	code, err := hs.ReadHandshake(buf.Reader, req)
+	if err == ErrBadWebSocketVersion {
+		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+		fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
+		buf.WriteString("\r\n")
+		buf.WriteString(err.Error())
+		buf.Flush()
+		return
+	}
+	if err != nil {
+		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+		buf.WriteString("\r\n")
+		buf.WriteString(err.Error())
+		buf.Flush()
+		return
+	}
+	if handshake != nil {
+		err = handshake(config, req)
+		if err != nil {
+			code = http.StatusForbidden
+			fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+			buf.WriteString("\r\n")
+			buf.Flush()
+			return
+		}
+	}
+	err = hs.AcceptHandshake(buf.Writer)
+	if err != nil {
+		code = http.StatusBadRequest
+		fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+		buf.WriteString("\r\n")
+		buf.Flush()
+		return
+	}
+	conn = hs.NewServerConn(buf, rwc, req)
+	return
+}
+
+// Server represents a server of a WebSocket.
+type Server struct {
+	// Config is a WebSocket configuration for a new WebSocket connection.
+	Config
+
+	// Handshake is an optional function run during the WebSocket handshake.
+	// For example, you can use it to check (or deliberately skip checking)
+	// the Origin header, or to select config.Protocol.
+	Handshake func(*Config, *http.Request) error
+
+	// Handler handles a WebSocket connection.
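+	// Editor's usage sketch (the EchoServer handler and the /echo route
+	// are hypothetical, for illustration only):
+	//
+	//	func EchoServer(ws *websocket.Conn) { io.Copy(ws, ws) }
+	//
+	//	http.Handle("/echo", websocket.Server{
+	//		Handler: websocket.Handler(EchoServer),
+	//	})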
+	Handler
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket.
+func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	s.serveWebSocket(w, req)
+}
+
+func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
+	rwc, buf, err := w.(http.Hijacker).Hijack()
+	if err != nil {
+		panic("Hijack failed: " + err.Error())
+	}
+	// The server should abort the WebSocket connection if it finds
+	// the client did not send a handshake that matches with protocol
+	// specification.
+	defer rwc.Close()
+	conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
+	if err != nil {
+		return
+	}
+	if conn == nil {
+		panic("unexpected nil conn")
+	}
+	s.Handler(conn)
+}
+
+// Handler is a simple interface to a WebSocket browser client.
+// By default it checks that the Origin header is a valid URL.
+// You might want to verify websocket.Conn.Config().Origin in the func.
+// If you use Server instead of Handler, you could call websocket.Origin and
+// check the origin in your Handshake func. So, if you want to accept
+// non-browser clients, which do not send an Origin header, set a
+// Server.Handshake that does not check the origin.
+type Handler func(*Conn)
+
+func checkOrigin(config *Config, req *http.Request) (err error) {
+	config.Origin, err = Origin(config, req)
+	if err == nil && config.Origin == nil {
+		return fmt.Errorf("null origin")
+	}
+	return err
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket.
+func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	s := Server{Handler: h, Handshake: checkOrigin}
+	s.serveWebSocket(w, req)
+}
diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go
new file mode 100644
index 0000000000..e242c89a7a
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/websocket.go
@@ -0,0 +1,448 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements a client and server for the WebSocket protocol
+// as specified in RFC 6455.
+//
+// This package currently lacks some features found in an alternative
+// and more actively maintained WebSocket package:
+//
+//   https://godoc.org/github.com/gorilla/websocket
+//
+package websocket // import "golang.org/x/net/websocket"
+
+import (
+	"bufio"
+	"crypto/tls"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"sync"
+	"time"
+)
+
+const (
+	ProtocolVersionHybi13    = 13
+	ProtocolVersionHybi      = ProtocolVersionHybi13
+	SupportedProtocolVersion = "13"
+
+	ContinuationFrame = 0
+	TextFrame         = 1
+	BinaryFrame       = 2
+	CloseFrame        = 8
+	PingFrame         = 9
+	PongFrame         = 10
+	UnknownFrame      = 255
+
+	DefaultMaxPayloadBytes = 32 << 20 // 32MB
+)
+
+// ProtocolError represents WebSocket protocol errors.
+type ProtocolError struct {
+	ErrorString string
+}
+
+func (err *ProtocolError) Error() string { return err.ErrorString }
+
+var (
+	ErrBadProtocolVersion   = &ProtocolError{"bad protocol version"}
+	ErrBadScheme            = &ProtocolError{"bad scheme"}
+	ErrBadStatus            = &ProtocolError{"bad status"}
+	ErrBadUpgrade           = &ProtocolError{"missing or bad upgrade"}
+	ErrBadWebSocketOrigin   = &ProtocolError{"missing or bad WebSocket-Origin"}
+	ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
+	ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
+	ErrBadWebSocketVersion  = &ProtocolError{"missing or bad WebSocket Version"}
+	ErrChallengeResponse    = &ProtocolError{"mismatch challenge/response"}
+	ErrBadFrame             = &ProtocolError{"bad frame"}
+	ErrBadFrameBoundary     = &ProtocolError{"not on frame boundary"}
+	ErrNotWebSocket         = &ProtocolError{"not websocket protocol"}
+	ErrBadRequestMethod     = &ProtocolError{"bad method"}
+	ErrNotSupported         = &ProtocolError{"not supported"}
+)
+
+// ErrFrameTooLarge is returned by Codec's Receive method if the payload size
+// exceeds the limit set by Conn.MaxPayloadBytes.
+var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit")
+
+// Addr is an implementation of net.Addr for WebSocket.
+type Addr struct {
+	*url.URL
+}
+
+// Network returns the network type for a WebSocket, "websocket".
+func (addr *Addr) Network() string { return "websocket" }
+
+// Config is a WebSocket configuration.
+type Config struct {
+	// A WebSocket server address.
+	Location *url.URL
+
+	// A WebSocket client origin.
+	Origin *url.URL
+
+	// WebSocket subprotocols.
+	Protocol []string
+
+	// WebSocket protocol version.
+	Version int
+
+	// TLS config for secure WebSocket (wss).
+	TlsConfig *tls.Config
+
+	// Additional header fields to be sent in WebSocket opening handshake.
+	Header http.Header
+
+	// Dialer used when opening websocket connections.
+	Dialer *net.Dialer
+
+	handshakeData map[string]string
+}
+
+// serverHandshaker is an interface to handle WebSocket server side handshake.
+type serverHandshaker interface {
+	// ReadHandshake reads the handshake request message from the client.
+	// It returns an HTTP response code and an error, if any.
+	ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)
+
+	// AcceptHandshake accepts the client handshake request and sends
+	// the handshake response back to the client.
+	AcceptHandshake(buf *bufio.Writer) (err error)
+
+	// NewServerConn creates a new WebSocket connection.
+	NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
+}
+
+// frameReader is an interface to read a WebSocket frame.
+type frameReader interface {
+	// Reader reads the payload of the frame.
+	io.Reader
+
+	// PayloadType returns the payload type.
+	PayloadType() byte
+
+	// HeaderReader returns a reader to read the header of the frame.
+	HeaderReader() io.Reader
+
+	// TrailerReader returns a reader to read the trailer of the frame.
+	// If it returns nil, there is no trailer in the frame.
+	TrailerReader() io.Reader
+
+	// Len returns the total length of the frame, including header and trailer.
+	Len() int
+}
+
+// frameReaderFactory is an interface to create a new frame reader.
+type frameReaderFactory interface {
+	NewFrameReader() (r frameReader, err error)
+}
+
+// frameWriter is an interface to write a WebSocket frame.
+type frameWriter interface {
+	// Writer writes the payload of the frame.
+	io.WriteCloser
+}
+
+// frameWriterFactory is an interface to create a new frame writer.
+type frameWriterFactory interface { + NewFrameWriter(payloadType byte) (w frameWriter, err error) +} + +type frameHandler interface { + HandleFrame(frame frameReader) (r frameReader, err error) + WriteClose(status int) (err error) +} + +// Conn represents a WebSocket connection. +// +// Multiple goroutines may invoke methods on a Conn simultaneously. +type Conn struct { + config *Config + request *http.Request + + buf *bufio.ReadWriter + rwc io.ReadWriteCloser + + rio sync.Mutex + frameReaderFactory + frameReader + + wio sync.Mutex + frameWriterFactory + + frameHandler + PayloadType byte + defaultCloseStatus int + + // MaxPayloadBytes limits the size of frame payload received over Conn + // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used. + MaxPayloadBytes int +} + +// Read implements the io.Reader interface: +// it reads data of a frame from the WebSocket connection. +// if msg is not large enough for the frame data, it fills the msg and next Read +// will read the rest of the frame data. +// it reads Text frame or Binary frame. +func (ws *Conn) Read(msg []byte) (n int, err error) { + ws.rio.Lock() + defer ws.rio.Unlock() +again: + if ws.frameReader == nil { + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return 0, err + } + ws.frameReader, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return 0, err + } + if ws.frameReader == nil { + goto again + } + } + n, err = ws.frameReader.Read(msg) + if err == io.EOF { + if trailer := ws.frameReader.TrailerReader(); trailer != nil { + io.Copy(ioutil.Discard, trailer) + } + ws.frameReader = nil + goto again + } + return n, err +} + +// Write implements the io.Writer interface: +// it writes data as a frame to the WebSocket connection. +func (ws *Conn) Write(msg []byte) (n int, err error) { + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType) + if err != nil { + return 0, err + } + n, err = w.Write(msg) + w.Close() + return n, err +} + +// Close implements the io.Closer interface. +func (ws *Conn) Close() error { + err := ws.frameHandler.WriteClose(ws.defaultCloseStatus) + err1 := ws.rwc.Close() + if err != nil { + return err + } + return err1 +} + +func (ws *Conn) IsClientConn() bool { return ws.request == nil } +func (ws *Conn) IsServerConn() bool { return ws.request != nil } + +// LocalAddr returns the WebSocket Origin for the connection for client, or +// the WebSocket location for server. +func (ws *Conn) LocalAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Origin} + } + return &Addr{ws.config.Location} +} + +// RemoteAddr returns the WebSocket location for the connection for client, or +// the Websocket Origin for server. +func (ws *Conn) RemoteAddr() net.Addr { + if ws.IsClientConn() { + return &Addr{ws.config.Location} + } + return &Addr{ws.config.Origin} +} + +var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn") + +// SetDeadline sets the connection's network read & write deadlines. +func (ws *Conn) SetDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetDeadline(t) + } + return errSetDeadline +} + +// SetReadDeadline sets the connection's network read deadline. +func (ws *Conn) SetReadDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetReadDeadline(t) + } + return errSetDeadline +} + +// SetWriteDeadline sets the connection's network write deadline. 
+func (ws *Conn) SetWriteDeadline(t time.Time) error { + if conn, ok := ws.rwc.(net.Conn); ok { + return conn.SetWriteDeadline(t) + } + return errSetDeadline +} + +// Config returns the WebSocket config. +func (ws *Conn) Config() *Config { return ws.config } + +// Request returns the http request upgraded to the WebSocket. +// It is nil for client side. +func (ws *Conn) Request() *http.Request { return ws.request } + +// Codec represents a symmetric pair of functions that implement a codec. +type Codec struct { + Marshal func(v interface{}) (data []byte, payloadType byte, err error) + Unmarshal func(data []byte, payloadType byte, v interface{}) (err error) +} + +// Send sends v marshaled by cd.Marshal as single frame to ws. +func (cd Codec) Send(ws *Conn, v interface{}) (err error) { + data, payloadType, err := cd.Marshal(v) + if err != nil { + return err + } + ws.wio.Lock() + defer ws.wio.Unlock() + w, err := ws.frameWriterFactory.NewFrameWriter(payloadType) + if err != nil { + return err + } + _, err = w.Write(data) + w.Close() + return err +} + +// Receive receives single frame from ws, unmarshaled by cd.Unmarshal and stores +// in v. The whole frame payload is read to an in-memory buffer; max size of +// payload is defined by ws.MaxPayloadBytes. If frame payload size exceeds +// limit, ErrFrameTooLarge is returned; in this case frame is not read off wire +// completely. The next call to Receive would read and discard leftover data of +// previous oversized frame before processing next frame. +func (cd Codec) Receive(ws *Conn, v interface{}) (err error) { + ws.rio.Lock() + defer ws.rio.Unlock() + if ws.frameReader != nil { + _, err = io.Copy(ioutil.Discard, ws.frameReader) + if err != nil { + return err + } + ws.frameReader = nil + } +again: + frame, err := ws.frameReaderFactory.NewFrameReader() + if err != nil { + return err + } + frame, err = ws.frameHandler.HandleFrame(frame) + if err != nil { + return err + } + if frame == nil { + goto again + } + maxPayloadBytes := ws.MaxPayloadBytes + if maxPayloadBytes == 0 { + maxPayloadBytes = DefaultMaxPayloadBytes + } + if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) { + // payload size exceeds limit, no need to call Unmarshal + // + // set frameReader to current oversized frame so that + // the next call to this function can drain leftover + // data before processing the next frame + ws.frameReader = frame + return ErrFrameTooLarge + } + payloadType := frame.PayloadType() + data, err := ioutil.ReadAll(frame) + if err != nil { + return err + } + return cd.Unmarshal(data, payloadType, v) +} + +func marshal(v interface{}) (msg []byte, payloadType byte, err error) { + switch data := v.(type) { + case string: + return []byte(data), TextFrame, nil + case []byte: + return data, BinaryFrame, nil + } + return nil, UnknownFrame, ErrNotSupported +} + +func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + switch data := v.(type) { + case *string: + *data = string(msg) + return nil + case *[]byte: + *data = msg + return nil + } + return ErrNotSupported +} + +/* +Message is a codec to send/receive text/binary data in a frame on WebSocket connection. +To send/receive text frame, use string type. +To send/receive binary frame, use []byte type. 
+ +Trivial usage: + + import "websocket" + + // receive text frame + var message string + websocket.Message.Receive(ws, &message) + + // send text frame + message = "hello" + websocket.Message.Send(ws, message) + + // receive binary frame + var data []byte + websocket.Message.Receive(ws, &data) + + // send binary frame + data = []byte{0, 1, 2} + websocket.Message.Send(ws, data) + +*/ +var Message = Codec{marshal, unmarshal} + +func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) { + msg, err = json.Marshal(v) + return msg, TextFrame, err +} + +func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) { + return json.Unmarshal(msg, v) +} + +/* +JSON is a codec to send/receive JSON data in a frame from a WebSocket connection. + +Trivial usage: + + import "websocket" + + type T struct { + Msg string + Count int + } + + // receive JSON type T + var data T + websocket.JSON.Receive(ws, &data) + + // send JSON type T + websocket.JSON.Send(ws, data) +*/ +var JSON = Codec{jsonMarshal, jsonUnmarshal} diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/text/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/text/PATENTS b/vendor/golang.org/x/text/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/text/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/text/encoding/charmap/charmap.go b/vendor/golang.org/x/text/encoding/charmap/charmap.go new file mode 100644 index 0000000000..e89ff0734f --- /dev/null +++ b/vendor/golang.org/x/text/encoding/charmap/charmap.go @@ -0,0 +1,249 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run maketables.go + +// Package charmap provides simple character encodings such as IBM Code Page 437 +// and Windows 1252. +package charmap // import "golang.org/x/text/encoding/charmap" + +import ( + "unicode/utf8" + + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/internal" + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/transform" +) + +// These encodings vary only in the way clients should interpret them. Their +// coded character set is identical and a single implementation can be shared. +var ( + // ISO8859_6E is the ISO 8859-6E encoding. + ISO8859_6E encoding.Encoding = &iso8859_6E + + // ISO8859_6I is the ISO 8859-6I encoding. + ISO8859_6I encoding.Encoding = &iso8859_6I + + // ISO8859_8E is the ISO 8859-8E encoding. + ISO8859_8E encoding.Encoding = &iso8859_8E + + // ISO8859_8I is the ISO 8859-8I encoding. + ISO8859_8I encoding.Encoding = &iso8859_8I + + iso8859_6E = internal.Encoding{ + Encoding: ISO8859_6, + Name: "ISO-8859-6E", + MIB: identifier.ISO88596E, + } + + iso8859_6I = internal.Encoding{ + Encoding: ISO8859_6, + Name: "ISO-8859-6I", + MIB: identifier.ISO88596I, + } + + iso8859_8E = internal.Encoding{ + Encoding: ISO8859_8, + Name: "ISO-8859-8E", + MIB: identifier.ISO88598E, + } + + iso8859_8I = internal.Encoding{ + Encoding: ISO8859_8, + Name: "ISO-8859-8I", + MIB: identifier.ISO88598I, + } +) + +// All is a list of all defined encodings in this package. +var All []encoding.Encoding = listAll + +// TODO: implement these encodings, in order of importance. +// ASCII, ISO8859_1: Rather common. Close to Windows 1252. +// ISO8859_9: Close to Windows 1254. + +// utf8Enc holds a rune's UTF-8 encoding in data[:len]. +type utf8Enc struct { + len uint8 + data [3]byte +} + +// Charmap is an 8-bit character set encoding. +type Charmap struct { + // name is the encoding's name. + name string + // mib is the encoding type of this encoder. 
+ mib identifier.MIB + // asciiSuperset states whether the encoding is a superset of ASCII. + asciiSuperset bool + // low is the lower bound of the encoded byte for a non-ASCII rune. If + // Charmap.asciiSuperset is true then this will be 0x80, otherwise 0x00. + low uint8 + // replacement is the encoded replacement character. + replacement byte + // decode is the map from encoded byte to UTF-8. + decode [256]utf8Enc + // encoding is the map from runes to encoded bytes. Each entry is a + // uint32: the high 8 bits are the encoded byte and the low 24 bits are + // the rune. The table entries are sorted by ascending rune. + encode [256]uint32 +} + +// NewDecoder implements the encoding.Encoding interface. +func (m *Charmap) NewDecoder() *encoding.Decoder { + return &encoding.Decoder{Transformer: charmapDecoder{charmap: m}} +} + +// NewEncoder implements the encoding.Encoding interface. +func (m *Charmap) NewEncoder() *encoding.Encoder { + return &encoding.Encoder{Transformer: charmapEncoder{charmap: m}} +} + +// String returns the Charmap's name. +func (m *Charmap) String() string { + return m.name +} + +// ID implements an internal interface. +func (m *Charmap) ID() (mib identifier.MIB, other string) { + return m.mib, "" +} + +// charmapDecoder implements transform.Transformer by decoding to UTF-8. +type charmapDecoder struct { + transform.NopResetter + charmap *Charmap +} + +func (m charmapDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for i, c := range src { + if m.charmap.asciiSuperset && c < utf8.RuneSelf { + if nDst >= len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = c + nDst++ + nSrc = i + 1 + continue + } + + decode := &m.charmap.decode[c] + n := int(decode.len) + if nDst+n > len(dst) { + err = transform.ErrShortDst + break + } + // It's 15% faster to avoid calling copy for these tiny slices. + for j := 0; j < n; j++ { + dst[nDst] = decode.data[j] + nDst++ + } + nSrc = i + 1 + } + return nDst, nSrc, err +} + +// DecodeByte returns the Charmap's rune decoding of the byte b. +func (m *Charmap) DecodeByte(b byte) rune { + switch x := &m.decode[b]; x.len { + case 1: + return rune(x.data[0]) + case 2: + return rune(x.data[0]&0x1f)<<6 | rune(x.data[1]&0x3f) + default: + return rune(x.data[0]&0x0f)<<12 | rune(x.data[1]&0x3f)<<6 | rune(x.data[2]&0x3f) + } +} + +// charmapEncoder implements transform.Transformer by encoding from UTF-8. +type charmapEncoder struct { + transform.NopResetter + charmap *Charmap +} + +func (m charmapEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + r, size := rune(0), 0 +loop: + for nSrc < len(src) { + if nDst >= len(dst) { + err = transform.ErrShortDst + break + } + r = rune(src[nSrc]) + + // Decode a 1-byte rune. + if r < utf8.RuneSelf { + if m.charmap.asciiSuperset { + nSrc++ + dst[nDst] = uint8(r) + nDst++ + continue + } + size = 1 + + } else { + // Decode a multi-byte rune. + r, size = utf8.DecodeRune(src[nSrc:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + } else { + err = internal.RepertoireError(m.charmap.replacement) + } + break + } + } + + // Binary search in [low, high) for that rune in the m.charmap.encode table. 
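+		// Editor's note (illustrative): each encode entry packs the encoded
+		// byte into the high 8 bits and the rune into the low 24 bits, so
+		// e.g. an entry 0x41000041 would map U+0041 ('A') to byte 0x41;
+		// because entries are sorted by the rune in the low bits, a plain
+		// binary search over the packed values works.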
+ for low, high := int(m.charmap.low), 0x100; ; { + if low >= high { + err = internal.RepertoireError(m.charmap.replacement) + break loop + } + mid := (low + high) / 2 + got := m.charmap.encode[mid] + gotRune := rune(got & (1<<24 - 1)) + if gotRune < r { + low = mid + 1 + } else if gotRune > r { + high = mid + } else { + dst[nDst] = byte(got >> 24) + nDst++ + break + } + } + nSrc += size + } + return nDst, nSrc, err +} + +// EncodeRune returns the Charmap's byte encoding of the rune r. ok is whether +// r is in the Charmap's repertoire. If not, b is set to the Charmap's +// replacement byte. This is often the ASCII substitute character '\x1a'. +func (m *Charmap) EncodeRune(r rune) (b byte, ok bool) { + if r < utf8.RuneSelf && m.asciiSuperset { + return byte(r), true + } + for low, high := int(m.low), 0x100; ; { + if low >= high { + return m.replacement, false + } + mid := (low + high) / 2 + got := m.encode[mid] + gotRune := rune(got & (1<<24 - 1)) + if gotRune < r { + low = mid + 1 + } else if gotRune > r { + high = mid + } else { + return byte(got >> 24), true + } + } +} diff --git a/vendor/golang.org/x/text/encoding/charmap/maketables.go b/vendor/golang.org/x/text/encoding/charmap/maketables.go new file mode 100644 index 0000000000..f7941701e8 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/charmap/maketables.go @@ -0,0 +1,556 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "bufio" + "fmt" + "log" + "net/http" + "sort" + "strings" + "unicode/utf8" + + "golang.org/x/text/encoding" + "golang.org/x/text/internal/gen" +) + +const ascii = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + + "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + + ` !"#$%&'()*+,-./0123456789:;<=>?` + + `@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_` + + "`abcdefghijklmnopqrstuvwxyz{|}~\u007f" + +var encodings = []struct { + name string + mib string + comment string + varName string + replacement byte + mapping string +}{ + { + "IBM Code Page 037", + "IBM037", + "", + "CodePage037", + 0x3f, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM037-2.1.2.ucm", + }, + { + "IBM Code Page 437", + "PC8CodePage437", + "", + "CodePage437", + encoding.ASCIISub, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM437-2.1.2.ucm", + }, + { + "IBM Code Page 850", + "PC850Multilingual", + "", + "CodePage850", + encoding.ASCIISub, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM850-2.1.2.ucm", + }, + { + "IBM Code Page 852", + "PCp852", + "", + "CodePage852", + encoding.ASCIISub, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM852-2.1.2.ucm", + }, + { + "IBM Code Page 855", + "IBM855", + "", + "CodePage855", + encoding.ASCIISub, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM855-2.1.2.ucm", + }, + { + "Windows Code Page 858", // PC latin1 with Euro + "IBM00858", + "", + "CodePage858", + encoding.ASCIISub, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/windows-858-2000.ucm", + }, + { + "IBM Code Page 860", + "IBM860", + "", + "CodePage860", + encoding.ASCIISub, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM860-2.1.2.ucm", + }, + { + "IBM Code Page 862", + "PC862LatinHebrew", + "", + "CodePage862", + encoding.ASCIISub, + 
"http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM862-2.1.2.ucm", + }, + { + "IBM Code Page 863", + "IBM863", + "", + "CodePage863", + encoding.ASCIISub, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM863-2.1.2.ucm", + }, + { + "IBM Code Page 865", + "IBM865", + "", + "CodePage865", + encoding.ASCIISub, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM865-2.1.2.ucm", + }, + { + "IBM Code Page 866", + "IBM866", + "", + "CodePage866", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-ibm866.txt", + }, + { + "IBM Code Page 1047", + "IBM1047", + "", + "CodePage1047", + 0x3f, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/glibc-IBM1047-2.1.2.ucm", + }, + { + "IBM Code Page 1140", + "IBM01140", + "", + "CodePage1140", + 0x3f, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/ibm-1140_P100-1997.ucm", + }, + { + "ISO 8859-1", + "ISOLatin1", + "", + "ISO8859_1", + encoding.ASCIISub, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_1-1998.ucm", + }, + { + "ISO 8859-2", + "ISOLatin2", + "", + "ISO8859_2", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-2.txt", + }, + { + "ISO 8859-3", + "ISOLatin3", + "", + "ISO8859_3", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-3.txt", + }, + { + "ISO 8859-4", + "ISOLatin4", + "", + "ISO8859_4", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-4.txt", + }, + { + "ISO 8859-5", + "ISOLatinCyrillic", + "", + "ISO8859_5", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-5.txt", + }, + { + "ISO 8859-6", + "ISOLatinArabic", + "", + "ISO8859_6,ISO8859_6E,ISO8859_6I", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-6.txt", + }, + { + "ISO 8859-7", + "ISOLatinGreek", + "", + "ISO8859_7", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-7.txt", + }, + { + "ISO 8859-8", + "ISOLatinHebrew", + "", + "ISO8859_8,ISO8859_8E,ISO8859_8I", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-8.txt", + }, + { + "ISO 8859-9", + "ISOLatin5", + "", + "ISO8859_9", + encoding.ASCIISub, + "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/iso-8859_9-1999.ucm", + }, + { + "ISO 8859-10", + "ISOLatin6", + "", + "ISO8859_10", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-10.txt", + }, + { + "ISO 8859-13", + "ISO885913", + "", + "ISO8859_13", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-13.txt", + }, + { + "ISO 8859-14", + "ISO885914", + "", + "ISO8859_14", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-14.txt", + }, + { + "ISO 8859-15", + "ISO885915", + "", + "ISO8859_15", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-15.txt", + }, + { + "ISO 8859-16", + "ISO885916", + "", + "ISO8859_16", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-iso-8859-16.txt", + }, + { + "KOI8-R", + "KOI8R", + "", + "KOI8R", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-koi8-r.txt", + }, + { + "KOI8-U", + "KOI8U", + "", + "KOI8U", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-koi8-u.txt", + }, + { + "Macintosh", + "Macintosh", + "", + "Macintosh", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-macintosh.txt", + }, + { + "Macintosh Cyrillic", + "MacintoshCyrillic", + "", + "MacintoshCyrillic", + 
encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-x-mac-cyrillic.txt", + }, + { + "Windows 874", + "Windows874", + "", + "Windows874", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-windows-874.txt", + }, + { + "Windows 1250", + "Windows1250", + "", + "Windows1250", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-windows-1250.txt", + }, + { + "Windows 1251", + "Windows1251", + "", + "Windows1251", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-windows-1251.txt", + }, + { + "Windows 1252", + "Windows1252", + "", + "Windows1252", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-windows-1252.txt", + }, + { + "Windows 1253", + "Windows1253", + "", + "Windows1253", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-windows-1253.txt", + }, + { + "Windows 1254", + "Windows1254", + "", + "Windows1254", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-windows-1254.txt", + }, + { + "Windows 1255", + "Windows1255", + "", + "Windows1255", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-windows-1255.txt", + }, + { + "Windows 1256", + "Windows1256", + "", + "Windows1256", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-windows-1256.txt", + }, + { + "Windows 1257", + "Windows1257", + "", + "Windows1257", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-windows-1257.txt", + }, + { + "Windows 1258", + "Windows1258", + "", + "Windows1258", + encoding.ASCIISub, + "http://encoding.spec.whatwg.org/index-windows-1258.txt", + }, + { + "X-User-Defined", + "XUserDefined", + "It is defined at http://encoding.spec.whatwg.org/#x-user-defined", + "XUserDefined", + encoding.ASCIISub, + ascii + + "\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787" + + "\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f" + + "\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797" + + "\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f" + + "\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7" + + "\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af" + + "\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7" + + "\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf" + + "\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7" + + "\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf" + + "\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7" + + "\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df" + + "\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7" + + "\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef" + + "\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7" + + "\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff", + }, +} + +func getWHATWG(url string) string { + res, err := http.Get(url) + if err != nil { + log.Fatalf("%q: Get: %v", url, err) + } + defer res.Body.Close() + + mapping := make([]rune, 128) + for i := range mapping { + mapping[i] = '\ufffd' + } + + scanner := bufio.NewScanner(res.Body) + for scanner.Scan() { + s := strings.TrimSpace(scanner.Text()) + if s == "" || s[0] == '#' { + continue + } + x, y := 0, 0 + if _, err := fmt.Sscanf(s, "%d\t0x%x", &x, &y); err != nil { + log.Fatalf("could not parse %q", s) + } + if x < 0 || 128 <= x { + log.Fatalf("code %d is out of range", x) + } + if 0x80 <= y && y < 0xa0 { + // We diverge from the WHATWG spec by mapping control characters + // in the range [0x80, 0xa0) to U+FFFD. 
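+			// Editor's note (illustrative): the index lines parsed by the
+			// Sscanf above start with "<pointer>\t0x<code point>"; e.g. in
+			// index-windows-1252.txt, pointer 0 is 0x20AC, meaning byte
+			// 0x80 decodes to the euro sign.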
+			continue
+		}
+		mapping[x] = rune(y)
+	}
+	return ascii + string(mapping)
+}
+
+func getUCM(url string) string {
+	res, err := http.Get(url)
+	if err != nil {
+		log.Fatalf("%q: Get: %v", url, err)
+	}
+	defer res.Body.Close()
+
+	mapping := make([]rune, 256)
+	for i := range mapping {
+		mapping[i] = '\ufffd'
+	}
+
+	charsFound := 0
+	scanner := bufio.NewScanner(res.Body)
+	for scanner.Scan() {
+		s := strings.TrimSpace(scanner.Text())
+		if s == "" || s[0] == '#' {
+			continue
+		}
+		var c byte
+		var r rune
+		if _, err := fmt.Sscanf(s, `<U%x> \x%x |0`, &r, &c); err != nil {
+			continue
+		}
+		mapping[c] = r
+		charsFound++
+	}
+
+	if charsFound < 200 {
+		log.Fatalf("%q: only %d characters found (wrong page format?)", url, charsFound)
+	}
+
+	return string(mapping)
+}
+
+func main() {
+	mibs := map[string]bool{}
+	all := []string{}
+
+	w := gen.NewCodeWriter()
+	defer w.WriteGoFile("tables.go", "charmap")
+
+	printf := func(s string, a ...interface{}) { fmt.Fprintf(w, s, a...) }
+
+	printf("import (\n")
+	printf("\t\"golang.org/x/text/encoding\"\n")
+	printf("\t\"golang.org/x/text/encoding/internal/identifier\"\n")
+	printf(")\n\n")
+	for _, e := range encodings {
+		varNames := strings.Split(e.varName, ",")
+		all = append(all, varNames...)
+		varName := varNames[0]
+		switch {
+		case strings.HasPrefix(e.mapping, "http://encoding.spec.whatwg.org/"):
+			e.mapping = getWHATWG(e.mapping)
+		case strings.HasPrefix(e.mapping, "http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/"):
+			e.mapping = getUCM(e.mapping)
+		}
+
+		asciiSuperset, low := strings.HasPrefix(e.mapping, ascii), 0x00
+		if asciiSuperset {
+			low = 0x80
+		}
+		lvn := 1
+		if strings.HasPrefix(varName, "ISO") || strings.HasPrefix(varName, "KOI") {
+			lvn = 3
+		}
+		lowerVarName := strings.ToLower(varName[:lvn]) + varName[lvn:]
+		printf("// %s is the %s encoding.\n", varName, e.name)
+		if e.comment != "" {
+			printf("//\n// %s\n", e.comment)
+		}
+		printf("var %s *Charmap = &%s\n\nvar %s = Charmap{\nname: %q,\n",
+			varName, lowerVarName, lowerVarName, e.name)
+		if mibs[e.mib] {
+			log.Fatalf("MIB type %q declared multiple times.", e.mib)
+		}
+		printf("mib: identifier.%s,\n", e.mib)
+		printf("asciiSuperset: %t,\n", asciiSuperset)
+		printf("low: 0x%02x,\n", low)
+		printf("replacement: 0x%02x,\n", e.replacement)
+
+		printf("decode: [256]utf8Enc{\n")
+		i, backMapping := 0, map[rune]byte{}
+		for _, c := range e.mapping {
+			if _, ok := backMapping[c]; !ok && c != utf8.RuneError {
+				backMapping[c] = byte(i)
+			}
+			var buf [8]byte
+			n := utf8.EncodeRune(buf[:], c)
+			if n > 3 {
+				panic(fmt.Sprintf("rune %q (%U) is too long", c, c))
+			}
+			printf("{%d,[3]byte{0x%02x,0x%02x,0x%02x}},", n, buf[0], buf[1], buf[2])
+			if i%2 == 1 {
+				printf("\n")
+			}
+			i++
+		}
+		printf("},\n")
+
+		printf("encode: [256]uint32{\n")
+		encode := make([]uint32, 0, 256)
+		for c, i := range backMapping {
+			encode = append(encode, uint32(i)<<24|uint32(c))
+		}
+		sort.Sort(byRune(encode))
+		for len(encode) < cap(encode) {
+			encode = append(encode, encode[len(encode)-1])
+		}
+		for i, enc := range encode {
+			printf("0x%08x,", enc)
+			if i%8 == 7 {
+				printf("\n")
+			}
+		}
+		printf("},\n}\n")
+
+		// Add an estimate of the size of a single Charmap{} struct value, which
+		// includes two 256 elem arrays of 4 bytes and some extra fields, which
+		// align to 3 uint64s on 64-bit architectures.
+		w.Size += 2*4*256 + 3*8
+	}
+	// TODO: add proper line breaking.
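+	// Note on the generated encode tables (a reader's summary of the loop
+	// above, not upstream documentation): each entry packs the charmap byte
+	// in the top 8 bits and the Unicode code point in the low 24 bits,
+	// roughly:
+	//
+	//	entry := uint32(b)<<24 | uint32(r) // b: charmap byte, r: rune
+	//	b, r = byte(entry>>24), rune(entry&0xffffff)
+	//
+	// byRune (defined below) sorts entries by the rune part so a table can
+	// be searched by code point, and the slice is padded by repeating its
+	// last entry until all 256 slots are filled.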
+ printf("var listAll = []encoding.Encoding{\n%s,\n}\n\n", strings.Join(all, ",\n")) +} + +type byRune []uint32 + +func (b byRune) Len() int { return len(b) } +func (b byRune) Less(i, j int) bool { return b[i]&0xffffff < b[j]&0xffffff } +func (b byRune) Swap(i, j int) { b[i], b[j] = b[j], b[i] } diff --git a/vendor/golang.org/x/text/encoding/charmap/tables.go b/vendor/golang.org/x/text/encoding/charmap/tables.go new file mode 100644 index 0000000000..cf7281e9e3 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/charmap/tables.go @@ -0,0 +1,7410 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package charmap + +import ( + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/internal/identifier" +) + +// CodePage037 is the IBM Code Page 037 encoding. +var CodePage037 *Charmap = &codePage037 + +var codePage037 = Charmap{ + name: "IBM Code Page 037", + mib: identifier.IBM037, + asciiSuperset: false, + low: 0x00, + replacement: 0x3f, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x97, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {1, [3]byte{0x0a, 0x00, 0x00}}, + {1, [3]byte{0x17, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {1, [3]byte{0x04, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {1, [3]byte{0x1a, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa7, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {1, [3]byte{0x2e, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, 
{1, [3]byte{0x28, 0x00, 0x00}}, + {1, [3]byte{0x2b, 0x00, 0x00}}, {1, [3]byte{0x7c, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {1, [3]byte{0x21, 0x00, 0x00}}, {1, [3]byte{0x24, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x3b, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xac, 0x00}}, + {1, [3]byte{0x2d, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {1, [3]byte{0x2c, 0x00, 0x00}}, + {1, [3]byte{0x25, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {1, [3]byte{0x60, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x3d, 0x00, 0x00}}, {1, [3]byte{0x22, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {1, [3]byte{0x6a, 0x00, 0x00}}, + {1, [3]byte{0x6b, 0x00, 0x00}}, {1, [3]byte{0x6c, 0x00, 0x00}}, + {1, [3]byte{0x6d, 0x00, 0x00}}, {1, [3]byte{0x6e, 0x00, 0x00}}, + {1, [3]byte{0x6f, 0x00, 0x00}}, {1, [3]byte{0x70, 0x00, 0x00}}, + {1, [3]byte{0x71, 0x00, 0x00}}, {1, [3]byte{0x72, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {1, [3]byte{0x7e, 0x00, 0x00}}, + {1, [3]byte{0x73, 0x00, 0x00}}, {1, [3]byte{0x74, 0x00, 0x00}}, + {1, [3]byte{0x75, 0x00, 0x00}}, {1, [3]byte{0x76, 0x00, 0x00}}, + {1, [3]byte{0x77, 0x00, 0x00}}, {1, [3]byte{0x78, 0x00, 0x00}}, + {1, [3]byte{0x79, 0x00, 0x00}}, {1, [3]byte{0x7a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xa1, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xbc, 0x00}}, + {2, [3]byte{0xc2, 
0xbd, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {1, [3]byte{0x5b, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {1, [3]byte{0x7b, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {1, [3]byte{0x7d, 0x00, 0x00}}, {1, [3]byte{0x4a, 0x00, 0x00}}, + {1, [3]byte{0x4b, 0x00, 0x00}}, {1, [3]byte{0x4c, 0x00, 0x00}}, + {1, [3]byte{0x4d, 0x00, 0x00}}, {1, [3]byte{0x4e, 0x00, 0x00}}, + {1, [3]byte{0x4f, 0x00, 0x00}}, {1, [3]byte{0x50, 0x00, 0x00}}, + {1, [3]byte{0x51, 0x00, 0x00}}, {1, [3]byte{0x52, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb9, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {1, [3]byte{0x53, 0x00, 0x00}}, {1, [3]byte{0x54, 0x00, 0x00}}, + {1, [3]byte{0x55, 0x00, 0x00}}, {1, [3]byte{0x56, 0x00, 0x00}}, + {1, [3]byte{0x57, 0x00, 0x00}}, {1, [3]byte{0x58, 0x00, 0x00}}, + {1, [3]byte{0x59, 0x00, 0x00}}, {1, [3]byte{0x5a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x37000004, 0x2d000005, 0x2e000006, 0x2f000007, + 0x16000008, 0x05000009, 0x2500000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x3c000014, 0x3d000015, 0x32000016, 0x26000017, + 0x18000018, 0x19000019, 0x3f00001a, 0x2700001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x40000020, 0x5a000021, 0x7f000022, 0x7b000023, 0x5b000024, 0x6c000025, 0x50000026, 0x7d000027, + 0x4d000028, 0x5d000029, 0x5c00002a, 0x4e00002b, 0x6b00002c, 0x6000002d, 0x4b00002e, 0x6100002f, + 0xf0000030, 0xf1000031, 0xf2000032, 0xf3000033, 0xf4000034, 0xf5000035, 0xf6000036, 0xf7000037, + 0xf8000038, 0xf9000039, 0x7a00003a, 0x5e00003b, 0x4c00003c, 0x7e00003d, 0x6e00003e, 0x6f00003f, + 0x7c000040, 0xc1000041, 0xc2000042, 0xc3000043, 0xc4000044, 0xc5000045, 0xc6000046, 0xc7000047, + 0xc8000048, 0xc9000049, 0xd100004a, 0xd200004b, 0xd300004c, 0xd400004d, 0xd500004e, 0xd600004f, + 0xd7000050, 0xd8000051, 0xd9000052, 0xe2000053, 0xe3000054, 0xe4000055, 0xe5000056, 0xe6000057, + 0xe7000058, 0xe8000059, 0xe900005a, 0xba00005b, 0xe000005c, 0xbb00005d, 0xb000005e, 
0x6d00005f, + 0x79000060, 0x81000061, 0x82000062, 0x83000063, 0x84000064, 0x85000065, 0x86000066, 0x87000067, + 0x88000068, 0x89000069, 0x9100006a, 0x9200006b, 0x9300006c, 0x9400006d, 0x9500006e, 0x9600006f, + 0x97000070, 0x98000071, 0x99000072, 0xa2000073, 0xa3000074, 0xa4000075, 0xa5000076, 0xa6000077, + 0xa7000078, 0xa8000079, 0xa900007a, 0xc000007b, 0x4f00007c, 0xd000007d, 0xa100007e, 0x0700007f, + 0x20000080, 0x21000081, 0x22000082, 0x23000083, 0x24000084, 0x15000085, 0x06000086, 0x17000087, + 0x28000088, 0x29000089, 0x2a00008a, 0x2b00008b, 0x2c00008c, 0x0900008d, 0x0a00008e, 0x1b00008f, + 0x30000090, 0x31000091, 0x1a000092, 0x33000093, 0x34000094, 0x35000095, 0x36000096, 0x08000097, + 0x38000098, 0x39000099, 0x3a00009a, 0x3b00009b, 0x0400009c, 0x1400009d, 0x3e00009e, 0xff00009f, + 0x410000a0, 0xaa0000a1, 0x4a0000a2, 0xb10000a3, 0x9f0000a4, 0xb20000a5, 0x6a0000a6, 0xb50000a7, + 0xbd0000a8, 0xb40000a9, 0x9a0000aa, 0x8a0000ab, 0x5f0000ac, 0xca0000ad, 0xaf0000ae, 0xbc0000af, + 0x900000b0, 0x8f0000b1, 0xea0000b2, 0xfa0000b3, 0xbe0000b4, 0xa00000b5, 0xb60000b6, 0xb30000b7, + 0x9d0000b8, 0xda0000b9, 0x9b0000ba, 0x8b0000bb, 0xb70000bc, 0xb80000bd, 0xb90000be, 0xab0000bf, + 0x640000c0, 0x650000c1, 0x620000c2, 0x660000c3, 0x630000c4, 0x670000c5, 0x9e0000c6, 0x680000c7, + 0x740000c8, 0x710000c9, 0x720000ca, 0x730000cb, 0x780000cc, 0x750000cd, 0x760000ce, 0x770000cf, + 0xac0000d0, 0x690000d1, 0xed0000d2, 0xee0000d3, 0xeb0000d4, 0xef0000d5, 0xec0000d6, 0xbf0000d7, + 0x800000d8, 0xfd0000d9, 0xfe0000da, 0xfb0000db, 0xfc0000dc, 0xad0000dd, 0xae0000de, 0x590000df, + 0x440000e0, 0x450000e1, 0x420000e2, 0x460000e3, 0x430000e4, 0x470000e5, 0x9c0000e6, 0x480000e7, + 0x540000e8, 0x510000e9, 0x520000ea, 0x530000eb, 0x580000ec, 0x550000ed, 0x560000ee, 0x570000ef, + 0x8c0000f0, 0x490000f1, 0xcd0000f2, 0xce0000f3, 0xcb0000f4, 0xcf0000f5, 0xcc0000f6, 0xe10000f7, + 0x700000f8, 0xdd0000f9, 0xde0000fa, 0xdb0000fb, 0xdc0000fc, 0x8d0000fd, 0x8e0000fe, 0xdf0000ff, + }, +} + +// CodePage437 is the IBM Code Page 437 encoding. 
+var CodePage437 *Charmap = &codePage437 + +var codePage437 = Charmap{ + name: "IBM Code Page 437", + mib: identifier.PC8CodePage437, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xa7}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0x90}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 
0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0x9b0000a2, 0x9c0000a3, 0x9d0000a5, 0xa60000aa, 0xae0000ab, 0xaa0000ac, + 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xe60000b5, 0xfa0000b7, 0xa70000ba, 0xaf0000bb, 0xac0000bc, + 0xab0000bd, 0xa80000bf, 0x8e0000c4, 0x8f0000c5, 0x920000c6, 0x800000c7, 0x900000c9, 0xa50000d1, + 0x990000d6, 0x9a0000dc, 0xe10000df, 0x850000e0, 0xa00000e1, 0x830000e2, 0x840000e4, 0x860000e5, + 
0x910000e6, 0x870000e7, 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8d0000ec, 0xa10000ed, + 0x8c0000ee, 0x8b0000ef, 0xa40000f1, 0x950000f2, 0xa20000f3, 0x930000f4, 0x940000f6, 0xf60000f7, + 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0x980000ff, 0x9f000192, 0xe2000393, 0xe9000398, + 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, 0xe50003c3, + 0xe70003c4, 0xed0003c6, 0xfc00207f, 0x9e0020a7, 0xf9002219, 0xfb00221a, 0xec00221e, 0xef002229, + 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xa9002310, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage850 is the IBM Code Page 850 encoding. +var CodePage850 *Charmap = &codePage850 + +var codePage850 = Charmap{ + name: "IBM Code Page 850", + mib: identifier.PC850Multilingual, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 
0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x98, 0x00}}, + {2, [3]byte{0xc3, 0x97, 0x00}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, 
[3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x80, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0x90, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0x8e, 0x00}}, + {2, [3]byte{0xc3, 0x8f, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {2, [3]byte{0xc2, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0xbe, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9a, 0x00}}, + {2, [3]byte{0xc3, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0xbd, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x97}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 
0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0xbd0000a2, 0x9c0000a3, 0xcf0000a4, 0xbe0000a5, 0xdd0000a6, 0xf50000a7, + 0xf90000a8, 0xb80000a9, 0xa60000aa, 0xae0000ab, 0xaa0000ac, 0xf00000ad, 0xa90000ae, 0xee0000af, + 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xfc0000b3, 0xef0000b4, 0xe60000b5, 0xf40000b6, 0xfa0000b7, + 0xf70000b8, 0xfb0000b9, 0xa70000ba, 0xaf0000bb, 0xac0000bc, 0xab0000bd, 0xf30000be, 0xa80000bf, + 0xb70000c0, 0xb50000c1, 0xb60000c2, 0xc70000c3, 0x8e0000c4, 0x8f0000c5, 0x920000c6, 0x800000c7, + 0xd40000c8, 0x900000c9, 0xd20000ca, 0xd30000cb, 0xde0000cc, 0xd60000cd, 0xd70000ce, 0xd80000cf, + 0xd10000d0, 0xa50000d1, 0xe30000d2, 0xe00000d3, 0xe20000d4, 0xe50000d5, 0x990000d6, 0x9e0000d7, + 0x9d0000d8, 0xeb0000d9, 0xe90000da, 0xea0000db, 0x9a0000dc, 0xed0000dd, 0xe80000de, 0xe10000df, + 0x850000e0, 0xa00000e1, 0x830000e2, 0xc60000e3, 0x840000e4, 0x860000e5, 0x910000e6, 0x870000e7, + 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8d0000ec, 0xa10000ed, 0x8c0000ee, 0x8b0000ef, + 0xd00000f0, 0xa40000f1, 0x950000f2, 0xa20000f3, 0x930000f4, 0xe40000f5, 0x940000f6, 0xf60000f7, + 0x9b0000f8, 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0xec0000fd, 0xe70000fe, 0x980000ff, + 0xd5000131, 0x9f000192, 0xf2002017, 0xc4002500, 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, + 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, + 0xc9002554, 0xbb002557, 0xc800255a, 0xbc00255d, 0xcc002560, 0xb9002563, 0xcb002566, 0xca002569, + 0xce00256c, 0xdf002580, 0xdc002584, 0xdb002588, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage852 is the IBM Code Page 852 encoding. 
+var CodePage852 *Charmap = &codePage852 + +var codePage852 = Charmap{ + name: "IBM Code Page 852", + mib: identifier.PCp852, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc5, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc5, 0x82, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc5, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc5, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc4, 0x86, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc4, 0xb9, 0x00}}, + {2, [3]byte{0xc4, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc4, 0xbd, 0x00}}, + {2, [3]byte{0xc4, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0x9a, 0x00}}, + {2, [3]byte{0xc5, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc5, 0xa4, 0x00}}, + {2, [3]byte{0xc5, 0xa5, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x97, 0x00}}, {2, [3]byte{0xc4, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc4, 0x84, 0x00}}, {2, [3]byte{0xc4, 0x85, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc5, 0xbe, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc4, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc5, 0xba, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc5, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x9a, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0xbc, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {2, [3]byte{0xc4, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x83, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc4, 0x91, 
0x00}}, {2, [3]byte{0xc4, 0x90, 0x00}}, + {2, [3]byte{0xc4, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x8f, 0x00}}, {2, [3]byte{0xc5, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0x8e, 0x00}}, + {2, [3]byte{0xc4, 0x9b, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {2, [3]byte{0xc5, 0xa2, 0x00}}, + {2, [3]byte{0xc5, 0xae, 0x00}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc5, 0x84, 0x00}}, {2, [3]byte{0xc5, 0x88, 0x00}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {2, [3]byte{0xc5, 0xa1, 0x00}}, + {2, [3]byte{0xc5, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x9a, 0x00}}, + {2, [3]byte{0xc5, 0x95, 0x00}}, {2, [3]byte{0xc5, 0xb0, 0x00}}, + {2, [3]byte{0xc3, 0xbd, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc5, 0xa3, 0x00}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xcb, 0x9d, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {2, [3]byte{0xcb, 0x87, 0x00}}, + {2, [3]byte{0xcb, 0x98, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xcb, 0x99, 0x00}}, {2, [3]byte{0xc5, 0xb1, 0x00}}, + {2, [3]byte{0xc5, 0x98, 0x00}}, {2, [3]byte{0xc5, 0x99, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xcf0000a4, 0xf50000a7, 0xf90000a8, 0xae0000ab, 0xaa0000ac, 0xf00000ad, 0xf80000b0, + 0xef0000b4, 0xf70000b8, 0xaf0000bb, 0xb50000c1, 0xb60000c2, 0x8e0000c4, 0x800000c7, 0x900000c9, + 0xd30000cb, 0xd60000cd, 0xd70000ce, 0xe00000d3, 0xe20000d4, 0x990000d6, 0x9e0000d7, 0xe90000da, + 0x9a0000dc, 0xed0000dd, 0xe10000df, 0xa00000e1, 0x830000e2, 0x840000e4, 0x870000e7, 0x820000e9, + 
0x890000eb, 0xa10000ed, 0x8c0000ee, 0xa20000f3, 0x930000f4, 0x940000f6, 0xf60000f7, 0xa30000fa, + 0x810000fc, 0xec0000fd, 0xc6000102, 0xc7000103, 0xa4000104, 0xa5000105, 0x8f000106, 0x86000107, + 0xac00010c, 0x9f00010d, 0xd200010e, 0xd400010f, 0xd1000110, 0xd0000111, 0xa8000118, 0xa9000119, + 0xb700011a, 0xd800011b, 0x91000139, 0x9200013a, 0x9500013d, 0x9600013e, 0x9d000141, 0x88000142, + 0xe3000143, 0xe4000144, 0xd5000147, 0xe5000148, 0x8a000150, 0x8b000151, 0xe8000154, 0xea000155, + 0xfc000158, 0xfd000159, 0x9700015a, 0x9800015b, 0xb800015e, 0xad00015f, 0xe6000160, 0xe7000161, + 0xdd000162, 0xee000163, 0x9b000164, 0x9c000165, 0xde00016e, 0x8500016f, 0xeb000170, 0xfb000171, + 0x8d000179, 0xab00017a, 0xbd00017b, 0xbe00017c, 0xa600017d, 0xa700017e, 0xf30002c7, 0xf40002d8, + 0xfa0002d9, 0xf20002db, 0xf10002dd, 0xc4002500, 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, + 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, + 0xc9002554, 0xbb002557, 0xc800255a, 0xbc00255d, 0xcc002560, 0xb9002563, 0xcb002566, 0xca002569, + 0xce00256c, 0xdf002580, 0xdc002584, 0xdb002588, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage855 is the IBM Code Page 855 encoding. +var CodePage855 *Charmap = &codePage855 + +var codePage855 = Charmap{ + name: "IBM Code Page 855", + mib: identifier.IBM855, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, 
+ {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd1, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x82, 0x00}}, + {2, [3]byte{0xd1, 0x93, 0x00}}, {2, [3]byte{0xd0, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x91, 0x00}}, {2, [3]byte{0xd0, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x84, 0x00}}, + {2, [3]byte{0xd1, 0x95, 0x00}}, {2, [3]byte{0xd0, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x86, 0x00}}, + {2, [3]byte{0xd1, 0x97, 0x00}}, {2, [3]byte{0xd0, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x88, 0x00}}, + {2, [3]byte{0xd1, 0x99, 0x00}}, {2, [3]byte{0xd0, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x8a, 0x00}}, + {2, [3]byte{0xd1, 0x9b, 0x00}}, {2, [3]byte{0xd0, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x8c, 0x00}}, + {2, [3]byte{0xd1, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x8e, 0x00}}, + {2, [3]byte{0xd1, 0x9f, 0x00}}, {2, [3]byte{0xd0, 0x8f, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd0, 0xae, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd0, 0xaa, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 0x00}}, {2, [3]byte{0xd0, 0x90, 0x00}}, + {2, [3]byte{0xd0, 0xb1, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd0, 0xa6, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0x94, 0x00}}, + {2, [3]byte{0xd0, 0xb5, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd0, 
0xa4, 0x00}}, + {2, [3]byte{0xd0, 0xb3, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd0, 0xa5, 0x00}}, {2, [3]byte{0xd0, 0xb8, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0x99, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 0x9a, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xd0, 0xbb, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0x9c, 0x00}}, + {2, [3]byte{0xd0, 0xbd, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0x9e, 0x00}}, + {2, [3]byte{0xd0, 0xbf, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd1, 0x8f, 0x00}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xd0, 0xaf, 0x00}}, {2, [3]byte{0xd1, 0x80, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd0, 0xa1, 0x00}}, {2, [3]byte{0xd1, 0x82, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd0, 0xa3, 0x00}}, {2, [3]byte{0xd0, 0xb6, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0xb2, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd1, 0x8c, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {3, [3]byte{0xe2, 0x84, 0x96}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd0, 0xab, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0x97, 0x00}}, {2, [3]byte{0xd1, 0x88, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd0, 0xad, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd0, 0xa9, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd0, 0xa7, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xcf0000a4, 0xfd0000a7, 0xae0000ab, 0xf00000ad, 0xaf0000bb, 0x85000401, 0x81000402, + 0x83000403, 0x87000404, 0x89000405, 0x8b000406, 0x8d000407, 0x8f000408, 0x91000409, 0x9300040a, + 0x9500040b, 0x9700040c, 0x9900040e, 0x9b00040f, 0xa1000410, 0xa3000411, 0xec000412, 0xad000413, + 0xa7000414, 0xa9000415, 0xea000416, 0xf4000417, 0xb8000418, 0xbe000419, 0xc700041a, 0xd100041b, + 0xd300041c, 0xd500041d, 0xd700041e, 0xdd00041f, 0xe2000420, 0xe4000421, 0xe6000422, 0xe8000423, + 0xab000424, 0xb6000425, 0xa5000426, 0xfc000427, 0xf6000428, 0xfa000429, 0x9f00042a, 0xf200042b, + 0xee00042c, 0xf800042d, 0x9d00042e, 0xe000042f, 0xa0000430, 0xa2000431, 0xeb000432, 0xac000433, + 0xa6000434, 0xa8000435, 0xe9000436, 0xf3000437, 0xb7000438, 0xbd000439, 0xc600043a, 0xd000043b, + 0xd200043c, 0xd400043d, 0xd600043e, 0xd800043f, 0xe1000440, 0xe3000441, 0xe5000442, 0xe7000443, + 0xaa000444, 0xb5000445, 0xa4000446, 0xfb000447, 0xf5000448, 0xf9000449, 0x9e00044a, 0xf100044b, + 0xed00044c, 0xf700044d, 0x9c00044e, 0xde00044f, 0x84000451, 0x80000452, 0x82000453, 0x86000454, + 0x88000455, 0x8a000456, 0x8c000457, 0x8e000458, 0x90000459, 0x9200045a, 0x9400045b, 0x9600045c, + 0x9800045e, 0x9a00045f, 0xef002116, 0xc4002500, 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, + 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, + 0xc9002554, 0xbb002557, 0xc800255a, 0xbc00255d, 0xcc002560, 0xb9002563, 0xcb002566, 0xca002569, + 0xce00256c, 0xdf002580, 0xdc002584, 0xdb002588, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage858 is the Windows Code Page 858 encoding. 
+var CodePage858 *Charmap = &codePage858 + +var codePage858 = Charmap{ + name: "Windows Code Page 858", + mib: identifier.IBM00858, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x98, 0x00}}, + {2, [3]byte{0xc3, 0x97, 0x00}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x80, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 
0x00}}, {2, [3]byte{0xc3, 0x90, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {3, [3]byte{0xe2, 0x82, 0xac}}, + {2, [3]byte{0xc3, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0x8e, 0x00}}, + {2, [3]byte{0xc3, 0x8f, 0x00}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {2, [3]byte{0xc2, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0xbe, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9a, 0x00}}, + {2, [3]byte{0xc3, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0xbd, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x97}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0xbd0000a2, 0x9c0000a3, 0xcf0000a4, 0xbe0000a5, 0xdd0000a6, 0xf50000a7, + 0xf90000a8, 0xb80000a9, 0xa60000aa, 0xae0000ab, 0xaa0000ac, 0xf00000ad, 0xa90000ae, 0xee0000af, + 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xfc0000b3, 0xef0000b4, 0xe60000b5, 0xf40000b6, 0xfa0000b7, + 0xf70000b8, 0xfb0000b9, 0xa70000ba, 0xaf0000bb, 0xac0000bc, 0xab0000bd, 0xf30000be, 0xa80000bf, + 
0xb70000c0, 0xb50000c1, 0xb60000c2, 0xc70000c3, 0x8e0000c4, 0x8f0000c5, 0x920000c6, 0x800000c7, + 0xd40000c8, 0x900000c9, 0xd20000ca, 0xd30000cb, 0xde0000cc, 0xd60000cd, 0xd70000ce, 0xd80000cf, + 0xd10000d0, 0xa50000d1, 0xe30000d2, 0xe00000d3, 0xe20000d4, 0xe50000d5, 0x990000d6, 0x9e0000d7, + 0x9d0000d8, 0xeb0000d9, 0xe90000da, 0xea0000db, 0x9a0000dc, 0xed0000dd, 0xe80000de, 0xe10000df, + 0x850000e0, 0xa00000e1, 0x830000e2, 0xc60000e3, 0x840000e4, 0x860000e5, 0x910000e6, 0x870000e7, + 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8d0000ec, 0xa10000ed, 0x8c0000ee, 0x8b0000ef, + 0xd00000f0, 0xa40000f1, 0x950000f2, 0xa20000f3, 0x930000f4, 0xe40000f5, 0x940000f6, 0xf60000f7, + 0x9b0000f8, 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0xec0000fd, 0xe70000fe, 0x980000ff, + 0x9f000192, 0xf2002017, 0xd50020ac, 0xc4002500, 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, + 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, + 0xc9002554, 0xbb002557, 0xc800255a, 0xbc00255d, 0xcc002560, 0xb9002563, 0xcb002566, 0xca002569, + 0xce00256c, 0xdf002580, 0xdc002584, 0xdb002588, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage860 is the IBM Code Page 860 encoding. +var CodePage860 *Charmap = &codePage860 + +var codePage860 = Charmap{ + name: "IBM Code Page 860", + mib: identifier.IBM860, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, 
+ {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0x81, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0x8a, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0x80, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xa7}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 
0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0x9b0000a2, 0x9c0000a3, 0xa60000aa, 0xae0000ab, 0xaa0000ac, 0xf80000b0, + 0xf10000b1, 0xfd0000b2, 0xe60000b5, 0xfa0000b7, 0xa70000ba, 0xaf0000bb, 0xac0000bc, 0xab0000bd, + 0xa80000bf, 0x910000c0, 0x860000c1, 0x8f0000c2, 0x8e0000c3, 0x800000c7, 0x920000c8, 0x900000c9, + 0x890000ca, 0x980000cc, 0x8b0000cd, 0xa50000d1, 0xa90000d2, 0x9f0000d3, 0x8c0000d4, 0x990000d5, + 0x9d0000d9, 0x960000da, 0x9a0000dc, 0xe10000df, 0x850000e0, 0xa00000e1, 0x830000e2, 0x840000e3, + 0x870000e7, 0x8a0000e8, 0x820000e9, 0x880000ea, 0x8d0000ec, 0xa10000ed, 0xa40000f1, 0x950000f2, + 0xa20000f3, 0x930000f4, 0x940000f5, 0xf60000f7, 0x970000f9, 0xa30000fa, 0x810000fc, 0xe2000393, + 0xe9000398, 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, + 0xe50003c3, 0xe70003c4, 0xed0003c6, 0xfc00207f, 0x9e0020a7, 0xf9002219, 0xfb00221a, 0xec00221e, + 0xef002229, 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage862 is the IBM Code Page 862 encoding. 
+var CodePage862 *Charmap = &codePage862 + +var codePage862 = Charmap{ + name: "IBM Code Page 862", + mib: identifier.PC862LatinHebrew, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd7, 0x90, 0x00}}, {2, [3]byte{0xd7, 0x91, 0x00}}, + {2, [3]byte{0xd7, 0x92, 0x00}}, {2, [3]byte{0xd7, 0x93, 0x00}}, + {2, [3]byte{0xd7, 0x94, 0x00}}, {2, [3]byte{0xd7, 0x95, 0x00}}, + {2, [3]byte{0xd7, 0x96, 0x00}}, {2, [3]byte{0xd7, 0x97, 0x00}}, + {2, [3]byte{0xd7, 0x98, 0x00}}, {2, [3]byte{0xd7, 0x99, 0x00}}, + {2, [3]byte{0xd7, 0x9a, 0x00}}, {2, [3]byte{0xd7, 0x9b, 0x00}}, + {2, [3]byte{0xd7, 0x9c, 0x00}}, {2, [3]byte{0xd7, 0x9d, 0x00}}, + {2, [3]byte{0xd7, 0x9e, 0x00}}, {2, [3]byte{0xd7, 0x9f, 0x00}}, + {2, [3]byte{0xd7, 0xa0, 0x00}}, {2, [3]byte{0xd7, 0xa1, 0x00}}, + {2, [3]byte{0xd7, 0xa2, 0x00}}, {2, [3]byte{0xd7, 0xa3, 0x00}}, + {2, [3]byte{0xd7, 0xa4, 0x00}}, {2, [3]byte{0xd7, 0xa5, 0x00}}, + {2, [3]byte{0xd7, 0xa6, 0x00}}, {2, [3]byte{0xd7, 0xa7, 0x00}}, + {2, [3]byte{0xd7, 0xa8, 0x00}}, {2, [3]byte{0xd7, 0xa9, 0x00}}, + {2, [3]byte{0xd7, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xa7}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0x90}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 
0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0x9b0000a2, 0x9c0000a3, 0x9d0000a5, 0xa60000aa, 0xae0000ab, 0xaa0000ac, + 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xe60000b5, 0xfa0000b7, 0xa70000ba, 0xaf0000bb, 0xac0000bc, + 0xab0000bd, 0xa80000bf, 0xa50000d1, 0xe10000df, 0xa00000e1, 0xa10000ed, 0xa40000f1, 0xa20000f3, + 0xf60000f7, 0xa30000fa, 0x9f000192, 0xe2000393, 0xe9000398, 0xe40003a3, 0xe80003a6, 0xea0003a9, + 
0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, 0xe50003c3, 0xe70003c4, 0xed0003c6, 0x800005d0, + 0x810005d1, 0x820005d2, 0x830005d3, 0x840005d4, 0x850005d5, 0x860005d6, 0x870005d7, 0x880005d8, + 0x890005d9, 0x8a0005da, 0x8b0005db, 0x8c0005dc, 0x8d0005dd, 0x8e0005de, 0x8f0005df, 0x900005e0, + 0x910005e1, 0x920005e2, 0x930005e3, 0x940005e4, 0x950005e5, 0x960005e6, 0x970005e7, 0x980005e8, + 0x990005e9, 0x9a0005ea, 0xfc00207f, 0x9e0020a7, 0xf9002219, 0xfb00221a, 0xec00221e, 0xef002229, + 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xa9002310, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage863 is the IBM Code Page 863 encoding. +var CodePage863 *Charmap = &codePage863 + +var codePage863 = Charmap{ + name: "IBM Code Page 863", + mib: identifier.IBM863, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, 
+ {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x97}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0x88, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0x8b, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0xa2, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9b, 0x00}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0x90}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 
0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0x9b0000a2, 0x9c0000a3, 0x980000a4, 0xa00000a6, 0x8f0000a7, 0xa40000a8, 0xae0000ab, + 0xaa0000ac, 0xa70000af, 0xf80000b0, 0xf10000b1, 0xfd0000b2, 0xa60000b3, 0xa10000b4, 0xe60000b5, + 0x860000b6, 0xfa0000b7, 0xa50000b8, 0xaf0000bb, 0xac0000bc, 0xab0000bd, 0xad0000be, 0x8e0000c0, + 0x840000c2, 0x800000c7, 0x910000c8, 0x900000c9, 0x920000ca, 0x940000cb, 0xa80000ce, 0x950000cf, + 0x990000d4, 0x9d0000d9, 0x9e0000db, 0x9a0000dc, 0xe10000df, 0x850000e0, 0x830000e2, 0x870000e7, + 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8c0000ee, 0x8b0000ef, 0xa20000f3, 0x930000f4, + 0xf60000f7, 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0x9f000192, 0xe2000393, 0xe9000398, + 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, 0xe50003c3, + 0xe70003c4, 0xed0003c6, 0x8d002017, 0xfc00207f, 0xf9002219, 0xfb00221a, 0xec00221e, 0xef002229, + 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xa9002310, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage865 is the IBM Code Page 865 encoding. 
+var CodePage865 *Charmap = &codePage865 + +var codePage865 = Charmap{ + name: "IBM Code Page 865", + mib: identifier.IBM865, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0x98, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xa7}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0xa1, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0x90}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 
0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xce, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0x93, 0x00}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {2, [3]byte{0xce, 0xa3, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {2, [3]byte{0xcf, 0x84, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0x98, 0x00}}, + {2, [3]byte{0xce, 0xa9, 0x00}}, {2, [3]byte{0xce, 0xb4, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xcf, 0x86, 0x00}}, + {2, [3]byte{0xce, 0xb5, 0x00}}, {3, [3]byte{0xe2, 0x88, 0xa9}}, + {3, [3]byte{0xe2, 0x89, 0xa1}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa5}}, {3, [3]byte{0xe2, 0x89, 0xa4}}, + {3, [3]byte{0xe2, 0x8c, 0xa0}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x81, 0xbf}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xad0000a1, 0x9c0000a3, 0xaf0000a4, 0xa60000aa, 0xae0000ab, 0xaa0000ac, 0xf80000b0, + 0xf10000b1, 0xfd0000b2, 0xe60000b5, 0xfa0000b7, 0xa70000ba, 0xac0000bc, 0xab0000bd, 0xa80000bf, + 0x8e0000c4, 0x8f0000c5, 0x920000c6, 0x800000c7, 0x900000c9, 0xa50000d1, 0x990000d6, 0x9d0000d8, + 0x9a0000dc, 0xe10000df, 0x850000e0, 0xa00000e1, 0x830000e2, 0x840000e4, 0x860000e5, 0x910000e6, + 
0x870000e7, 0x8a0000e8, 0x820000e9, 0x880000ea, 0x890000eb, 0x8d0000ec, 0xa10000ed, 0x8c0000ee, + 0x8b0000ef, 0xa40000f1, 0x950000f2, 0xa20000f3, 0x930000f4, 0x940000f6, 0xf60000f7, 0x9b0000f8, + 0x970000f9, 0xa30000fa, 0x960000fb, 0x810000fc, 0x980000ff, 0x9f000192, 0xe2000393, 0xe9000398, + 0xe40003a3, 0xe80003a6, 0xea0003a9, 0xe00003b1, 0xeb0003b4, 0xee0003b5, 0xe30003c0, 0xe50003c3, + 0xe70003c4, 0xed0003c6, 0xfc00207f, 0x9e0020a7, 0xf9002219, 0xfb00221a, 0xec00221e, 0xef002229, + 0xf7002248, 0xf0002261, 0xf3002264, 0xf2002265, 0xa9002310, 0xf4002320, 0xf5002321, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage866 is the IBM Code Page 866 encoding. +var CodePage866 *Charmap = &codePage866 + +var codePage866 = Charmap{ + name: "IBM Code Page 866", + mib: identifier.IBM866, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, 
+ {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd0, 0x90, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0xa5, 0x00}}, + {2, [3]byte{0xd0, 0xa6, 0x00}}, {2, [3]byte{0xd0, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xaa, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 0x00}}, {2, [3]byte{0xd0, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0xb2, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0xb8, 0x00}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 
0xbb, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0xbd, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0xbf, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa1}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0x91}}, {3, [3]byte{0xe2, 0x95, 0x97}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0x9c}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0xbc}}, + {3, [3]byte{0xe2, 0x95, 0x9e}}, {3, [3]byte{0xe2, 0x95, 0x9f}}, + {3, [3]byte{0xe2, 0x95, 0x9a}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0xa9}}, {3, [3]byte{0xe2, 0x95, 0xa6}}, + {3, [3]byte{0xe2, 0x95, 0xa0}}, {3, [3]byte{0xe2, 0x95, 0x90}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa4}}, + {3, [3]byte{0xe2, 0x95, 0xa5}}, {3, [3]byte{0xe2, 0x95, 0x99}}, + {3, [3]byte{0xe2, 0x95, 0x98}}, {3, [3]byte{0xe2, 0x95, 0x92}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x8c}}, + {3, [3]byte{0xe2, 0x96, 0x90}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x88, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + {2, [3]byte{0xd0, 0x81, 0x00}}, {2, [3]byte{0xd1, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x94, 0x00}}, + {2, [3]byte{0xd0, 0x87, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x9e, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {3, [3]byte{0xe2, 0x84, 0x96}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xff0000a0, 0xfd0000a4, 0xf80000b0, 0xfa0000b7, 0xf0000401, 0xf2000404, 0xf4000407, 0xf600040e, + 0x80000410, 0x81000411, 0x82000412, 0x83000413, 0x84000414, 0x85000415, 0x86000416, 0x87000417, + 0x88000418, 0x89000419, 0x8a00041a, 0x8b00041b, 0x8c00041c, 0x8d00041d, 0x8e00041e, 0x8f00041f, + 0x90000420, 0x91000421, 0x92000422, 0x93000423, 0x94000424, 0x95000425, 0x96000426, 0x97000427, + 0x98000428, 0x99000429, 0x9a00042a, 0x9b00042b, 0x9c00042c, 0x9d00042d, 0x9e00042e, 0x9f00042f, + 0xa0000430, 0xa1000431, 0xa2000432, 0xa3000433, 0xa4000434, 0xa5000435, 0xa6000436, 0xa7000437, + 0xa8000438, 0xa9000439, 0xaa00043a, 0xab00043b, 0xac00043c, 0xad00043d, 0xae00043e, 0xaf00043f, + 0xe0000440, 0xe1000441, 0xe2000442, 0xe3000443, 0xe4000444, 0xe5000445, 0xe6000446, 0xe7000447, + 0xe8000448, 0xe9000449, 0xea00044a, 0xeb00044b, 0xec00044c, 0xed00044d, 0xee00044e, 0xef00044f, + 0xf1000451, 0xf3000454, 0xf5000457, 0xf700045e, 0xfc002116, 0xf9002219, 0xfb00221a, 0xc4002500, + 0xb3002502, 0xda00250c, 0xbf002510, 0xc0002514, 0xd9002518, 0xc300251c, 0xb4002524, 0xc200252c, + 0xc1002534, 0xc500253c, 0xcd002550, 0xba002551, 0xd5002552, 0xd6002553, 0xc9002554, 0xb8002555, + 0xb7002556, 0xbb002557, 0xd4002558, 0xd3002559, 0xc800255a, 0xbe00255b, 0xbd00255c, 0xbc00255d, + 0xc600255e, 0xc700255f, 0xcc002560, 0xb5002561, 0xb6002562, 0xb9002563, 0xd1002564, 0xd2002565, + 0xcb002566, 0xcf002567, 0xd0002568, 0xca002569, 0xd800256a, 0xd700256b, 0xce00256c, 0xdf002580, + 0xdc002584, 0xdb002588, 0xdd00258c, 0xde002590, 0xb0002591, 0xb1002592, 0xb2002593, 0xfe0025a0, + }, +} + +// CodePage1047 is the IBM Code Page 1047 encoding. 
+var CodePage1047 *Charmap = &codePage1047 + +var codePage1047 = Charmap{ + name: "IBM Code Page 1047", + mib: identifier.IBM1047, + asciiSuperset: false, + low: 0x00, + replacement: 0x3f, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x97, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {1, [3]byte{0x0a, 0x00, 0x00}}, + {1, [3]byte{0x17, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {1, [3]byte{0x04, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {1, [3]byte{0x1a, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa7, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {1, [3]byte{0x2e, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x28, 0x00, 0x00}}, + {1, [3]byte{0x2b, 0x00, 0x00}}, {1, [3]byte{0x7c, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {1, [3]byte{0x21, 0x00, 0x00}}, {1, [3]byte{0x24, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x3b, 0x00, 0x00}}, {1, [3]byte{0x5e, 0x00, 0x00}}, + {1, [3]byte{0x2d, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, 
[3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {1, [3]byte{0x2c, 0x00, 0x00}}, + {1, [3]byte{0x25, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {1, [3]byte{0x60, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x3d, 0x00, 0x00}}, {1, [3]byte{0x22, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {1, [3]byte{0x6a, 0x00, 0x00}}, + {1, [3]byte{0x6b, 0x00, 0x00}}, {1, [3]byte{0x6c, 0x00, 0x00}}, + {1, [3]byte{0x6d, 0x00, 0x00}}, {1, [3]byte{0x6e, 0x00, 0x00}}, + {1, [3]byte{0x6f, 0x00, 0x00}}, {1, [3]byte{0x70, 0x00, 0x00}}, + {1, [3]byte{0x71, 0x00, 0x00}}, {1, [3]byte{0x72, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc2, 0xa4, 0x00}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {1, [3]byte{0x7e, 0x00, 0x00}}, + {1, [3]byte{0x73, 0x00, 0x00}}, {1, [3]byte{0x74, 0x00, 0x00}}, + {1, [3]byte{0x75, 0x00, 0x00}}, {1, [3]byte{0x76, 0x00, 0x00}}, + {1, [3]byte{0x77, 0x00, 0x00}}, {1, [3]byte{0x78, 0x00, 0x00}}, + {1, [3]byte{0x79, 0x00, 0x00}}, {1, [3]byte{0x7a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xa1, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xbc, 0x00}}, + {2, [3]byte{0xc2, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {2, [3]byte{0xc3, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {1, [3]byte{0x7b, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {1, [3]byte{0x7d, 0x00, 
0x00}}, {1, [3]byte{0x4a, 0x00, 0x00}}, + {1, [3]byte{0x4b, 0x00, 0x00}}, {1, [3]byte{0x4c, 0x00, 0x00}}, + {1, [3]byte{0x4d, 0x00, 0x00}}, {1, [3]byte{0x4e, 0x00, 0x00}}, + {1, [3]byte{0x4f, 0x00, 0x00}}, {1, [3]byte{0x50, 0x00, 0x00}}, + {1, [3]byte{0x51, 0x00, 0x00}}, {1, [3]byte{0x52, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb9, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {1, [3]byte{0x53, 0x00, 0x00}}, {1, [3]byte{0x54, 0x00, 0x00}}, + {1, [3]byte{0x55, 0x00, 0x00}}, {1, [3]byte{0x56, 0x00, 0x00}}, + {1, [3]byte{0x57, 0x00, 0x00}}, {1, [3]byte{0x58, 0x00, 0x00}}, + {1, [3]byte{0x59, 0x00, 0x00}}, {1, [3]byte{0x5a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x37000004, 0x2d000005, 0x2e000006, 0x2f000007, + 0x16000008, 0x05000009, 0x2500000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x3c000014, 0x3d000015, 0x32000016, 0x26000017, + 0x18000018, 0x19000019, 0x3f00001a, 0x2700001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x40000020, 0x5a000021, 0x7f000022, 0x7b000023, 0x5b000024, 0x6c000025, 0x50000026, 0x7d000027, + 0x4d000028, 0x5d000029, 0x5c00002a, 0x4e00002b, 0x6b00002c, 0x6000002d, 0x4b00002e, 0x6100002f, + 0xf0000030, 0xf1000031, 0xf2000032, 0xf3000033, 0xf4000034, 0xf5000035, 0xf6000036, 0xf7000037, + 0xf8000038, 0xf9000039, 0x7a00003a, 0x5e00003b, 0x4c00003c, 0x7e00003d, 0x6e00003e, 0x6f00003f, + 0x7c000040, 0xc1000041, 0xc2000042, 0xc3000043, 0xc4000044, 0xc5000045, 0xc6000046, 0xc7000047, + 0xc8000048, 0xc9000049, 0xd100004a, 0xd200004b, 0xd300004c, 0xd400004d, 0xd500004e, 0xd600004f, + 0xd7000050, 0xd8000051, 0xd9000052, 0xe2000053, 0xe3000054, 0xe4000055, 0xe5000056, 0xe6000057, + 0xe7000058, 0xe8000059, 0xe900005a, 0xad00005b, 0xe000005c, 0xbd00005d, 0x5f00005e, 0x6d00005f, + 0x79000060, 0x81000061, 0x82000062, 0x83000063, 0x84000064, 0x85000065, 0x86000066, 0x87000067, + 0x88000068, 0x89000069, 0x9100006a, 0x9200006b, 0x9300006c, 0x9400006d, 0x9500006e, 0x9600006f, + 0x97000070, 0x98000071, 0x99000072, 0xa2000073, 0xa3000074, 0xa4000075, 0xa5000076, 0xa6000077, + 0xa7000078, 0xa8000079, 0xa900007a, 0xc000007b, 0x4f00007c, 0xd000007d, 0xa100007e, 0x0700007f, + 0x20000080, 0x21000081, 0x22000082, 0x23000083, 0x24000084, 0x15000085, 0x06000086, 0x17000087, + 0x28000088, 0x29000089, 0x2a00008a, 0x2b00008b, 0x2c00008c, 0x0900008d, 0x0a00008e, 0x1b00008f, + 0x30000090, 0x31000091, 0x1a000092, 0x33000093, 0x34000094, 0x35000095, 0x36000096, 0x08000097, + 0x38000098, 0x39000099, 0x3a00009a, 0x3b00009b, 0x0400009c, 0x1400009d, 0x3e00009e, 0xff00009f, + 
0x410000a0, 0xaa0000a1, 0x4a0000a2, 0xb10000a3, 0x9f0000a4, 0xb20000a5, 0x6a0000a6, 0xb50000a7, + 0xbb0000a8, 0xb40000a9, 0x9a0000aa, 0x8a0000ab, 0xb00000ac, 0xca0000ad, 0xaf0000ae, 0xbc0000af, + 0x900000b0, 0x8f0000b1, 0xea0000b2, 0xfa0000b3, 0xbe0000b4, 0xa00000b5, 0xb60000b6, 0xb30000b7, + 0x9d0000b8, 0xda0000b9, 0x9b0000ba, 0x8b0000bb, 0xb70000bc, 0xb80000bd, 0xb90000be, 0xab0000bf, + 0x640000c0, 0x650000c1, 0x620000c2, 0x660000c3, 0x630000c4, 0x670000c5, 0x9e0000c6, 0x680000c7, + 0x740000c8, 0x710000c9, 0x720000ca, 0x730000cb, 0x780000cc, 0x750000cd, 0x760000ce, 0x770000cf, + 0xac0000d0, 0x690000d1, 0xed0000d2, 0xee0000d3, 0xeb0000d4, 0xef0000d5, 0xec0000d6, 0xbf0000d7, + 0x800000d8, 0xfd0000d9, 0xfe0000da, 0xfb0000db, 0xfc0000dc, 0xba0000dd, 0xae0000de, 0x590000df, + 0x440000e0, 0x450000e1, 0x420000e2, 0x460000e3, 0x430000e4, 0x470000e5, 0x9c0000e6, 0x480000e7, + 0x540000e8, 0x510000e9, 0x520000ea, 0x530000eb, 0x580000ec, 0x550000ed, 0x560000ee, 0x570000ef, + 0x8c0000f0, 0x490000f1, 0xcd0000f2, 0xce0000f3, 0xcb0000f4, 0xcf0000f5, 0xcc0000f6, 0xe10000f7, + 0x700000f8, 0xdd0000f9, 0xde0000fa, 0xdb0000fb, 0xdc0000fc, 0x8d0000fd, 0x8e0000fe, 0xdf0000ff, + }, +} + +// CodePage1140 is the IBM Code Page 1140 encoding. +var CodePage1140 *Charmap = &codePage1140 + +var codePage1140 = Charmap{ + name: "IBM Code Page 1140", + mib: identifier.IBM01140, + asciiSuperset: false, + low: 0x00, + replacement: 0x3f, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x97, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9d, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {1, [3]byte{0x0a, 0x00, 0x00}}, + {1, [3]byte{0x17, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {1, [3]byte{0x04, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {1, [3]byte{0x1a, 0x00, 
0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa4, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa7, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {1, [3]byte{0x2e, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x28, 0x00, 0x00}}, + {1, [3]byte{0x2b, 0x00, 0x00}}, {1, [3]byte{0x7c, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {1, [3]byte{0x21, 0x00, 0x00}}, {1, [3]byte{0x24, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x3b, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xac, 0x00}}, + {1, [3]byte{0x2d, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {1, [3]byte{0x2c, 0x00, 0x00}}, + {1, [3]byte{0x25, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {1, [3]byte{0x60, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x3d, 0x00, 0x00}}, {1, [3]byte{0x22, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xab, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {1, [3]byte{0x6a, 0x00, 0x00}}, + {1, [3]byte{0x6b, 0x00, 0x00}}, {1, [3]byte{0x6c, 0x00, 0x00}}, + {1, [3]byte{0x6d, 0x00, 0x00}}, {1, [3]byte{0x6e, 0x00, 0x00}}, + {1, [3]byte{0x6f, 0x00, 0x00}}, {1, [3]byte{0x70, 0x00, 0x00}}, + {1, [3]byte{0x71, 0x00, 0x00}}, {1, [3]byte{0x72, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xba, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x82, 0xac}}, + {2, [3]byte{0xc2, 0xb5, 0x00}}, {1, [3]byte{0x7e, 0x00, 0x00}}, + {1, [3]byte{0x73, 0x00, 0x00}}, {1, [3]byte{0x74, 0x00, 0x00}}, + {1, [3]byte{0x75, 0x00, 0x00}}, {1, [3]byte{0x76, 0x00, 0x00}}, + {1, [3]byte{0x77, 0x00, 0x00}}, {1, [3]byte{0x78, 0x00, 0x00}}, + {1, [3]byte{0x79, 0x00, 0x00}}, {1, [3]byte{0x7a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xa1, 0x00}}, {2, 
[3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xae, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xa9, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xbc, 0x00}}, + {2, [3]byte{0xc2, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xbe, 0x00}}, + {1, [3]byte{0x5b, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {1, [3]byte{0x7b, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {1, [3]byte{0x7d, 0x00, 0x00}}, {1, [3]byte{0x4a, 0x00, 0x00}}, + {1, [3]byte{0x4b, 0x00, 0x00}}, {1, [3]byte{0x4c, 0x00, 0x00}}, + {1, [3]byte{0x4d, 0x00, 0x00}}, {1, [3]byte{0x4e, 0x00, 0x00}}, + {1, [3]byte{0x4f, 0x00, 0x00}}, {1, [3]byte{0x50, 0x00, 0x00}}, + {1, [3]byte{0x51, 0x00, 0x00}}, {1, [3]byte{0x52, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb9, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {1, [3]byte{0x53, 0x00, 0x00}}, {1, [3]byte{0x54, 0x00, 0x00}}, + {1, [3]byte{0x55, 0x00, 0x00}}, {1, [3]byte{0x56, 0x00, 0x00}}, + {1, [3]byte{0x57, 0x00, 0x00}}, {1, [3]byte{0x58, 0x00, 0x00}}, + {1, [3]byte{0x59, 0x00, 0x00}}, {1, [3]byte{0x5a, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0xb3, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x37000004, 0x2d000005, 0x2e000006, 0x2f000007, + 0x16000008, 0x05000009, 0x2500000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x3c000014, 0x3d000015, 0x32000016, 0x26000017, + 0x18000018, 0x19000019, 0x3f00001a, 0x2700001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x40000020, 0x5a000021, 0x7f000022, 0x7b000023, 0x5b000024, 0x6c000025, 0x50000026, 0x7d000027, + 0x4d000028, 0x5d000029, 0x5c00002a, 0x4e00002b, 0x6b00002c, 0x6000002d, 0x4b00002e, 0x6100002f, + 0xf0000030, 0xf1000031, 0xf2000032, 0xf3000033, 0xf4000034, 0xf5000035, 0xf6000036, 0xf7000037, + 0xf8000038, 0xf9000039, 0x7a00003a, 
0x5e00003b, 0x4c00003c, 0x7e00003d, 0x6e00003e, 0x6f00003f, + 0x7c000040, 0xc1000041, 0xc2000042, 0xc3000043, 0xc4000044, 0xc5000045, 0xc6000046, 0xc7000047, + 0xc8000048, 0xc9000049, 0xd100004a, 0xd200004b, 0xd300004c, 0xd400004d, 0xd500004e, 0xd600004f, + 0xd7000050, 0xd8000051, 0xd9000052, 0xe2000053, 0xe3000054, 0xe4000055, 0xe5000056, 0xe6000057, + 0xe7000058, 0xe8000059, 0xe900005a, 0xba00005b, 0xe000005c, 0xbb00005d, 0xb000005e, 0x6d00005f, + 0x79000060, 0x81000061, 0x82000062, 0x83000063, 0x84000064, 0x85000065, 0x86000066, 0x87000067, + 0x88000068, 0x89000069, 0x9100006a, 0x9200006b, 0x9300006c, 0x9400006d, 0x9500006e, 0x9600006f, + 0x97000070, 0x98000071, 0x99000072, 0xa2000073, 0xa3000074, 0xa4000075, 0xa5000076, 0xa6000077, + 0xa7000078, 0xa8000079, 0xa900007a, 0xc000007b, 0x4f00007c, 0xd000007d, 0xa100007e, 0x0700007f, + 0x20000080, 0x21000081, 0x22000082, 0x23000083, 0x24000084, 0x15000085, 0x06000086, 0x17000087, + 0x28000088, 0x29000089, 0x2a00008a, 0x2b00008b, 0x2c00008c, 0x0900008d, 0x0a00008e, 0x1b00008f, + 0x30000090, 0x31000091, 0x1a000092, 0x33000093, 0x34000094, 0x35000095, 0x36000096, 0x08000097, + 0x38000098, 0x39000099, 0x3a00009a, 0x3b00009b, 0x0400009c, 0x1400009d, 0x3e00009e, 0xff00009f, + 0x410000a0, 0xaa0000a1, 0x4a0000a2, 0xb10000a3, 0xb20000a5, 0x6a0000a6, 0xb50000a7, 0xbd0000a8, + 0xb40000a9, 0x9a0000aa, 0x8a0000ab, 0x5f0000ac, 0xca0000ad, 0xaf0000ae, 0xbc0000af, 0x900000b0, + 0x8f0000b1, 0xea0000b2, 0xfa0000b3, 0xbe0000b4, 0xa00000b5, 0xb60000b6, 0xb30000b7, 0x9d0000b8, + 0xda0000b9, 0x9b0000ba, 0x8b0000bb, 0xb70000bc, 0xb80000bd, 0xb90000be, 0xab0000bf, 0x640000c0, + 0x650000c1, 0x620000c2, 0x660000c3, 0x630000c4, 0x670000c5, 0x9e0000c6, 0x680000c7, 0x740000c8, + 0x710000c9, 0x720000ca, 0x730000cb, 0x780000cc, 0x750000cd, 0x760000ce, 0x770000cf, 0xac0000d0, + 0x690000d1, 0xed0000d2, 0xee0000d3, 0xeb0000d4, 0xef0000d5, 0xec0000d6, 0xbf0000d7, 0x800000d8, + 0xfd0000d9, 0xfe0000da, 0xfb0000db, 0xfc0000dc, 0xad0000dd, 0xae0000de, 0x590000df, 0x440000e0, + 0x450000e1, 0x420000e2, 0x460000e3, 0x430000e4, 0x470000e5, 0x9c0000e6, 0x480000e7, 0x540000e8, + 0x510000e9, 0x520000ea, 0x530000eb, 0x580000ec, 0x550000ed, 0x560000ee, 0x570000ef, 0x8c0000f0, + 0x490000f1, 0xcd0000f2, 0xce0000f3, 0xcb0000f4, 0xcf0000f5, 0xcc0000f6, 0xe10000f7, 0x700000f8, + 0xdd0000f9, 0xde0000fa, 0xdb0000fb, 0xdc0000fc, 0x8d0000fd, 0x8e0000fe, 0xdf0000ff, 0x9f0020ac, + }, +} + +// ISO8859_1 is the ISO 8859-1 encoding. 
+var ISO8859_1 *Charmap = &iso8859_1 + +var iso8859_1 = Charmap{ + name: "ISO 8859-1", + mib: identifier.ISOLatin1, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 
0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {2, [3]byte{0xc2, 0x97, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0x9d, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, 
[3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x80000080, 0x81000081, 0x82000082, 0x83000083, 0x84000084, 0x85000085, 0x86000086, 0x87000087, + 0x88000088, 0x89000089, 0x8a00008a, 0x8b00008b, 0x8c00008c, 0x8d00008d, 0x8e00008e, 0x8f00008f, + 0x90000090, 0x91000091, 0x92000092, 0x93000093, 0x94000094, 0x95000095, 0x96000096, 0x97000097, + 0x98000098, 0x99000099, 0x9a00009a, 0x9b00009b, 0x9c00009c, 0x9d00009d, 0x9e00009e, 0x9f00009f, + 0xa00000a0, 
0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd00000d0, 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, + 0xd80000d8, 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdd0000dd, 0xde0000de, 0xdf0000df, + 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, + 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, + 0xf00000f0, 0xf10000f1, 0xf20000f2, 0xf30000f3, 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, + 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xfd0000fd, 0xfe0000fe, 0xff0000ff, + }, +} + +// ISO8859_2 is the ISO 8859-2 encoding. +var ISO8859_2 *Charmap = &iso8859_2 + +var iso8859_2 = Charmap{ + name: "ISO 8859-2", + mib: identifier.ISOLatin2, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0x84, 0x00}}, + {2, [3]byte{0xcb, 0x98, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0xbd, 0x00}}, + {2, [3]byte{0xc5, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc5, 0xa0, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc5, 0xa4, 0x00}}, + {2, 
[3]byte{0xc5, 0xb9, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc4, 0x85, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0x82, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc4, 0xbe, 0x00}}, + {2, [3]byte{0xc5, 0x9b, 0x00}}, {2, [3]byte{0xcb, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0xa1, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc5, 0xa5, 0x00}}, + {2, [3]byte{0xc5, 0xba, 0x00}}, {2, [3]byte{0xcb, 0x9d, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc4, 0xb9, 0x00}}, + {2, [3]byte{0xc4, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc4, 0x8e, 0x00}}, + {2, [3]byte{0xc4, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc5, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc5, 0x90, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc5, 0x98, 0x00}}, {2, [3]byte{0xc5, 0xae, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc5, 0xb0, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc5, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc5, 0x95, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0xba, 0x00}}, + {2, [3]byte{0xc4, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc4, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc4, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x91, 0x00}}, {2, [3]byte{0xc5, 0x84, 0x00}}, + {2, [3]byte{0xc5, 0x88, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc5, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0x99, 0x00}}, {2, [3]byte{0xc5, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc5, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc5, 0xa3, 0x00}}, {2, [3]byte{0xcb, 0x99, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa40000a4, 0xa70000a7, 0xa80000a8, 0xad0000ad, 0xb00000b0, 0xb40000b4, 0xb80000b8, + 0xc10000c1, 0xc20000c2, 0xc40000c4, 0xc70000c7, 0xc90000c9, 0xcb0000cb, 0xcd0000cd, 0xce0000ce, + 0xd30000d3, 0xd40000d4, 0xd60000d6, 0xd70000d7, 0xda0000da, 0xdc0000dc, 0xdd0000dd, 0xdf0000df, + 0xe10000e1, 0xe20000e2, 0xe40000e4, 0xe70000e7, 0xe90000e9, 0xeb0000eb, 0xed0000ed, 0xee0000ee, + 0xf30000f3, 0xf40000f4, 0xf60000f6, 0xf70000f7, 0xfa0000fa, 0xfc0000fc, 0xfd0000fd, 0xc3000102, + 0xe3000103, 0xa1000104, 0xb1000105, 0xc6000106, 0xe6000107, 0xc800010c, 0xe800010d, 0xcf00010e, + 0xef00010f, 0xd0000110, 0xf0000111, 0xca000118, 0xea000119, 0xcc00011a, 0xec00011b, 0xc5000139, + 0xe500013a, 0xa500013d, 0xb500013e, 0xa3000141, 0xb3000142, 0xd1000143, 0xf1000144, 0xd2000147, + 0xf2000148, 0xd5000150, 0xf5000151, 0xc0000154, 0xe0000155, 0xd8000158, 0xf8000159, 0xa600015a, + 0xb600015b, 0xaa00015e, 0xba00015f, 0xa9000160, 0xb9000161, 0xde000162, 0xfe000163, 0xab000164, + 0xbb000165, 0xd900016e, 0xf900016f, 0xdb000170, 0xfb000171, 0xac000179, 0xbc00017a, 0xaf00017b, + 0xbf00017c, 0xae00017d, 0xbe00017e, 0xb70002c7, 0xa20002d8, 0xff0002d9, 0xb20002db, 0xbd0002dd, + 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, + 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, + 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, + 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, 0xbd0002dd, + }, +} + +// ISO8859_3 is the ISO 8859-3 encoding. 
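+//
+// Each decode entry pairs a UTF-8 byte count with up to three UTF-8 bytes, so
+// decoding a source byte is one table lookup plus a copy of that many bytes.
+// The entry {3, [3]byte{0xef, 0xbf, 0xbd}} is U+FFFD, the Unicode replacement
+// character, and fills positions the charset leaves unassigned (for
+// ISO 8859-3 these include 0xA5 and 0xAE). A minimal decode step over these
+// tables might look like the sketch below; the field names are illustrative,
+// not necessarily those of the utf8Enc type defined earlier in this file:
+//
+//	e := m.decode[src[i]]                 // e.g. decode[0xA6] is {2, {0xc4, 0xa4, 0}}, "Ĥ"
+//	dst = append(dst, e.data[:e.size]...) // copy e.size bytes of UTF-8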
+var ISO8859_3 *Charmap = &iso8859_3 + +var iso8859_3 = Charmap{ + name: "ISO 8859-3", + mib: identifier.ISOLatin3, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 
0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0xa6, 0x00}}, + {2, [3]byte{0xcb, 0x98, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc4, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc4, 0xb0, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc4, 0x9e, 0x00}}, + {2, [3]byte{0xc4, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc4, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc4, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc4, 0x9f, 0x00}}, + {2, [3]byte{0xc4, 0xb5, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc4, 0x8a, 0x00}}, + {2, [3]byte{0xc4, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, 
[3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc4, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc4, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc5, 0xac, 0x00}}, + {2, [3]byte{0xc5, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x89, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc4, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc4, 0x9d, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc5, 0xad, 0x00}}, + {2, [3]byte{0xc5, 0x9d, 0x00}}, {2, [3]byte{0xcb, 0x99, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa30000a3, 0xa40000a4, 0xa70000a7, 0xa80000a8, 0xad0000ad, 0xb00000b0, 0xb20000b2, + 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb70000b7, 0xb80000b8, 0xbd0000bd, 0xc00000c0, 0xc10000c1, + 0xc20000c2, 0xc40000c4, 0xc70000c7, 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, + 0xcd0000cd, 0xce0000ce, 0xcf0000cf, 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd60000d6, + 0xd70000d7, 
0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdf0000df, 0xe00000e0, 0xe10000e1, + 0xe20000e2, 0xe40000e4, 0xe70000e7, 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xec0000ec, + 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, 0xf20000f2, 0xf30000f3, 0xf40000f4, 0xf60000f6, + 0xf70000f7, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xc6000108, 0xe6000109, 0xc500010a, + 0xe500010b, 0xd800011c, 0xf800011d, 0xab00011e, 0xbb00011f, 0xd5000120, 0xf5000121, 0xa6000124, + 0xb6000125, 0xa1000126, 0xb1000127, 0xa9000130, 0xb9000131, 0xac000134, 0xbc000135, 0xde00015c, + 0xfe00015d, 0xaa00015e, 0xba00015f, 0xdd00016c, 0xfd00016d, 0xaf00017b, 0xbf00017c, 0xa20002d8, + 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, + 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, + 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, + 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, + 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, 0xff0002d9, + }, +} + +// ISO8859_4 is the ISO 8859-4 encoding. +var ISO8859_4 *Charmap = &iso8859_4 + +var iso8859_4 = Charmap{ + name: "ISO 8859-4", + mib: identifier.ISOLatin4, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0x84, 0x00}}, + {2, [3]byte{0xc4, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0x96, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0xa8, 0x00}}, + {2, [3]byte{0xc4, 0xbb, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc5, 0xa0, 0x00}}, + {2, [3]byte{0xc4, 0x92, 0x00}}, {2, [3]byte{0xc4, 0xa2, 0x00}}, + {2, 
[3]byte{0xc5, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc4, 0x85, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0x97, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc4, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0xbc, 0x00}}, {2, [3]byte{0xcb, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0xa1, 0x00}}, + {2, [3]byte{0xc4, 0x93, 0x00}}, {2, [3]byte{0xc4, 0xa3, 0x00}}, + {2, [3]byte{0xc5, 0xa7, 0x00}}, {2, [3]byte{0xc5, 0x8a, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc4, 0xae, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc4, 0xaa, 0x00}}, + {2, [3]byte{0xc4, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x85, 0x00}}, + {2, [3]byte{0xc5, 0x8c, 0x00}}, {2, [3]byte{0xc4, 0xb6, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc5, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc5, 0xa8, 0x00}}, + {2, [3]byte{0xc5, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc4, 0x81, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc4, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc4, 0x97, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc4, 0xab, 0x00}}, + {2, [3]byte{0xc4, 0x91, 0x00}}, {2, [3]byte{0xc5, 0x86, 0x00}}, + {2, [3]byte{0xc5, 0x8d, 0x00}}, {2, [3]byte{0xc4, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc5, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0xab, 0x00}}, {2, [3]byte{0xcb, 0x99, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa40000a4, 0xa70000a7, 0xa80000a8, 0xad0000ad, 0xaf0000af, 0xb00000b0, 0xb40000b4, + 0xb80000b8, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc90000c9, + 0xcb0000cb, 0xcd0000cd, 0xce0000ce, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, 0xd80000d8, + 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdf0000df, 0xe10000e1, 0xe20000e2, 0xe30000e3, 0xe40000e4, + 0xe50000e5, 0xe60000e6, 0xe90000e9, 0xeb0000eb, 0xed0000ed, 0xee0000ee, 0xf40000f4, 0xf50000f5, + 0xf60000f6, 0xf70000f7, 0xf80000f8, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xc0000100, 0xe0000101, + 0xa1000104, 0xb1000105, 0xc800010c, 0xe800010d, 0xd0000110, 0xf0000111, 0xaa000112, 0xba000113, + 0xcc000116, 0xec000117, 0xca000118, 0xea000119, 0xab000122, 0xbb000123, 0xa5000128, 0xb5000129, + 0xcf00012a, 0xef00012b, 0xc700012e, 0xe700012f, 0xd3000136, 0xf3000137, 0xa2000138, 0xa600013b, + 0xb600013c, 0xd1000145, 0xf1000146, 0xbd00014a, 0xbf00014b, 0xd200014c, 0xf200014d, 0xa3000156, + 0xb3000157, 0xa9000160, 0xb9000161, 0xac000166, 0xbc000167, 0xdd000168, 0xfd000169, 0xde00016a, + 0xfe00016b, 0xd9000172, 0xf9000173, 0xae00017d, 0xbe00017e, 0xb70002c7, 0xff0002d9, 0xb20002db, + 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, + 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, + 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, + 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, 0xb20002db, + }, +} + +// ISO8859_5 is the ISO 8859-5 encoding. 
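+//
+// A hedged usage sketch: callers convert ISO 8859-5 bytes to UTF-8 through
+// the Charmap's Decoder rather than by reading these tables directly; the
+// String helper below comes from golang.org/x/text/encoding:
+//
+//	dec := charmap.ISO8859_5.NewDecoder()
+//	s, err := dec.String("\xbf\xe0\xd8\xd2\xd5\xe2")
+//	// on success, s == "Привет"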
+var ISO8859_5 *Charmap = &iso8859_5 + +var iso8859_5 = Charmap{ + name: "ISO 8859-5", + mib: identifier.ISOLatinCyrillic, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0x81, 0x00}}, + {2, [3]byte{0xd0, 0x82, 0x00}}, {2, [3]byte{0xd0, 0x83, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {2, [3]byte{0xd0, 0x85, 0x00}}, + {2, [3]byte{0xd0, 0x86, 0x00}}, {2, [3]byte{0xd0, 0x87, 0x00}}, + {2, [3]byte{0xd0, 0x88, 0x00}}, {2, [3]byte{0xd0, 0x89, 0x00}}, + {2, [3]byte{0xd0, 0x8a, 0x00}}, {2, [3]byte{0xd0, 0x8b, 0x00}}, + {2, [3]byte{0xd0, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0x8e, 0x00}}, {2, [3]byte{0xd0, 0x8f, 0x00}}, + {2, [3]byte{0xd0, 0x90, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0xa5, 0x00}}, + {2, [3]byte{0xd0, 0xa6, 0x00}}, {2, [3]byte{0xd0, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xaa, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 
0x00}}, {2, [3]byte{0xd0, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0xb2, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0xb8, 0x00}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 0xbb, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0xbd, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0xbf, 0x00}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x88, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + {3, [3]byte{0xe2, 0x84, 0x96}}, {2, [3]byte{0xd1, 0x91, 0x00}}, + {2, [3]byte{0xd1, 0x92, 0x00}}, {2, [3]byte{0xd1, 0x93, 0x00}}, + {2, [3]byte{0xd1, 0x94, 0x00}}, {2, [3]byte{0xd1, 0x95, 0x00}}, + {2, [3]byte{0xd1, 0x96, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {2, [3]byte{0xd1, 0x98, 0x00}}, {2, [3]byte{0xd1, 0x99, 0x00}}, + {2, [3]byte{0xd1, 0x9a, 0x00}}, {2, [3]byte{0xd1, 0x9b, 0x00}}, + {2, [3]byte{0xd1, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xd1, 0x9e, 0x00}}, {2, [3]byte{0xd1, 0x9f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xfd0000a7, 0xad0000ad, 0xa1000401, 0xa2000402, 0xa3000403, 0xa4000404, 0xa5000405, + 0xa6000406, 0xa7000407, 0xa8000408, 0xa9000409, 0xaa00040a, 0xab00040b, 0xac00040c, 0xae00040e, + 0xaf00040f, 0xb0000410, 0xb1000411, 0xb2000412, 0xb3000413, 0xb4000414, 0xb5000415, 0xb6000416, + 0xb7000417, 0xb8000418, 0xb9000419, 0xba00041a, 0xbb00041b, 0xbc00041c, 0xbd00041d, 0xbe00041e, + 
0xbf00041f, 0xc0000420, 0xc1000421, 0xc2000422, 0xc3000423, 0xc4000424, 0xc5000425, 0xc6000426, + 0xc7000427, 0xc8000428, 0xc9000429, 0xca00042a, 0xcb00042b, 0xcc00042c, 0xcd00042d, 0xce00042e, + 0xcf00042f, 0xd0000430, 0xd1000431, 0xd2000432, 0xd3000433, 0xd4000434, 0xd5000435, 0xd6000436, + 0xd7000437, 0xd8000438, 0xd9000439, 0xda00043a, 0xdb00043b, 0xdc00043c, 0xdd00043d, 0xde00043e, + 0xdf00043f, 0xe0000440, 0xe1000441, 0xe2000442, 0xe3000443, 0xe4000444, 0xe5000445, 0xe6000446, + 0xe7000447, 0xe8000448, 0xe9000449, 0xea00044a, 0xeb00044b, 0xec00044c, 0xed00044d, 0xee00044e, + 0xef00044f, 0xf1000451, 0xf2000452, 0xf3000453, 0xf4000454, 0xf5000455, 0xf6000456, 0xf7000457, + 0xf8000458, 0xf9000459, 0xfa00045a, 0xfb00045b, 0xfc00045c, 0xfe00045e, 0xff00045f, 0xf0002116, + 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, + 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, + 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, + 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, 0xf0002116, + }, +} + +// ISO8859_6 is the ISO 8859-6 encoding. +var ISO8859_6 *Charmap = &iso8859_6 + +var iso8859_6 = Charmap{ + name: "ISO 8859-6", + mib: identifier.ISOLatinArabic, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 
0xbd}}, + {2, [3]byte{0xd8, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xd8, 0x9b, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xd8, 0x9f, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xd8, 0xa1, 0x00}}, + {2, [3]byte{0xd8, 0xa2, 0x00}}, {2, [3]byte{0xd8, 0xa3, 0x00}}, + {2, [3]byte{0xd8, 0xa4, 0x00}}, {2, [3]byte{0xd8, 0xa5, 0x00}}, + {2, [3]byte{0xd8, 0xa6, 0x00}}, {2, [3]byte{0xd8, 0xa7, 0x00}}, + {2, [3]byte{0xd8, 0xa8, 0x00}}, {2, [3]byte{0xd8, 0xa9, 0x00}}, + {2, [3]byte{0xd8, 0xaa, 0x00}}, {2, [3]byte{0xd8, 0xab, 0x00}}, + {2, [3]byte{0xd8, 0xac, 0x00}}, {2, [3]byte{0xd8, 0xad, 0x00}}, + {2, [3]byte{0xd8, 0xae, 0x00}}, {2, [3]byte{0xd8, 0xaf, 0x00}}, + {2, [3]byte{0xd8, 0xb0, 0x00}}, {2, [3]byte{0xd8, 0xb1, 0x00}}, + {2, [3]byte{0xd8, 0xb2, 0x00}}, {2, [3]byte{0xd8, 0xb3, 0x00}}, + {2, [3]byte{0xd8, 0xb4, 0x00}}, {2, [3]byte{0xd8, 0xb5, 0x00}}, + {2, [3]byte{0xd8, 0xb6, 0x00}}, {2, [3]byte{0xd8, 0xb7, 0x00}}, + {2, [3]byte{0xd8, 0xb8, 0x00}}, {2, [3]byte{0xd8, 0xb9, 0x00}}, + {2, [3]byte{0xd8, 0xba, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xd9, 0x80, 0x00}}, {2, [3]byte{0xd9, 0x81, 0x00}}, + {2, [3]byte{0xd9, 0x82, 0x00}}, {2, [3]byte{0xd9, 0x83, 0x00}}, + {2, [3]byte{0xd9, 0x84, 0x00}}, {2, [3]byte{0xd9, 0x85, 0x00}}, + {2, [3]byte{0xd9, 0x86, 0x00}}, {2, [3]byte{0xd9, 0x87, 0x00}}, + {2, [3]byte{0xd9, 0x88, 0x00}}, {2, [3]byte{0xd9, 0x89, 0x00}}, + {2, [3]byte{0xd9, 0x8a, 0x00}}, {2, [3]byte{0xd9, 0x8b, 0x00}}, + {2, [3]byte{0xd9, 0x8c, 0x00}}, {2, [3]byte{0xd9, 0x8d, 0x00}}, + {2, [3]byte{0xd9, 0x8e, 0x00}}, {2, [3]byte{0xd9, 0x8f, 0x00}}, + {2, [3]byte{0xd9, 0x90, 0x00}}, {2, [3]byte{0xd9, 0x91, 0x00}}, + {2, [3]byte{0xd9, 0x92, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa40000a4, 0xad0000ad, 0xac00060c, 0xbb00061b, 0xbf00061f, 0xc1000621, 0xc2000622, + 0xc3000623, 0xc4000624, 0xc5000625, 0xc6000626, 0xc7000627, 0xc8000628, 0xc9000629, 0xca00062a, + 0xcb00062b, 0xcc00062c, 0xcd00062d, 0xce00062e, 0xcf00062f, 0xd0000630, 0xd1000631, 0xd2000632, + 0xd3000633, 0xd4000634, 0xd5000635, 0xd6000636, 0xd7000637, 0xd8000638, 0xd9000639, 0xda00063a, + 0xe0000640, 0xe1000641, 0xe2000642, 0xe3000643, 0xe4000644, 0xe5000645, 0xe6000646, 0xe7000647, + 0xe8000648, 0xe9000649, 0xea00064a, 0xeb00064b, 0xec00064c, 0xed00064d, 0xee00064e, 0xef00064f, + 0xf0000650, 0xf1000651, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, 0xf2000652, + }, +} + +// ISO8859_7 is the ISO 8859-7 encoding. 
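+// This table follows the 2003 revision of the standard, which maps 0xA4 to
+// the euro sign U+20AC and 0xA5 to the drachma sign U+20AF.
+//
+// The encode tables are the reverse mapping packed into a sorted [256]uint32:
+// the low 24 bits of each entry hold the Unicode code point and the high
+// 8 bits the charset byte, with the last entry repeated to pad the array to
+// 256 slots. A rune is encoded by binary-searching the low bits, roughly as
+// sketched below (illustrative; not necessarily the package's exact loop):
+//
+//	lo, hi := 0, len(enc)
+//	for lo < hi {
+//		mid := (lo + hi) / 2
+//		if rune(enc[mid]&0xffffff) < r {
+//			lo = mid + 1
+//		} else {
+//			hi = mid
+//		}
+//	}
+//	// r maps to byte(enc[lo]>>24) iff lo < len(enc) && rune(enc[lo]&0xffffff) == r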
+var ISO8859_7 *Charmap = &iso8859_7 + +var iso8859_7 = Charmap{ + name: "ISO 8859-7", + mib: identifier.ISOLatinGreek, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 
0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xe2, 0x82, 0xaf}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xcd, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x95}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xce, 0x84, 0x00}}, {2, [3]byte{0xce, 0x85, 0x00}}, + {2, [3]byte{0xce, 0x86, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xce, 0x88, 0x00}}, {2, [3]byte{0xce, 0x89, 0x00}}, + {2, [3]byte{0xce, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xce, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xce, 0x8e, 0x00}}, {2, [3]byte{0xce, 0x8f, 0x00}}, + {2, [3]byte{0xce, 0x90, 0x00}}, {2, [3]byte{0xce, 0x91, 0x00}}, + {2, [3]byte{0xce, 0x92, 0x00}}, {2, [3]byte{0xce, 0x93, 0x00}}, + {2, [3]byte{0xce, 0x94, 0x00}}, {2, [3]byte{0xce, 0x95, 0x00}}, + {2, [3]byte{0xce, 0x96, 0x00}}, {2, [3]byte{0xce, 0x97, 0x00}}, + {2, [3]byte{0xce, 0x98, 0x00}}, {2, [3]byte{0xce, 0x99, 0x00}}, + {2, [3]byte{0xce, 0x9a, 0x00}}, {2, [3]byte{0xce, 0x9b, 0x00}}, + {2, [3]byte{0xce, 0x9c, 0x00}}, {2, [3]byte{0xce, 0x9d, 0x00}}, + {2, [3]byte{0xce, 0x9e, 0x00}}, {2, [3]byte{0xce, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0xa0, 0x00}}, {2, 
[3]byte{0xce, 0xa1, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xce, 0xa3, 0x00}}, + {2, [3]byte{0xce, 0xa4, 0x00}}, {2, [3]byte{0xce, 0xa5, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0xa7, 0x00}}, + {2, [3]byte{0xce, 0xa8, 0x00}}, {2, [3]byte{0xce, 0xa9, 0x00}}, + {2, [3]byte{0xce, 0xaa, 0x00}}, {2, [3]byte{0xce, 0xab, 0x00}}, + {2, [3]byte{0xce, 0xac, 0x00}}, {2, [3]byte{0xce, 0xad, 0x00}}, + {2, [3]byte{0xce, 0xae, 0x00}}, {2, [3]byte{0xce, 0xaf, 0x00}}, + {2, [3]byte{0xce, 0xb0, 0x00}}, {2, [3]byte{0xce, 0xb1, 0x00}}, + {2, [3]byte{0xce, 0xb2, 0x00}}, {2, [3]byte{0xce, 0xb3, 0x00}}, + {2, [3]byte{0xce, 0xb4, 0x00}}, {2, [3]byte{0xce, 0xb5, 0x00}}, + {2, [3]byte{0xce, 0xb6, 0x00}}, {2, [3]byte{0xce, 0xb7, 0x00}}, + {2, [3]byte{0xce, 0xb8, 0x00}}, {2, [3]byte{0xce, 0xb9, 0x00}}, + {2, [3]byte{0xce, 0xba, 0x00}}, {2, [3]byte{0xce, 0xbb, 0x00}}, + {2, [3]byte{0xce, 0xbc, 0x00}}, {2, [3]byte{0xce, 0xbd, 0x00}}, + {2, [3]byte{0xce, 0xbe, 0x00}}, {2, [3]byte{0xce, 0xbf, 0x00}}, + {2, [3]byte{0xcf, 0x80, 0x00}}, {2, [3]byte{0xcf, 0x81, 0x00}}, + {2, [3]byte{0xcf, 0x82, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xcf, 0x84, 0x00}}, {2, [3]byte{0xcf, 0x85, 0x00}}, + {2, [3]byte{0xcf, 0x86, 0x00}}, {2, [3]byte{0xcf, 0x87, 0x00}}, + {2, [3]byte{0xcf, 0x88, 0x00}}, {2, [3]byte{0xcf, 0x89, 0x00}}, + {2, [3]byte{0xcf, 0x8a, 0x00}}, {2, [3]byte{0xcf, 0x8b, 0x00}}, + {2, [3]byte{0xcf, 0x8c, 0x00}}, {2, [3]byte{0xcf, 0x8d, 0x00}}, + {2, [3]byte{0xcf, 0x8e, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa30000a3, 0xa60000a6, 0xa70000a7, 0xa80000a8, 0xa90000a9, 0xab0000ab, 0xac0000ac, + 0xad0000ad, 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb70000b7, 0xbb0000bb, 0xbd0000bd, + 0xaa00037a, 0xb4000384, 0xb5000385, 0xb6000386, 0xb8000388, 0xb9000389, 0xba00038a, 0xbc00038c, + 0xbe00038e, 0xbf00038f, 0xc0000390, 0xc1000391, 0xc2000392, 0xc3000393, 0xc4000394, 0xc5000395, + 0xc6000396, 
0xc7000397, 0xc8000398, 0xc9000399, 0xca00039a, 0xcb00039b, 0xcc00039c, 0xcd00039d, + 0xce00039e, 0xcf00039f, 0xd00003a0, 0xd10003a1, 0xd30003a3, 0xd40003a4, 0xd50003a5, 0xd60003a6, + 0xd70003a7, 0xd80003a8, 0xd90003a9, 0xda0003aa, 0xdb0003ab, 0xdc0003ac, 0xdd0003ad, 0xde0003ae, + 0xdf0003af, 0xe00003b0, 0xe10003b1, 0xe20003b2, 0xe30003b3, 0xe40003b4, 0xe50003b5, 0xe60003b6, + 0xe70003b7, 0xe80003b8, 0xe90003b9, 0xea0003ba, 0xeb0003bb, 0xec0003bc, 0xed0003bd, 0xee0003be, + 0xef0003bf, 0xf00003c0, 0xf10003c1, 0xf20003c2, 0xf30003c3, 0xf40003c4, 0xf50003c5, 0xf60003c6, + 0xf70003c7, 0xf80003c8, 0xf90003c9, 0xfa0003ca, 0xfb0003cb, 0xfc0003cc, 0xfd0003cd, 0xfe0003ce, + 0xaf002015, 0xa1002018, 0xa2002019, 0xa40020ac, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, + 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, + 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, + 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, + 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, 0xa50020af, + }, +} + +// ISO8859_8 is the ISO 8859-8 encoding. +var ISO8859_8 *Charmap = &iso8859_8 + +var iso8859_8 = Charmap{ + name: "ISO 8859-8", + mib: identifier.ISOLatinHebrew, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0x97, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, 
[3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x97}}, + {2, [3]byte{0xd7, 0x90, 0x00}}, {2, [3]byte{0xd7, 0x91, 0x00}}, + {2, [3]byte{0xd7, 0x92, 0x00}}, {2, [3]byte{0xd7, 0x93, 0x00}}, + {2, [3]byte{0xd7, 0x94, 0x00}}, {2, [3]byte{0xd7, 0x95, 0x00}}, + {2, [3]byte{0xd7, 0x96, 0x00}}, {2, [3]byte{0xd7, 0x97, 0x00}}, + {2, [3]byte{0xd7, 0x98, 0x00}}, {2, [3]byte{0xd7, 0x99, 0x00}}, + {2, [3]byte{0xd7, 0x9a, 0x00}}, {2, [3]byte{0xd7, 0x9b, 0x00}}, + {2, [3]byte{0xd7, 0x9c, 0x00}}, {2, [3]byte{0xd7, 0x9d, 0x00}}, + {2, [3]byte{0xd7, 0x9e, 0x00}}, {2, [3]byte{0xd7, 0x9f, 0x00}}, + {2, [3]byte{0xd7, 0xa0, 0x00}}, {2, [3]byte{0xd7, 0xa1, 0x00}}, + {2, [3]byte{0xd7, 0xa2, 0x00}}, {2, [3]byte{0xd7, 0xa3, 0x00}}, + {2, [3]byte{0xd7, 0xa4, 0x00}}, {2, [3]byte{0xd7, 0xa5, 0x00}}, + {2, [3]byte{0xd7, 0xa6, 0x00}}, {2, [3]byte{0xd7, 0xa7, 0x00}}, + {2, [3]byte{0xd7, 0xa8, 0x00}}, {2, [3]byte{0xd7, 0xa9, 0x00}}, + {2, [3]byte{0xd7, 0xaa, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x8e}}, + {3, [3]byte{0xe2, 0x80, 0x8f}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, 0xa80000a8, + 0xa90000a9, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, 0xb00000b0, 0xb10000b1, + 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xb80000b8, 0xb90000b9, + 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xaa0000d7, 0xba0000f7, 0xe00005d0, 0xe10005d1, + 0xe20005d2, 0xe30005d3, 0xe40005d4, 0xe50005d5, 0xe60005d6, 0xe70005d7, 0xe80005d8, 0xe90005d9, + 0xea0005da, 0xeb0005db, 0xec0005dc, 0xed0005dd, 0xee0005de, 0xef0005df, 0xf00005e0, 0xf10005e1, + 0xf20005e2, 0xf30005e3, 0xf40005e4, 0xf50005e5, 0xf60005e6, 0xf70005e7, 0xf80005e8, 0xf90005e9, + 0xfa0005ea, 0xfd00200e, 0xfe00200f, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, 0xdf002017, + }, +} + +// ISO8859_9 is the ISO 8859-9 encoding. 
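+// Editor's note on the table format (an observation from the data above, not
+// generated text): each uint32 in an encode table packs the charmap byte in
+// its top 8 bits and the Unicode code point in its low 24 bits, and the
+// entries are sorted by code point so the encoder can search them; the final
+// entry is repeated to pad the array out to 256 slots. In the ISO 8859-8
+// table above, 0xe00005d0 records that byte 0xe0 encodes U+05D0 (Hebrew
+// alef):
+//
+//	b := byte(0xe00005d0 >> 24)      // 0xe0, the charmap byte
+//	r := rune(0xe00005d0 & 0xffffff) // U+05D0, the code point
+//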
+var ISO8859_9 *Charmap = &iso8859_9 + +var iso8859_9 = Charmap{ + name: "ISO 8859-9", + mib: identifier.ISOLatin5, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 
0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc2, 0x80, 0x00}}, {2, [3]byte{0xc2, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0x82, 0x00}}, {2, [3]byte{0xc2, 0x83, 0x00}}, + {2, [3]byte{0xc2, 0x84, 0x00}}, {2, [3]byte{0xc2, 0x85, 0x00}}, + {2, [3]byte{0xc2, 0x86, 0x00}}, {2, [3]byte{0xc2, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0x88, 0x00}}, {2, [3]byte{0xc2, 0x89, 0x00}}, + {2, [3]byte{0xc2, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0x8b, 0x00}}, + {2, [3]byte{0xc2, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0x8d, 0x00}}, + {2, [3]byte{0xc2, 0x8e, 0x00}}, {2, [3]byte{0xc2, 0x8f, 0x00}}, + {2, [3]byte{0xc2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0x91, 0x00}}, + {2, [3]byte{0xc2, 0x92, 0x00}}, {2, [3]byte{0xc2, 0x93, 0x00}}, + {2, [3]byte{0xc2, 0x94, 0x00}}, {2, [3]byte{0xc2, 0x95, 0x00}}, + {2, [3]byte{0xc2, 0x96, 0x00}}, {2, [3]byte{0xc2, 0x97, 0x00}}, + {2, [3]byte{0xc2, 0x98, 0x00}}, {2, [3]byte{0xc2, 0x99, 0x00}}, + {2, [3]byte{0xc2, 0x9a, 0x00}}, {2, [3]byte{0xc2, 0x9b, 0x00}}, + {2, [3]byte{0xc2, 0x9c, 0x00}}, {2, [3]byte{0xc2, 0x9d, 0x00}}, + {2, [3]byte{0xc2, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x9e, 0x00}}, {2, 
[3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc4, 0xb0, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x9f, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x80000080, 0x81000081, 0x82000082, 0x83000083, 0x84000084, 0x85000085, 0x86000086, 0x87000087, + 0x88000088, 0x89000089, 0x8a00008a, 0x8b00008b, 0x8c00008c, 0x8d00008d, 0x8e00008e, 0x8f00008f, + 0x90000090, 0x91000091, 0x92000092, 0x93000093, 0x94000094, 0x95000095, 0x96000096, 0x97000097, + 0x98000098, 0x99000099, 0x9a00009a, 0x9b00009b, 0x9c00009c, 0x9d00009d, 0x9e00009e, 0x9f00009f, + 0xa00000a0, 
0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, 0xd80000d8, + 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, + 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, 0xe80000e8, 0xe90000e9, 0xea0000ea, + 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, 0xf20000f2, 0xf30000f3, + 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, + 0xfc0000fc, 0xff0000ff, 0xd000011e, 0xf000011f, 0xdd000130, 0xfd000131, 0xde00015e, 0xfe00015f, + }, +} + +// ISO8859_10 is the ISO 8859-10 encoding. +var ISO8859_10 *Charmap = &iso8859_10 + +var iso8859_10 = Charmap{ + name: "ISO 8859-10", + mib: identifier.ISOLatin6, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0x84, 0x00}}, + {2, [3]byte{0xc4, 0x92, 0x00}}, {2, [3]byte{0xc4, 0xa2, 0x00}}, + {2, [3]byte{0xc4, 0xaa, 0x00}}, {2, [3]byte{0xc4, 0xa8, 0x00}}, + {2, [3]byte{0xc4, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc4, 0xbb, 0x00}}, {2, [3]byte{0xc4, 0x90, 0x00}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {2, [3]byte{0xc5, 0xa6, 0x00}}, + {2, 
[3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc5, 0xaa, 0x00}}, {2, [3]byte{0xc5, 0x8a, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc4, 0x85, 0x00}}, + {2, [3]byte{0xc4, 0x93, 0x00}}, {2, [3]byte{0xc4, 0xa3, 0x00}}, + {2, [3]byte{0xc4, 0xab, 0x00}}, {2, [3]byte{0xc4, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc4, 0xbc, 0x00}}, {2, [3]byte{0xc4, 0x91, 0x00}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {2, [3]byte{0xc5, 0xa7, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x95}}, + {2, [3]byte{0xc5, 0xab, 0x00}}, {2, [3]byte{0xc5, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc4, 0xae, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x85, 0x00}}, + {2, [3]byte{0xc5, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc5, 0xa8, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc5, 0xb2, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc4, 0x81, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc4, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc4, 0x97, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc5, 0x86, 0x00}}, + {2, [3]byte{0xc5, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc5, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc4, 0xb8, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa70000a7, 0xad0000ad, 0xb00000b0, 0xb70000b7, 0xc10000c1, 0xc20000c2, 0xc30000c3, + 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc90000c9, 0xcb0000cb, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd00000d0, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd80000d8, 0xda0000da, 0xdb0000db, + 0xdc0000dc, 0xdd0000dd, 0xde0000de, 0xdf0000df, 0xe10000e1, 0xe20000e2, 0xe30000e3, 0xe40000e4, + 0xe50000e5, 0xe60000e6, 0xe90000e9, 0xeb0000eb, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf00000f0, + 0xf30000f3, 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf80000f8, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, + 0xfd0000fd, 0xfe0000fe, 0xc0000100, 0xe0000101, 0xa1000104, 0xb1000105, 0xc800010c, 0xe800010d, + 0xa9000110, 0xb9000111, 0xa2000112, 0xb2000113, 0xcc000116, 0xec000117, 0xca000118, 0xea000119, + 0xa3000122, 0xb3000123, 0xa5000128, 0xb5000129, 0xa400012a, 0xb400012b, 0xc700012e, 0xe700012f, + 0xa6000136, 0xb6000137, 0xff000138, 0xa800013b, 0xb800013c, 0xd1000145, 0xf1000146, 0xaf00014a, + 0xbf00014b, 0xd200014c, 0xf200014d, 0xaa000160, 0xba000161, 0xab000166, 0xbb000167, 0xd7000168, + 0xf7000169, 0xae00016a, 0xbe00016b, 0xd9000172, 0xf9000173, 0xac00017d, 0xbc00017e, 0xbd002015, + 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, + 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, + 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, + 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, 0xbd002015, + }, +} + +// ISO8859_13 is the ISO 8859-13 encoding. 
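+// Editor's note, a hedged usage sketch: the encode tables drive the reverse
+// direction. Assuming this package's exported NewEncoder method, runes with
+// no entry in the charmap cause the encoder to report an error unless it is
+// wrapped, e.g. with encoding.ReplaceUnsupported from
+// golang.org/x/text/encoding:
+//
+//	latin7Bytes, err := ISO8859_13.NewEncoder().Bytes([]byte("žāļi"))
+//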
+var ISO8859_13 *Charmap = &iso8859_13 + +var iso8859_13 = Charmap{ + name: "ISO 8859-13", + mib: identifier.ISO885913, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 
0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x9d}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x9e}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0x96, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc3, 0x86, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9c}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc5, 0x97, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc4, 0x84, 0x00}}, {2, [3]byte{0xc4, 0xae, 0x00}}, + {2, [3]byte{0xc4, 0x80, 0x00}}, {2, [3]byte{0xc4, 0x86, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc4, 0x92, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc5, 0xb9, 0x00}}, {2, [3]byte{0xc4, 0x96, 0x00}}, + {2, [3]byte{0xc4, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0xb6, 0x00}}, + {2, [3]byte{0xc4, 0xaa, 0x00}}, {2, [3]byte{0xc4, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {2, 
[3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc5, 0x85, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc5, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc5, 0xb2, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {2, [3]byte{0xc5, 0x9a, 0x00}}, {2, [3]byte{0xc5, 0xaa, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc4, 0x85, 0x00}}, {2, [3]byte{0xc4, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x81, 0x00}}, {2, [3]byte{0xc4, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc4, 0x93, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0xba, 0x00}}, {2, [3]byte{0xc4, 0x97, 0x00}}, + {2, [3]byte{0xc4, 0xa3, 0x00}}, {2, [3]byte{0xc4, 0xb7, 0x00}}, + {2, [3]byte{0xc4, 0xab, 0x00}}, {2, [3]byte{0xc4, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {2, [3]byte{0xc5, 0x84, 0x00}}, + {2, [3]byte{0xc5, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc5, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0xb3, 0x00}}, {2, [3]byte{0xc5, 0x82, 0x00}}, + {2, [3]byte{0xc5, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x99}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa60000a6, 0xa70000a7, 0xa90000a9, 0xab0000ab, + 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb50000b5, + 0xb60000b6, 0xb70000b7, 0xb90000b9, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xc40000c4, + 0xc50000c5, 0xaf0000c6, 0xc90000c9, 0xd30000d3, 0xd50000d5, 0xd60000d6, 0xd70000d7, 0xa80000d8, + 0xdc0000dc, 
0xdf0000df, 0xe40000e4, 0xe50000e5, 0xbf0000e6, 0xe90000e9, 0xf30000f3, 0xf50000f5, + 0xf60000f6, 0xf70000f7, 0xb80000f8, 0xfc0000fc, 0xc2000100, 0xe2000101, 0xc0000104, 0xe0000105, + 0xc3000106, 0xe3000107, 0xc800010c, 0xe800010d, 0xc7000112, 0xe7000113, 0xcb000116, 0xeb000117, + 0xc6000118, 0xe6000119, 0xcc000122, 0xec000123, 0xce00012a, 0xee00012b, 0xc100012e, 0xe100012f, + 0xcd000136, 0xed000137, 0xcf00013b, 0xef00013c, 0xd9000141, 0xf9000142, 0xd1000143, 0xf1000144, + 0xd2000145, 0xf2000146, 0xd400014c, 0xf400014d, 0xaa000156, 0xba000157, 0xda00015a, 0xfa00015b, + 0xd0000160, 0xf0000161, 0xdb00016a, 0xfb00016b, 0xd8000172, 0xf8000173, 0xca000179, 0xea00017a, + 0xdd00017b, 0xfd00017c, 0xde00017d, 0xfe00017e, 0xff002019, 0xb400201c, 0xa100201d, 0xa500201e, + 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, + 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, + 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, + 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, 0xa500201e, + }, +} + +// ISO8859_14 is the ISO 8859-14 encoding. +var ISO8859_14 *Charmap = &iso8859_14 + +var iso8859_14 = Charmap{ + name: "ISO 8859-14", + mib: identifier.ISO885914, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe1, 0xb8, 0x82}}, + {3, [3]byte{0xe1, 0xb8, 0x83}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc4, 0x8a, 0x00}}, {2, [3]byte{0xc4, 0x8b, 0x00}}, + {3, [3]byte{0xe1, 0xb8, 0x8a}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {3, [3]byte{0xe1, 0xba, 0x80}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {3, [3]byte{0xe1, 0xba, 0x82}}, {3, [3]byte{0xe1, 0xb8, 0x8b}}, + {3, 
[3]byte{0xe1, 0xbb, 0xb2}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc5, 0xb8, 0x00}}, + {3, [3]byte{0xe1, 0xb8, 0x9e}}, {3, [3]byte{0xe1, 0xb8, 0x9f}}, + {2, [3]byte{0xc4, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0xa1, 0x00}}, + {3, [3]byte{0xe1, 0xb9, 0x80}}, {3, [3]byte{0xe1, 0xb9, 0x81}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {3, [3]byte{0xe1, 0xb9, 0x96}}, + {3, [3]byte{0xe1, 0xba, 0x81}}, {3, [3]byte{0xe1, 0xb9, 0x97}}, + {3, [3]byte{0xe1, 0xba, 0x83}}, {3, [3]byte{0xe1, 0xb9, 0xa0}}, + {3, [3]byte{0xe1, 0xbb, 0xb3}}, {3, [3]byte{0xe1, 0xba, 0x84}}, + {3, [3]byte{0xe1, 0xba, 0x85}}, {3, [3]byte{0xe1, 0xb9, 0xa1}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc5, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {3, [3]byte{0xe1, 0xb9, 0xaa}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc5, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc5, 0xb5, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {3, [3]byte{0xe1, 0xb9, 0xab}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc5, 0xb7, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa30000a3, 0xa70000a7, 0xa90000a9, 0xad0000ad, 0xae0000ae, 0xb60000b6, 0xc00000c0, + 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, 0xc80000c8, + 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, 0xd10000d1, + 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd80000d8, 0xd90000d9, 0xda0000da, + 0xdb0000db, 0xdc0000dc, 0xdd0000dd, 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe30000e3, + 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, + 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, 0xf20000f2, 0xf30000f3, 0xf40000f4, + 0xf50000f5, 0xf60000f6, 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xfd0000fd, + 0xff0000ff, 0xa400010a, 0xa500010b, 0xb2000120, 0xb3000121, 0xd0000174, 0xf0000175, 0xde000176, + 0xfe000177, 0xaf000178, 0xa1001e02, 0xa2001e03, 0xa6001e0a, 0xab001e0b, 0xb0001e1e, 0xb1001e1f, + 0xb4001e40, 0xb5001e41, 0xb7001e56, 0xb9001e57, 0xbb001e60, 0xbf001e61, 0xd7001e6a, 0xf7001e6b, + 0xa8001e80, 0xb8001e81, 0xaa001e82, 0xba001e83, 0xbd001e84, 0xbe001e85, 0xac001ef2, 0xbc001ef3, + 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, + 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, + 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, + 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, 0xbc001ef3, + }, +} + +// ISO8859_15 is the ISO 8859-15 encoding. 
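+// As with the other charmaps in this file, its decode table maps each of the
+// 256 source bytes to the UTF-8 bytes of the corresponding rune (unassigned
+// bytes decode to U+FFFD, i.e. 0xef, 0xbf, 0xbd), while each encode entry
+// packs the charmap byte into the top 8 bits of a uint32 and the rune into
+// the low 24 bits; for example, 0xa40020ac maps byte 0xa4 to U+20AC, the
+// Euro sign. The encode table is sorted by its low 24 bits and padded by
+// repeating the last entry, so lookups can search a fixed-size sorted array.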
+var ISO8859_15 *Charmap = &iso8859_15 + +var iso8859_15 = Charmap{ + name: "ISO 8859-15", + mib: identifier.ISO885915, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 
0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {2, [3]byte{0xc5, 0x93, 0x00}}, + {2, [3]byte{0xc5, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, 
[3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa50000a5, 0xa70000a7, 0xa90000a9, 0xaa0000aa, + 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, 0xb00000b0, 0xb10000b1, 0xb20000b2, + 0xb30000b3, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 
0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd00000d0, 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, + 0xd80000d8, 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdd0000dd, 0xde0000de, 0xdf0000df, + 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, + 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, + 0xf00000f0, 0xf10000f1, 0xf20000f2, 0xf30000f3, 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, + 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xfd0000fd, 0xfe0000fe, 0xff0000ff, + 0xbc000152, 0xbd000153, 0xa6000160, 0xa8000161, 0xbe000178, 0xb400017d, 0xb800017e, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + }, +} + +// ISO8859_16 is the ISO 8859-16 encoding. +var ISO8859_16 *Charmap = &iso8859_16 + +var iso8859_16 = Charmap{ + name: "ISO 8859-16", + mib: identifier.ISO885916, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 
0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc4, 0x84, 0x00}}, + {2, [3]byte{0xc4, 0x85, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xe2, 0x80, 0x9e}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc8, 0x98, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, 
[3]byte{0xc5, 0xb9, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc5, 0xba, 0x00}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc5, 0x82, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x9d}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc4, 0x8d, 0x00}}, + {2, [3]byte{0xc8, 0x99, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {2, [3]byte{0xc5, 0x93, 0x00}}, + {2, [3]byte{0xc5, 0xb8, 0x00}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc4, 0x86, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc5, 0x90, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc5, 0x9a, 0x00}}, + {2, [3]byte{0xc5, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc4, 0x98, 0x00}}, + {2, [3]byte{0xc8, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x91, 0x00}}, {2, [3]byte{0xc5, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc5, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc5, 0x9b, 0x00}}, + {2, [3]byte{0xc5, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc4, 0x99, 0x00}}, + {2, [3]byte{0xc8, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 
0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa70000a7, 0xa90000a9, 0xab0000ab, 0xad0000ad, 0xb00000b0, 0xb10000b1, 0xb60000b6, + 0xb70000b7, 0xbb0000bb, 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc40000c4, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd60000d6, 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, + 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe40000e4, 0xe60000e6, 0xe70000e7, 0xe80000e8, + 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf20000f2, + 0xf30000f3, 0xf40000f4, 0xf60000f6, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xff0000ff, + 0xc3000102, 0xe3000103, 0xa1000104, 0xa2000105, 0xc5000106, 0xe5000107, 0xb200010c, 0xb900010d, + 0xd0000110, 0xf0000111, 0xdd000118, 0xfd000119, 0xa3000141, 0xb3000142, 0xd1000143, 0xf1000144, + 0xd5000150, 0xf5000151, 0xbc000152, 0xbd000153, 0xd700015a, 0xf700015b, 0xa6000160, 0xa8000161, + 0xd8000170, 0xf8000171, 0xbe000178, 0xac000179, 0xae00017a, 0xaf00017b, 0xbf00017c, 0xb400017d, + 0xb800017e, 0xaa000218, 0xba000219, 0xde00021a, 0xfe00021b, 0xb500201d, 0xa500201e, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, 0xa40020ac, + }, +} + +// KOI8R is the KOI8-R encoding. 
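+// As a concrete check of the table format: KOI8-R byte 0xe1 decodes below to
+// {2, [3]byte{0xd0, 0x90, 0x00}}, the two-byte UTF-8 form of U+0410
+// (Cyrillic capital A), and the same pairing reappears in the encode table
+// as the packed entry 0xe1000410.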
+var KOI8R *Charmap = &koi8R + +var koi8R = Charmap{ + name: "KOI8-R", + mib: identifier.KOI8R, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, 
[3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x9c}}, {3, [3]byte{0xe2, 0x94, 0xa4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xbc}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x90}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x8c, 0xa0}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {3, [3]byte{0xe2, 0x88, 0x9a}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {3, [3]byte{0xe2, 0x89, 0xa4}}, {3, [3]byte{0xe2, 0x89, 0xa5}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x90}}, {3, [3]byte{0xe2, 0x95, 0x91}}, + {3, [3]byte{0xe2, 0x95, 0x92}}, {2, [3]byte{0xd1, 0x91, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x93}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {3, [3]byte{0xe2, 0x95, 0x95}}, {3, [3]byte{0xe2, 0x95, 0x96}}, + {3, [3]byte{0xe2, 0x95, 0x97}}, {3, [3]byte{0xe2, 0x95, 0x98}}, + {3, [3]byte{0xe2, 0x95, 0x99}}, {3, [3]byte{0xe2, 0x95, 0x9a}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {3, [3]byte{0xe2, 0x95, 0x9c}}, + {3, [3]byte{0xe2, 0x95, 0x9d}}, {3, [3]byte{0xe2, 0x95, 0x9e}}, + {3, [3]byte{0xe2, 0x95, 0x9f}}, {3, [3]byte{0xe2, 0x95, 0xa0}}, + {3, [3]byte{0xe2, 0x95, 0xa1}}, {2, [3]byte{0xd0, 0x81, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0xa2}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {3, [3]byte{0xe2, 0x95, 0xa4}}, {3, [3]byte{0xe2, 0x95, 0xa5}}, + {3, [3]byte{0xe2, 0x95, 0xa6}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa9}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {3, [3]byte{0xe2, 0x95, 0xab}}, + {3, [3]byte{0xe2, 0x95, 0xac}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd0, 0xb0, 0x00}}, + {2, [3]byte{0xd0, 0xb1, 0x00}}, {2, [3]byte{0xd1, 0x86, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd1, 0x85, 0x00}}, {2, [3]byte{0xd0, 0xb8, 0x00}}, + {2, [3]byte{0xd0, 0xb9, 0x00}}, {2, [3]byte{0xd0, 0xba, 0x00}}, + {2, [3]byte{0xd0, 0xbb, 0x00}}, {2, [3]byte{0xd0, 0xbc, 0x00}}, + {2, [3]byte{0xd0, 0xbd, 0x00}}, {2, [3]byte{0xd0, 0xbe, 0x00}}, + {2, [3]byte{0xd0, 0xbf, 0x00}}, {2, [3]byte{0xd1, 0x8f, 
0x00}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb2, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd0, 0xb7, 0x00}}, {2, [3]byte{0xd1, 0x88, 0x00}}, + {2, [3]byte{0xd1, 0x8d, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x87, 0x00}}, {2, [3]byte{0xd1, 0x8a, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0x90, 0x00}}, + {2, [3]byte{0xd0, 0x91, 0x00}}, {2, [3]byte{0xd0, 0xa6, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0xa5, 0x00}}, {2, [3]byte{0xd0, 0x98, 0x00}}, + {2, [3]byte{0xd0, 0x99, 0x00}}, {2, [3]byte{0xd0, 0x9a, 0x00}}, + {2, [3]byte{0xd0, 0x9b, 0x00}}, {2, [3]byte{0xd0, 0x9c, 0x00}}, + {2, [3]byte{0xd0, 0x9d, 0x00}}, {2, [3]byte{0xd0, 0x9e, 0x00}}, + {2, [3]byte{0xd0, 0x9f, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x92, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0x97, 0x00}}, {2, [3]byte{0xd0, 0xa8, 0x00}}, + {2, [3]byte{0xd0, 0xad, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xa7, 0x00}}, {2, [3]byte{0xd0, 0xaa, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x9a0000a0, 0xbf0000a9, 0x9c0000b0, 0x9d0000b2, 0x9e0000b7, 0x9f0000f7, 0xb3000401, 0xe1000410, + 0xe2000411, 0xf7000412, 0xe7000413, 0xe4000414, 0xe5000415, 0xf6000416, 0xfa000417, 0xe9000418, + 0xea000419, 0xeb00041a, 0xec00041b, 0xed00041c, 0xee00041d, 0xef00041e, 0xf000041f, 0xf2000420, + 0xf3000421, 0xf4000422, 0xf5000423, 0xe6000424, 0xe8000425, 0xe3000426, 0xfe000427, 0xfb000428, + 0xfd000429, 0xff00042a, 0xf900042b, 
0xf800042c, 0xfc00042d, 0xe000042e, 0xf100042f, 0xc1000430, + 0xc2000431, 0xd7000432, 0xc7000433, 0xc4000434, 0xc5000435, 0xd6000436, 0xda000437, 0xc9000438, + 0xca000439, 0xcb00043a, 0xcc00043b, 0xcd00043c, 0xce00043d, 0xcf00043e, 0xd000043f, 0xd2000440, + 0xd3000441, 0xd4000442, 0xd5000443, 0xc6000444, 0xc8000445, 0xc3000446, 0xde000447, 0xdb000448, + 0xdd000449, 0xdf00044a, 0xd900044b, 0xd800044c, 0xdc00044d, 0xc000044e, 0xd100044f, 0xa3000451, + 0x95002219, 0x9600221a, 0x97002248, 0x98002264, 0x99002265, 0x93002320, 0x9b002321, 0x80002500, + 0x81002502, 0x8200250c, 0x83002510, 0x84002514, 0x85002518, 0x8600251c, 0x87002524, 0x8800252c, + 0x89002534, 0x8a00253c, 0xa0002550, 0xa1002551, 0xa2002552, 0xa4002553, 0xa5002554, 0xa6002555, + 0xa7002556, 0xa8002557, 0xa9002558, 0xaa002559, 0xab00255a, 0xac00255b, 0xad00255c, 0xae00255d, + 0xaf00255e, 0xb000255f, 0xb1002560, 0xb2002561, 0xb4002562, 0xb5002563, 0xb6002564, 0xb7002565, + 0xb8002566, 0xb9002567, 0xba002568, 0xbb002569, 0xbc00256a, 0xbd00256b, 0xbe00256c, 0x8b002580, + 0x8c002584, 0x8d002588, 0x8e00258c, 0x8f002590, 0x90002591, 0x91002592, 0x92002593, 0x940025a0, + }, +} + +// KOI8U is the KOI8-U encoding. +var KOI8U *Charmap = &koi8U + +var koi8U = Charmap{ + name: "KOI8-U", + mib: identifier.KOI8U, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, 
[3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x94, 0x80}}, {3, [3]byte{0xe2, 0x94, 0x82}}, + {3, [3]byte{0xe2, 0x94, 0x8c}}, {3, [3]byte{0xe2, 0x94, 0x90}}, + {3, [3]byte{0xe2, 0x94, 0x94}}, {3, [3]byte{0xe2, 0x94, 0x98}}, + {3, [3]byte{0xe2, 0x94, 0x9c}}, {3, [3]byte{0xe2, 0x94, 0xa4}}, + {3, [3]byte{0xe2, 0x94, 0xac}}, {3, [3]byte{0xe2, 0x94, 0xb4}}, + {3, [3]byte{0xe2, 0x94, 0xbc}}, {3, [3]byte{0xe2, 0x96, 0x80}}, + {3, [3]byte{0xe2, 0x96, 0x84}}, {3, [3]byte{0xe2, 0x96, 0x88}}, + {3, [3]byte{0xe2, 0x96, 0x8c}}, {3, [3]byte{0xe2, 0x96, 0x90}}, + {3, [3]byte{0xe2, 0x96, 0x91}}, {3, [3]byte{0xe2, 0x96, 0x92}}, + {3, [3]byte{0xe2, 0x96, 0x93}}, {3, [3]byte{0xe2, 0x8c, 0xa0}}, + {3, [3]byte{0xe2, 0x96, 0xa0}}, {3, [3]byte{0xe2, 0x88, 0x99}}, + {3, [3]byte{0xe2, 0x88, 0x9a}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {3, [3]byte{0xe2, 0x89, 0xa4}}, {3, [3]byte{0xe2, 0x89, 0xa5}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x8c, 0xa1}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb2, 0x00}}, + {2, [3]byte{0xc2, 0xb7, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x90}}, {3, [3]byte{0xe2, 0x95, 0x91}}, + {3, [3]byte{0xe2, 0x95, 0x92}}, {2, [3]byte{0xd1, 0x91, 0x00}}, + {2, [3]byte{0xd1, 0x94, 0x00}}, {3, [3]byte{0xe2, 0x95, 0x94}}, + {2, [3]byte{0xd1, 0x96, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0x97}}, {3, [3]byte{0xe2, 0x95, 0x98}}, + {3, [3]byte{0xe2, 0x95, 0x99}}, {3, [3]byte{0xe2, 0x95, 0x9a}}, + {3, [3]byte{0xe2, 0x95, 0x9b}}, {2, [3]byte{0xd2, 0x91, 
0x00}}, + {2, [3]byte{0xd1, 0x9e, 0x00}}, {3, [3]byte{0xe2, 0x95, 0x9e}}, + {3, [3]byte{0xe2, 0x95, 0x9f}}, {3, [3]byte{0xe2, 0x95, 0xa0}}, + {3, [3]byte{0xe2, 0x95, 0xa1}}, {2, [3]byte{0xd0, 0x81, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {3, [3]byte{0xe2, 0x95, 0xa3}}, + {2, [3]byte{0xd0, 0x86, 0x00}}, {2, [3]byte{0xd0, 0x87, 0x00}}, + {3, [3]byte{0xe2, 0x95, 0xa6}}, {3, [3]byte{0xe2, 0x95, 0xa7}}, + {3, [3]byte{0xe2, 0x95, 0xa8}}, {3, [3]byte{0xe2, 0x95, 0xa9}}, + {3, [3]byte{0xe2, 0x95, 0xaa}}, {2, [3]byte{0xd2, 0x90, 0x00}}, + {2, [3]byte{0xd0, 0x8e, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd0, 0xb0, 0x00}}, + {2, [3]byte{0xd0, 0xb1, 0x00}}, {2, [3]byte{0xd1, 0x86, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd1, 0x85, 0x00}}, {2, [3]byte{0xd0, 0xb8, 0x00}}, + {2, [3]byte{0xd0, 0xb9, 0x00}}, {2, [3]byte{0xd0, 0xba, 0x00}}, + {2, [3]byte{0xd0, 0xbb, 0x00}}, {2, [3]byte{0xd0, 0xbc, 0x00}}, + {2, [3]byte{0xd0, 0xbd, 0x00}}, {2, [3]byte{0xd0, 0xbe, 0x00}}, + {2, [3]byte{0xd0, 0xbf, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb2, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd0, 0xb7, 0x00}}, {2, [3]byte{0xd1, 0x88, 0x00}}, + {2, [3]byte{0xd1, 0x8d, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x87, 0x00}}, {2, [3]byte{0xd1, 0x8a, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0x90, 0x00}}, + {2, [3]byte{0xd0, 0x91, 0x00}}, {2, [3]byte{0xd0, 0xa6, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0xa5, 0x00}}, {2, [3]byte{0xd0, 0x98, 0x00}}, + {2, [3]byte{0xd0, 0x99, 0x00}}, {2, [3]byte{0xd0, 0x9a, 0x00}}, + {2, [3]byte{0xd0, 0x9b, 0x00}}, {2, [3]byte{0xd0, 0x9c, 0x00}}, + {2, [3]byte{0xd0, 0x9d, 0x00}}, {2, [3]byte{0xd0, 0x9e, 0x00}}, + {2, [3]byte{0xd0, 0x9f, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x92, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0x97, 0x00}}, {2, [3]byte{0xd0, 0xa8, 0x00}}, + {2, [3]byte{0xd0, 0xad, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xa7, 0x00}}, {2, [3]byte{0xd0, 0xaa, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 
0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x9a0000a0, 0xbf0000a9, 0x9c0000b0, 0x9d0000b2, 0x9e0000b7, 0x9f0000f7, 0xb3000401, 0xb4000404, + 0xb6000406, 0xb7000407, 0xbe00040e, 0xe1000410, 0xe2000411, 0xf7000412, 0xe7000413, 0xe4000414, + 0xe5000415, 0xf6000416, 0xfa000417, 0xe9000418, 0xea000419, 0xeb00041a, 0xec00041b, 0xed00041c, + 0xee00041d, 0xef00041e, 0xf000041f, 0xf2000420, 0xf3000421, 0xf4000422, 0xf5000423, 0xe6000424, + 0xe8000425, 0xe3000426, 0xfe000427, 0xfb000428, 0xfd000429, 0xff00042a, 0xf900042b, 0xf800042c, + 0xfc00042d, 0xe000042e, 0xf100042f, 0xc1000430, 0xc2000431, 0xd7000432, 0xc7000433, 0xc4000434, + 0xc5000435, 0xd6000436, 0xda000437, 0xc9000438, 0xca000439, 0xcb00043a, 0xcc00043b, 0xcd00043c, + 0xce00043d, 0xcf00043e, 0xd000043f, 0xd2000440, 0xd3000441, 0xd4000442, 0xd5000443, 0xc6000444, + 0xc8000445, 0xc3000446, 0xde000447, 0xdb000448, 0xdd000449, 0xdf00044a, 0xd900044b, 0xd800044c, + 0xdc00044d, 0xc000044e, 0xd100044f, 0xa3000451, 0xa4000454, 0xa6000456, 0xa7000457, 0xae00045e, + 0xbd000490, 0xad000491, 0x95002219, 0x9600221a, 0x97002248, 0x98002264, 0x99002265, 0x93002320, + 0x9b002321, 0x80002500, 0x81002502, 0x8200250c, 0x83002510, 0x84002514, 0x85002518, 0x8600251c, + 0x87002524, 0x8800252c, 0x89002534, 0x8a00253c, 0xa0002550, 0xa1002551, 0xa2002552, 0xa5002554, + 0xa8002557, 0xa9002558, 0xaa002559, 0xab00255a, 0xac00255b, 0xaf00255e, 0xb000255f, 0xb1002560, + 0xb2002561, 0xb5002563, 0xb8002566, 0xb9002567, 0xba002568, 0xbb002569, 0xbc00256a, 0x8b002580, + 0x8c002584, 0x8d002588, 0x8e00258c, 0x8f002590, 0x90002591, 0x91002592, 0x92002593, 0x940025a0, + }, +} + +// Macintosh is the Macintosh encoding. 
+var Macintosh *Charmap = &macintosh + +var macintosh = Charmap{ + name: "Macintosh", + mib: identifier.Macintosh, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 
0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x91, 0x00}}, {2, [3]byte{0xc3, 0x96, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa2, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa5, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa9, 0x00}}, {2, [3]byte{0xc3, 0xa8, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xad, 0x00}}, {2, [3]byte{0xc3, 0xac, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xb1, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb4, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xbb, 0x00}}, {2, [3]byte{0xc3, 0xbc, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {2, [3]byte{0xc2, 0xb0, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa7, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {3, [3]byte{0xe2, 0x84, 0xa2}}, {2, [3]byte{0xc2, 0xb4, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {3, [3]byte{0xe2, 0x89, 0xa0}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x98, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa4}}, {3, [3]byte{0xe2, 0x89, 0xa5}}, + {2, [3]byte{0xc2, 0xa5, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x82}}, {3, [3]byte{0xe2, 0x88, 0x91}}, + {3, [3]byte{0xe2, 0x88, 0x8f}}, {2, [3]byte{0xcf, 0x80, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0xab}}, {2, [3]byte{0xc2, 0xaa, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xce, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xbf, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {2, [3]byte{0xc6, 0x92, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {3, [3]byte{0xe2, 0x88, 0x86}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xbb, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0x80, 0x00}}, + {2, [3]byte{0xc3, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {2, [3]byte{0xc5, 0x93, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, 
[3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xe2, 0x80, 0x9c}}, {3, [3]byte{0xe2, 0x80, 0x9d}}, + {3, [3]byte{0xe2, 0x80, 0x98}}, {3, [3]byte{0xe2, 0x80, 0x99}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x97, 0x8a}}, + {2, [3]byte{0xc3, 0xbf, 0x00}}, {2, [3]byte{0xc5, 0xb8, 0x00}}, + {3, [3]byte{0xe2, 0x81, 0x84}}, {3, [3]byte{0xe2, 0x82, 0xac}}, + {3, [3]byte{0xe2, 0x80, 0xb9}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {3, [3]byte{0xef, 0xac, 0x81}}, {3, [3]byte{0xef, 0xac, 0x82}}, + {3, [3]byte{0xe2, 0x80, 0xa1}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {3, [3]byte{0xe2, 0x80, 0x9e}}, + {3, [3]byte{0xe2, 0x80, 0xb0}}, {2, [3]byte{0xc3, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x8b, 0x00}}, {2, [3]byte{0xc3, 0x88, 0x00}}, + {2, [3]byte{0xc3, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0x8e, 0x00}}, + {2, [3]byte{0xc3, 0x8f, 0x00}}, {2, [3]byte{0xc3, 0x8c, 0x00}}, + {2, [3]byte{0xc3, 0x93, 0x00}}, {2, [3]byte{0xc3, 0x94, 0x00}}, + {3, [3]byte{0xef, 0xa3, 0xbf}}, {2, [3]byte{0xc3, 0x92, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x99, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {2, [3]byte{0xcb, 0x9c, 0x00}}, + {2, [3]byte{0xc2, 0xaf, 0x00}}, {2, [3]byte{0xcb, 0x98, 0x00}}, + {2, [3]byte{0xcb, 0x99, 0x00}}, {2, [3]byte{0xcb, 0x9a, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xcb, 0x9d, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {2, [3]byte{0xcb, 0x87, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xca0000a0, 0xc10000a1, 0xa20000a2, 0xa30000a3, 0xb40000a5, 0xa40000a7, 0xac0000a8, 0xa90000a9, + 0xbb0000aa, 0xc70000ab, 0xc20000ac, 0xa80000ae, 0xf80000af, 0xa10000b0, 0xb10000b1, 0xab0000b4, + 0xb50000b5, 0xa60000b6, 0xe10000b7, 0xfc0000b8, 0xbc0000ba, 0xc80000bb, 0xc00000bf, 0xcb0000c0, + 0xe70000c1, 0xe50000c2, 0xcc0000c3, 0x800000c4, 0x810000c5, 0xae0000c6, 0x820000c7, 0xe90000c8, + 0x830000c9, 
0xe60000ca, 0xe80000cb, 0xed0000cc, 0xea0000cd, 0xeb0000ce, 0xec0000cf, 0x840000d1, + 0xf10000d2, 0xee0000d3, 0xef0000d4, 0xcd0000d5, 0x850000d6, 0xaf0000d8, 0xf40000d9, 0xf20000da, + 0xf30000db, 0x860000dc, 0xa70000df, 0x880000e0, 0x870000e1, 0x890000e2, 0x8b0000e3, 0x8a0000e4, + 0x8c0000e5, 0xbe0000e6, 0x8d0000e7, 0x8f0000e8, 0x8e0000e9, 0x900000ea, 0x910000eb, 0x930000ec, + 0x920000ed, 0x940000ee, 0x950000ef, 0x960000f1, 0x980000f2, 0x970000f3, 0x990000f4, 0x9b0000f5, + 0x9a0000f6, 0xd60000f7, 0xbf0000f8, 0x9d0000f9, 0x9c0000fa, 0x9e0000fb, 0x9f0000fc, 0xd80000ff, + 0xf5000131, 0xce000152, 0xcf000153, 0xd9000178, 0xc4000192, 0xf60002c6, 0xff0002c7, 0xf90002d8, + 0xfa0002d9, 0xfb0002da, 0xfe0002db, 0xf70002dc, 0xfd0002dd, 0xbd0003a9, 0xb90003c0, 0xd0002013, + 0xd1002014, 0xd4002018, 0xd5002019, 0xe200201a, 0xd200201c, 0xd300201d, 0xe300201e, 0xa0002020, + 0xe0002021, 0xa5002022, 0xc9002026, 0xe4002030, 0xdc002039, 0xdd00203a, 0xda002044, 0xdb0020ac, + 0xaa002122, 0xb6002202, 0xc6002206, 0xb800220f, 0xb7002211, 0xc300221a, 0xb000221e, 0xba00222b, + 0xc5002248, 0xad002260, 0xb2002264, 0xb3002265, 0xd70025ca, 0xf000f8ff, 0xde00fb01, 0xdf00fb02, + }, +} + +// MacintoshCyrillic is the Macintosh Cyrillic encoding. +var MacintoshCyrillic *Charmap = &macintoshCyrillic + +var macintoshCyrillic = Charmap{ + name: "Macintosh Cyrillic", + mib: identifier.MacintoshCyrillic, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, 
[3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd0, 0x90, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0xa5, 0x00}}, + {2, [3]byte{0xd0, 0xa6, 0x00}}, {2, [3]byte{0xd0, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xaa, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {2, [3]byte{0xc2, 0xb0, 0x00}}, + {2, [3]byte{0xd2, 0x90, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa7, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0x86, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {3, [3]byte{0xe2, 0x84, 
0xa2}}, {2, [3]byte{0xd0, 0x82, 0x00}}, + {2, [3]byte{0xd1, 0x92, 0x00}}, {3, [3]byte{0xe2, 0x89, 0xa0}}, + {2, [3]byte{0xd0, 0x83, 0x00}}, {2, [3]byte{0xd1, 0x93, 0x00}}, + {3, [3]byte{0xe2, 0x88, 0x9e}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {3, [3]byte{0xe2, 0x89, 0xa4}}, {3, [3]byte{0xe2, 0x89, 0xa5}}, + {2, [3]byte{0xd1, 0x96, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xd2, 0x91, 0x00}}, {2, [3]byte{0xd0, 0x88, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x94, 0x00}}, + {2, [3]byte{0xd0, 0x87, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x89, 0x00}}, {2, [3]byte{0xd1, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x9a, 0x00}}, + {2, [3]byte{0xd1, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x85, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {3, [3]byte{0xe2, 0x88, 0x9a}}, + {2, [3]byte{0xc6, 0x92, 0x00}}, {3, [3]byte{0xe2, 0x89, 0x88}}, + {3, [3]byte{0xe2, 0x88, 0x86}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xbb, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x9b, 0x00}}, {2, [3]byte{0xd0, 0x8c, 0x00}}, + {2, [3]byte{0xd1, 0x9c, 0x00}}, {2, [3]byte{0xd1, 0x95, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xe2, 0x80, 0x9c}}, {3, [3]byte{0xe2, 0x80, 0x9d}}, + {3, [3]byte{0xe2, 0x80, 0x98}}, {3, [3]byte{0xe2, 0x80, 0x99}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x9e}}, + {2, [3]byte{0xd0, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x9e, 0x00}}, + {2, [3]byte{0xd0, 0x8f, 0x00}}, {2, [3]byte{0xd1, 0x9f, 0x00}}, + {3, [3]byte{0xe2, 0x84, 0x96}}, {2, [3]byte{0xd0, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x91, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 0x00}}, {2, [3]byte{0xd0, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0xb2, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0xb8, 0x00}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 0xbb, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0xbd, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0xbf, 0x00}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x88, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {3, [3]byte{0xe2, 0x82, 0xac}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 
0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xca0000a0, 0xa30000a3, 0xa40000a7, 0xa90000a9, 0xc70000ab, 0xc20000ac, 0xa80000ae, 0xa10000b0, + 0xb10000b1, 0xb50000b5, 0xa60000b6, 0xc80000bb, 0xd60000f7, 0xc4000192, 0xdd000401, 0xab000402, + 0xae000403, 0xb8000404, 0xc1000405, 0xa7000406, 0xba000407, 0xb7000408, 0xbc000409, 0xbe00040a, + 0xcb00040b, 0xcd00040c, 0xd800040e, 0xda00040f, 0x80000410, 0x81000411, 0x82000412, 0x83000413, + 0x84000414, 0x85000415, 0x86000416, 0x87000417, 0x88000418, 0x89000419, 0x8a00041a, 0x8b00041b, + 0x8c00041c, 0x8d00041d, 0x8e00041e, 0x8f00041f, 0x90000420, 0x91000421, 0x92000422, 0x93000423, + 0x94000424, 0x95000425, 0x96000426, 0x97000427, 0x98000428, 0x99000429, 0x9a00042a, 0x9b00042b, + 0x9c00042c, 0x9d00042d, 0x9e00042e, 0x9f00042f, 0xe0000430, 0xe1000431, 0xe2000432, 0xe3000433, + 0xe4000434, 0xe5000435, 0xe6000436, 0xe7000437, 0xe8000438, 0xe9000439, 0xea00043a, 0xeb00043b, + 0xec00043c, 0xed00043d, 0xee00043e, 0xef00043f, 0xf0000440, 0xf1000441, 0xf2000442, 0xf3000443, + 0xf4000444, 0xf5000445, 0xf6000446, 0xf7000447, 0xf8000448, 0xf9000449, 0xfa00044a, 0xfb00044b, + 0xfc00044c, 0xfd00044d, 0xfe00044e, 0xdf00044f, 0xde000451, 0xac000452, 0xaf000453, 0xb9000454, + 0xcf000455, 0xb4000456, 0xbb000457, 0xc0000458, 0xbd000459, 0xbf00045a, 0xcc00045b, 0xce00045c, + 0xd900045e, 0xdb00045f, 0xa2000490, 0xb6000491, 0xd0002013, 0xd1002014, 0xd4002018, 0xd5002019, + 0xd200201c, 0xd300201d, 0xd700201e, 0xa0002020, 0xa5002022, 0xc9002026, 0xff0020ac, 0xdc002116, + 0xaa002122, 0xc6002206, 0xc300221a, 0xb000221e, 0xc5002248, 0xad002260, 0xb2002264, 0xb3002265, + }, +} + +// Windows874 is the Windows 874 encoding. 
+var Windows874 *Charmap = &windows874 + +var windows874 = Charmap{ + name: "Windows 874", + mib: identifier.Windows874, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 
0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xe0, 0xb8, 0x81}}, + {3, [3]byte{0xe0, 0xb8, 0x82}}, {3, [3]byte{0xe0, 0xb8, 0x83}}, + {3, [3]byte{0xe0, 0xb8, 0x84}}, {3, [3]byte{0xe0, 0xb8, 0x85}}, + {3, [3]byte{0xe0, 0xb8, 0x86}}, {3, [3]byte{0xe0, 0xb8, 0x87}}, + {3, [3]byte{0xe0, 0xb8, 0x88}}, {3, [3]byte{0xe0, 0xb8, 0x89}}, + {3, [3]byte{0xe0, 0xb8, 0x8a}}, {3, [3]byte{0xe0, 0xb8, 0x8b}}, + {3, [3]byte{0xe0, 0xb8, 0x8c}}, {3, [3]byte{0xe0, 0xb8, 0x8d}}, + {3, [3]byte{0xe0, 0xb8, 0x8e}}, {3, [3]byte{0xe0, 0xb8, 0x8f}}, + {3, [3]byte{0xe0, 0xb8, 0x90}}, {3, [3]byte{0xe0, 0xb8, 0x91}}, + {3, [3]byte{0xe0, 0xb8, 0x92}}, {3, [3]byte{0xe0, 0xb8, 0x93}}, + {3, [3]byte{0xe0, 0xb8, 0x94}}, {3, [3]byte{0xe0, 0xb8, 0x95}}, + {3, [3]byte{0xe0, 0xb8, 0x96}}, {3, [3]byte{0xe0, 0xb8, 0x97}}, + {3, [3]byte{0xe0, 0xb8, 0x98}}, {3, [3]byte{0xe0, 0xb8, 0x99}}, + {3, [3]byte{0xe0, 0xb8, 0x9a}}, {3, [3]byte{0xe0, 0xb8, 0x9b}}, + {3, [3]byte{0xe0, 0xb8, 0x9c}}, {3, [3]byte{0xe0, 0xb8, 0x9d}}, + {3, [3]byte{0xe0, 0xb8, 0x9e}}, {3, [3]byte{0xe0, 0xb8, 0x9f}}, + {3, [3]byte{0xe0, 0xb8, 0xa0}}, {3, [3]byte{0xe0, 0xb8, 0xa1}}, + {3, [3]byte{0xe0, 0xb8, 0xa2}}, {3, [3]byte{0xe0, 0xb8, 0xa3}}, + {3, [3]byte{0xe0, 0xb8, 0xa4}}, {3, [3]byte{0xe0, 0xb8, 0xa5}}, + {3, [3]byte{0xe0, 0xb8, 0xa6}}, {3, [3]byte{0xe0, 0xb8, 0xa7}}, + {3, [3]byte{0xe0, 0xb8, 0xa8}}, {3, [3]byte{0xe0, 0xb8, 0xa9}}, + {3, [3]byte{0xe0, 0xb8, 0xaa}}, {3, [3]byte{0xe0, 0xb8, 0xab}}, + {3, [3]byte{0xe0, 0xb8, 0xac}}, {3, [3]byte{0xe0, 0xb8, 0xad}}, + {3, [3]byte{0xe0, 0xb8, 0xae}}, {3, [3]byte{0xe0, 0xb8, 0xaf}}, + {3, [3]byte{0xe0, 0xb8, 0xb0}}, {3, 
[3]byte{0xe0, 0xb8, 0xb1}}, + {3, [3]byte{0xe0, 0xb8, 0xb2}}, {3, [3]byte{0xe0, 0xb8, 0xb3}}, + {3, [3]byte{0xe0, 0xb8, 0xb4}}, {3, [3]byte{0xe0, 0xb8, 0xb5}}, + {3, [3]byte{0xe0, 0xb8, 0xb6}}, {3, [3]byte{0xe0, 0xb8, 0xb7}}, + {3, [3]byte{0xe0, 0xb8, 0xb8}}, {3, [3]byte{0xe0, 0xb8, 0xb9}}, + {3, [3]byte{0xe0, 0xb8, 0xba}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe0, 0xb8, 0xbf}}, + {3, [3]byte{0xe0, 0xb9, 0x80}}, {3, [3]byte{0xe0, 0xb9, 0x81}}, + {3, [3]byte{0xe0, 0xb9, 0x82}}, {3, [3]byte{0xe0, 0xb9, 0x83}}, + {3, [3]byte{0xe0, 0xb9, 0x84}}, {3, [3]byte{0xe0, 0xb9, 0x85}}, + {3, [3]byte{0xe0, 0xb9, 0x86}}, {3, [3]byte{0xe0, 0xb9, 0x87}}, + {3, [3]byte{0xe0, 0xb9, 0x88}}, {3, [3]byte{0xe0, 0xb9, 0x89}}, + {3, [3]byte{0xe0, 0xb9, 0x8a}}, {3, [3]byte{0xe0, 0xb9, 0x8b}}, + {3, [3]byte{0xe0, 0xb9, 0x8c}}, {3, [3]byte{0xe0, 0xb9, 0x8d}}, + {3, [3]byte{0xe0, 0xb9, 0x8e}}, {3, [3]byte{0xe0, 0xb9, 0x8f}}, + {3, [3]byte{0xe0, 0xb9, 0x90}}, {3, [3]byte{0xe0, 0xb9, 0x91}}, + {3, [3]byte{0xe0, 0xb9, 0x92}}, {3, [3]byte{0xe0, 0xb9, 0x93}}, + {3, [3]byte{0xe0, 0xb9, 0x94}}, {3, [3]byte{0xe0, 0xb9, 0x95}}, + {3, [3]byte{0xe0, 0xb9, 0x96}}, {3, [3]byte{0xe0, 0xb9, 0x97}}, + {3, [3]byte{0xe0, 0xb9, 0x98}}, {3, [3]byte{0xe0, 0xb9, 0x99}}, + {3, [3]byte{0xe0, 0xb9, 0x9a}}, {3, [3]byte{0xe0, 0xb9, 0x9b}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa1000e01, 0xa2000e02, 0xa3000e03, 0xa4000e04, 0xa5000e05, 0xa6000e06, 0xa7000e07, + 0xa8000e08, 0xa9000e09, 0xaa000e0a, 0xab000e0b, 0xac000e0c, 0xad000e0d, 0xae000e0e, 0xaf000e0f, + 0xb0000e10, 0xb1000e11, 0xb2000e12, 0xb3000e13, 0xb4000e14, 0xb5000e15, 0xb6000e16, 0xb7000e17, + 0xb8000e18, 0xb9000e19, 0xba000e1a, 0xbb000e1b, 0xbc000e1c, 0xbd000e1d, 0xbe000e1e, 0xbf000e1f, + 0xc0000e20, 
0xc1000e21, 0xc2000e22, 0xc3000e23, 0xc4000e24, 0xc5000e25, 0xc6000e26, 0xc7000e27, + 0xc8000e28, 0xc9000e29, 0xca000e2a, 0xcb000e2b, 0xcc000e2c, 0xcd000e2d, 0xce000e2e, 0xcf000e2f, + 0xd0000e30, 0xd1000e31, 0xd2000e32, 0xd3000e33, 0xd4000e34, 0xd5000e35, 0xd6000e36, 0xd7000e37, + 0xd8000e38, 0xd9000e39, 0xda000e3a, 0xdf000e3f, 0xe0000e40, 0xe1000e41, 0xe2000e42, 0xe3000e43, + 0xe4000e44, 0xe5000e45, 0xe6000e46, 0xe7000e47, 0xe8000e48, 0xe9000e49, 0xea000e4a, 0xeb000e4b, + 0xec000e4c, 0xed000e4d, 0xee000e4e, 0xef000e4f, 0xf0000e50, 0xf1000e51, 0xf2000e52, 0xf3000e53, + 0xf4000e54, 0xf5000e55, 0xf6000e56, 0xf7000e57, 0xf8000e58, 0xf9000e59, 0xfa000e5a, 0xfb000e5b, + 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x9300201c, 0x9400201d, 0x95002022, 0x85002026, + 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, + 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, + 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, + 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, 0x800020ac, + }, +} + +// Windows1250 is the Windows 1250 encoding. +var Windows1250 *Charmap = &windows1250 + +var windows1250 = Charmap{ + name: "Windows 1250", + mib: identifier.Windows1250, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xc5, 0x9a, 0x00}}, {2, [3]byte{0xc5, 0xa4, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc5, 0xb9, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xc5, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0xa5, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xcb, 0x87, 0x00}}, + {2, [3]byte{0xcb, 0x98, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0x84, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc2, 0xab, 
0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0x82, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc4, 0x85, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc4, 0xbd, 0x00}}, {2, [3]byte{0xcb, 0x9d, 0x00}}, + {2, [3]byte{0xc4, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc4, 0xb9, 0x00}}, + {2, [3]byte{0xc4, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc4, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc4, 0x8e, 0x00}}, + {2, [3]byte{0xc4, 0x90, 0x00}}, {2, [3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc5, 0x87, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc5, 0x90, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc5, 0x98, 0x00}}, {2, [3]byte{0xc5, 0xae, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc5, 0xb0, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc5, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc5, 0x95, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc4, 0xba, 0x00}}, + {2, [3]byte{0xc4, 0x87, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc4, 0x9b, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc4, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x91, 0x00}}, {2, [3]byte{0xc5, 0x84, 0x00}}, + {2, [3]byte{0xc5, 0x88, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc5, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0x99, 0x00}}, {2, [3]byte{0xc5, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc5, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc5, 0xa3, 0x00}}, {2, [3]byte{0xcb, 0x99, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa40000a4, 0xa60000a6, 0xa70000a7, 0xa80000a8, 0xa90000a9, 0xab0000ab, 0xac0000ac, + 0xad0000ad, 0xae0000ae, 0xb00000b0, 0xb10000b1, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xbb0000bb, 0xc10000c1, 0xc20000c2, 0xc40000c4, 0xc70000c7, 0xc90000c9, 0xcb0000cb, + 0xcd0000cd, 0xce0000ce, 0xd30000d3, 0xd40000d4, 0xd60000d6, 0xd70000d7, 0xda0000da, 0xdc0000dc, + 0xdd0000dd, 0xdf0000df, 0xe10000e1, 0xe20000e2, 0xe40000e4, 0xe70000e7, 0xe90000e9, 0xeb0000eb, + 0xed0000ed, 0xee0000ee, 0xf30000f3, 0xf40000f4, 0xf60000f6, 0xf70000f7, 0xfa0000fa, 0xfc0000fc, + 0xfd0000fd, 0xc3000102, 0xe3000103, 0xa5000104, 0xb9000105, 0xc6000106, 0xe6000107, 0xc800010c, + 0xe800010d, 0xcf00010e, 0xef00010f, 0xd0000110, 0xf0000111, 0xca000118, 0xea000119, 0xcc00011a, + 0xec00011b, 0xc5000139, 0xe500013a, 0xbc00013d, 0xbe00013e, 0xa3000141, 0xb3000142, 0xd1000143, + 0xf1000144, 0xd2000147, 0xf2000148, 0xd5000150, 0xf5000151, 0xc0000154, 0xe0000155, 0xd8000158, + 0xf8000159, 0x8c00015a, 0x9c00015b, 0xaa00015e, 0xba00015f, 0x8a000160, 0x9a000161, 0xde000162, + 0xfe000163, 0x8d000164, 0x9d000165, 0xd900016e, 0xf900016f, 0xdb000170, 0xfb000171, 0x8f000179, + 0x9f00017a, 0xaf00017b, 0xbf00017c, 0x8e00017d, 0x9e00017e, 0xa10002c7, 0xa20002d8, 0xff0002d9, + 0xb20002db, 0xbd0002dd, 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, + 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, + 0x9b00203a, 0x800020ac, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1251 is the Windows 1251 encoding. 
+var Windows1251 *Charmap = &windows1251 + +var windows1251 = Charmap{ + name: "Windows 1251", + mib: identifier.Windows1251, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {2, [3]byte{0xd0, 0x82, 0x00}}, {2, [3]byte{0xd0, 0x83, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xd1, 0x93, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {2, [3]byte{0xd0, 0x89, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xd0, 0x8a, 0x00}}, {2, [3]byte{0xd0, 0x8c, 0x00}}, + {2, [3]byte{0xd0, 0x8b, 0x00}}, {2, [3]byte{0xd0, 0x8f, 0x00}}, + {2, [3]byte{0xd1, 0x92, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {2, [3]byte{0xd1, 0x99, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xd1, 0x9a, 0x00}}, {2, [3]byte{0xd1, 0x9c, 0x00}}, + {2, [3]byte{0xd1, 0x9b, 0x00}}, {2, [3]byte{0xd1, 0x9f, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xd0, 0x8e, 0x00}}, + {2, [3]byte{0xd1, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x88, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xd2, 0x90, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0x81, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0x84, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xd0, 0x87, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x96, 0x00}}, + {2, [3]byte{0xd2, 0x91, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xd1, 0x91, 0x00}}, {3, [3]byte{0xe2, 0x84, 0x96}}, + {2, [3]byte{0xd1, 0x94, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xd1, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x95, 0x00}}, {2, [3]byte{0xd1, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x90, 0x00}}, {2, [3]byte{0xd0, 0x91, 0x00}}, + {2, [3]byte{0xd0, 0x92, 0x00}}, {2, [3]byte{0xd0, 0x93, 0x00}}, + {2, [3]byte{0xd0, 0x94, 0x00}}, {2, [3]byte{0xd0, 0x95, 0x00}}, + {2, [3]byte{0xd0, 0x96, 0x00}}, {2, [3]byte{0xd0, 0x97, 0x00}}, + {2, [3]byte{0xd0, 0x98, 0x00}}, {2, [3]byte{0xd0, 0x99, 0x00}}, + {2, [3]byte{0xd0, 0x9a, 0x00}}, {2, [3]byte{0xd0, 0x9b, 0x00}}, + {2, [3]byte{0xd0, 0x9c, 0x00}}, {2, [3]byte{0xd0, 0x9d, 0x00}}, + {2, [3]byte{0xd0, 0x9e, 0x00}}, {2, [3]byte{0xd0, 0x9f, 0x00}}, + {2, [3]byte{0xd0, 0xa0, 
0x00}}, {2, [3]byte{0xd0, 0xa1, 0x00}}, + {2, [3]byte{0xd0, 0xa2, 0x00}}, {2, [3]byte{0xd0, 0xa3, 0x00}}, + {2, [3]byte{0xd0, 0xa4, 0x00}}, {2, [3]byte{0xd0, 0xa5, 0x00}}, + {2, [3]byte{0xd0, 0xa6, 0x00}}, {2, [3]byte{0xd0, 0xa7, 0x00}}, + {2, [3]byte{0xd0, 0xa8, 0x00}}, {2, [3]byte{0xd0, 0xa9, 0x00}}, + {2, [3]byte{0xd0, 0xaa, 0x00}}, {2, [3]byte{0xd0, 0xab, 0x00}}, + {2, [3]byte{0xd0, 0xac, 0x00}}, {2, [3]byte{0xd0, 0xad, 0x00}}, + {2, [3]byte{0xd0, 0xae, 0x00}}, {2, [3]byte{0xd0, 0xaf, 0x00}}, + {2, [3]byte{0xd0, 0xb0, 0x00}}, {2, [3]byte{0xd0, 0xb1, 0x00}}, + {2, [3]byte{0xd0, 0xb2, 0x00}}, {2, [3]byte{0xd0, 0xb3, 0x00}}, + {2, [3]byte{0xd0, 0xb4, 0x00}}, {2, [3]byte{0xd0, 0xb5, 0x00}}, + {2, [3]byte{0xd0, 0xb6, 0x00}}, {2, [3]byte{0xd0, 0xb7, 0x00}}, + {2, [3]byte{0xd0, 0xb8, 0x00}}, {2, [3]byte{0xd0, 0xb9, 0x00}}, + {2, [3]byte{0xd0, 0xba, 0x00}}, {2, [3]byte{0xd0, 0xbb, 0x00}}, + {2, [3]byte{0xd0, 0xbc, 0x00}}, {2, [3]byte{0xd0, 0xbd, 0x00}}, + {2, [3]byte{0xd0, 0xbe, 0x00}}, {2, [3]byte{0xd0, 0xbf, 0x00}}, + {2, [3]byte{0xd1, 0x80, 0x00}}, {2, [3]byte{0xd1, 0x81, 0x00}}, + {2, [3]byte{0xd1, 0x82, 0x00}}, {2, [3]byte{0xd1, 0x83, 0x00}}, + {2, [3]byte{0xd1, 0x84, 0x00}}, {2, [3]byte{0xd1, 0x85, 0x00}}, + {2, [3]byte{0xd1, 0x86, 0x00}}, {2, [3]byte{0xd1, 0x87, 0x00}}, + {2, [3]byte{0xd1, 0x88, 0x00}}, {2, [3]byte{0xd1, 0x89, 0x00}}, + {2, [3]byte{0xd1, 0x8a, 0x00}}, {2, [3]byte{0xd1, 0x8b, 0x00}}, + {2, [3]byte{0xd1, 0x8c, 0x00}}, {2, [3]byte{0xd1, 0x8d, 0x00}}, + {2, [3]byte{0xd1, 0x8e, 0x00}}, {2, [3]byte{0xd1, 0x8f, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa40000a4, 0xa60000a6, 0xa70000a7, 0xa90000a9, 0xab0000ab, 0xac0000ac, 0xad0000ad, + 0xae0000ae, 0xb00000b0, 0xb10000b1, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xbb0000bb, 0xa8000401, + 0x80000402, 0x81000403, 0xaa000404, 0xbd000405, 0xb2000406, 0xaf000407, 0xa3000408, 0x8a000409, + 0x8c00040a, 0x8e00040b, 0x8d00040c, 0xa100040e, 0x8f00040f, 0xc0000410, 0xc1000411, 0xc2000412, + 
0xc3000413, 0xc4000414, 0xc5000415, 0xc6000416, 0xc7000417, 0xc8000418, 0xc9000419, 0xca00041a, + 0xcb00041b, 0xcc00041c, 0xcd00041d, 0xce00041e, 0xcf00041f, 0xd0000420, 0xd1000421, 0xd2000422, + 0xd3000423, 0xd4000424, 0xd5000425, 0xd6000426, 0xd7000427, 0xd8000428, 0xd9000429, 0xda00042a, + 0xdb00042b, 0xdc00042c, 0xdd00042d, 0xde00042e, 0xdf00042f, 0xe0000430, 0xe1000431, 0xe2000432, + 0xe3000433, 0xe4000434, 0xe5000435, 0xe6000436, 0xe7000437, 0xe8000438, 0xe9000439, 0xea00043a, + 0xeb00043b, 0xec00043c, 0xed00043d, 0xee00043e, 0xef00043f, 0xf0000440, 0xf1000441, 0xf2000442, + 0xf3000443, 0xf4000444, 0xf5000445, 0xf6000446, 0xf7000447, 0xf8000448, 0xf9000449, 0xfa00044a, + 0xfb00044b, 0xfc00044c, 0xfd00044d, 0xfe00044e, 0xff00044f, 0xb8000451, 0x90000452, 0x83000453, + 0xba000454, 0xbe000455, 0xb3000456, 0xbf000457, 0xbc000458, 0x9a000459, 0x9c00045a, 0x9e00045b, + 0x9d00045c, 0xa200045e, 0x9f00045f, 0xa5000490, 0xb4000491, 0x96002013, 0x97002014, 0x91002018, + 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, 0x95002022, + 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0x880020ac, 0xb9002116, 0x99002122, 0x99002122, + }, +} + +// Windows1252 is the Windows 1252 encoding. +var Windows1252 *Charmap = &windows1252 + +var windows1252 = Charmap{ + name: "Windows 1252", + mib: identifier.Windows1252, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {2, [3]byte{0xcb, 0x9c, 0x00}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xc5, 0x93, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xc5, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 
0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc3, 0x90, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc3, 0x9d, 0x00}}, + {2, [3]byte{0xc3, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc3, 0xb0, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc3, 0xbd, 0x00}}, + {2, [3]byte{0xc3, 0xbe, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd00000d0, 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, + 0xd80000d8, 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdd0000dd, 0xde0000de, 0xdf0000df, + 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, + 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, + 0xf00000f0, 0xf10000f1, 0xf20000f2, 0xf30000f3, 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, + 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, 0xfc0000fc, 0xfd0000fd, 0xfe0000fe, 0xff0000ff, + 0x8c000152, 0x9c000153, 0x8a000160, 0x9a000161, 0x9f000178, 0x8e00017d, 0x9e00017e, 0x83000192, + 0x880002c6, 0x980002dc, 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, + 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, + 0x9b00203a, 0x800020ac, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1253 is the Windows 1253 encoding. 
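+//
+// A note on the decode tables in this file: each of the 256 entries gives
+// the UTF-8 encoding of the rune that the corresponding code-page byte maps
+// to, as a length plus up to three UTF-8 bytes. Bytes the code page leaves
+// undefined (0xAA, 0xD2 and 0xFF in Windows 1253, for example) decode to
+// U+FFFD REPLACEMENT CHARACTER, whose UTF-8 form is ef bf bd. A minimal
+// sketch of the lookup, assuming utf8Enc's fields are named len and data:
+//
+//	e := windows1253.decode[b]           // b is the source code-page byte
+//	dst = append(dst, e.data[:e.len]...) // emit the rune's UTF-8 bytes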
+var Windows1253 *Charmap = &windows1253 + +var windows1253 = Charmap{ + name: "Windows 1253", + mib: identifier.Windows1253, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xce, 0x85, 0x00}}, + {2, [3]byte{0xce, 0x86, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x95}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xce, 0x84, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xce, 0x88, 0x00}}, {2, [3]byte{0xce, 0x89, 0x00}}, + {2, [3]byte{0xce, 0x8a, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xce, 0x8c, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xce, 0x8e, 0x00}}, {2, [3]byte{0xce, 0x8f, 0x00}}, + {2, [3]byte{0xce, 0x90, 0x00}}, {2, [3]byte{0xce, 0x91, 0x00}}, + {2, [3]byte{0xce, 0x92, 0x00}}, {2, [3]byte{0xce, 0x93, 0x00}}, + {2, [3]byte{0xce, 0x94, 0x00}}, {2, [3]byte{0xce, 0x95, 0x00}}, + {2, [3]byte{0xce, 0x96, 0x00}}, {2, [3]byte{0xce, 0x97, 0x00}}, + {2, [3]byte{0xce, 0x98, 0x00}}, {2, [3]byte{0xce, 0x99, 0x00}}, + {2, [3]byte{0xce, 0x9a, 0x00}}, {2, [3]byte{0xce, 0x9b, 0x00}}, + {2, [3]byte{0xce, 0x9c, 0x00}}, {2, [3]byte{0xce, 0x9d, 0x00}}, + {2, [3]byte{0xce, 0x9e, 0x00}}, {2, [3]byte{0xce, 0x9f, 0x00}}, + {2, [3]byte{0xce, 0xa0, 
0x00}}, {2, [3]byte{0xce, 0xa1, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xce, 0xa3, 0x00}}, + {2, [3]byte{0xce, 0xa4, 0x00}}, {2, [3]byte{0xce, 0xa5, 0x00}}, + {2, [3]byte{0xce, 0xa6, 0x00}}, {2, [3]byte{0xce, 0xa7, 0x00}}, + {2, [3]byte{0xce, 0xa8, 0x00}}, {2, [3]byte{0xce, 0xa9, 0x00}}, + {2, [3]byte{0xce, 0xaa, 0x00}}, {2, [3]byte{0xce, 0xab, 0x00}}, + {2, [3]byte{0xce, 0xac, 0x00}}, {2, [3]byte{0xce, 0xad, 0x00}}, + {2, [3]byte{0xce, 0xae, 0x00}}, {2, [3]byte{0xce, 0xaf, 0x00}}, + {2, [3]byte{0xce, 0xb0, 0x00}}, {2, [3]byte{0xce, 0xb1, 0x00}}, + {2, [3]byte{0xce, 0xb2, 0x00}}, {2, [3]byte{0xce, 0xb3, 0x00}}, + {2, [3]byte{0xce, 0xb4, 0x00}}, {2, [3]byte{0xce, 0xb5, 0x00}}, + {2, [3]byte{0xce, 0xb6, 0x00}}, {2, [3]byte{0xce, 0xb7, 0x00}}, + {2, [3]byte{0xce, 0xb8, 0x00}}, {2, [3]byte{0xce, 0xb9, 0x00}}, + {2, [3]byte{0xce, 0xba, 0x00}}, {2, [3]byte{0xce, 0xbb, 0x00}}, + {2, [3]byte{0xce, 0xbc, 0x00}}, {2, [3]byte{0xce, 0xbd, 0x00}}, + {2, [3]byte{0xce, 0xbe, 0x00}}, {2, [3]byte{0xce, 0xbf, 0x00}}, + {2, [3]byte{0xcf, 0x80, 0x00}}, {2, [3]byte{0xcf, 0x81, 0x00}}, + {2, [3]byte{0xcf, 0x82, 0x00}}, {2, [3]byte{0xcf, 0x83, 0x00}}, + {2, [3]byte{0xcf, 0x84, 0x00}}, {2, [3]byte{0xcf, 0x85, 0x00}}, + {2, [3]byte{0xcf, 0x86, 0x00}}, {2, [3]byte{0xcf, 0x87, 0x00}}, + {2, [3]byte{0xcf, 0x88, 0x00}}, {2, [3]byte{0xcf, 0x89, 0x00}}, + {2, [3]byte{0xcf, 0x8a, 0x00}}, {2, [3]byte{0xcf, 0x8b, 0x00}}, + {2, [3]byte{0xcf, 0x8c, 0x00}}, {2, [3]byte{0xcf, 0x8d, 0x00}}, + {2, [3]byte{0xcf, 0x8e, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, 0xa80000a8, 0xa90000a9, + 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, + 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xbb0000bb, 0xbd0000bd, 0x83000192, 0xb4000384, 0xa1000385, + 0xa2000386, 0xb8000388, 0xb9000389, 0xba00038a, 0xbc00038c, 0xbe00038e, 0xbf00038f, 0xc0000390, + 
0xc1000391, 0xc2000392, 0xc3000393, 0xc4000394, 0xc5000395, 0xc6000396, 0xc7000397, 0xc8000398, + 0xc9000399, 0xca00039a, 0xcb00039b, 0xcc00039c, 0xcd00039d, 0xce00039e, 0xcf00039f, 0xd00003a0, + 0xd10003a1, 0xd30003a3, 0xd40003a4, 0xd50003a5, 0xd60003a6, 0xd70003a7, 0xd80003a8, 0xd90003a9, + 0xda0003aa, 0xdb0003ab, 0xdc0003ac, 0xdd0003ad, 0xde0003ae, 0xdf0003af, 0xe00003b0, 0xe10003b1, + 0xe20003b2, 0xe30003b3, 0xe40003b4, 0xe50003b5, 0xe60003b6, 0xe70003b7, 0xe80003b8, 0xe90003b9, + 0xea0003ba, 0xeb0003bb, 0xec0003bc, 0xed0003bd, 0xee0003be, 0xef0003bf, 0xf00003c0, 0xf10003c1, + 0xf20003c2, 0xf30003c3, 0xf40003c4, 0xf50003c5, 0xf60003c6, 0xf70003c7, 0xf80003c8, 0xf90003c9, + 0xfa0003ca, 0xfb0003cb, 0xfc0003cc, 0xfd0003cd, 0xfe0003ce, 0x96002013, 0x97002014, 0xaf002015, + 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, + 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0x800020ac, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1254 is the Windows 1254 encoding. +var Windows1254 *Charmap = &windows1254 + +var windows1254 = Charmap{ + name: "Windows 1254", + mib: identifier.Windows1254, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {2, [3]byte{0xc5, 0xa0, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {2, [3]byte{0xcb, 0x9c, 0x00}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xc5, 0x93, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc5, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 
0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc3, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xc3, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xc3, 0x92, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc4, 0xb0, 0x00}}, + {2, [3]byte{0xc5, 0x9e, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc3, 0xa3, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xac, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x9f, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xc3, 0xb2, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc4, 0xb1, 0x00}}, + {2, [3]byte{0xc5, 0x9f, 0x00}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc30000c3, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, + 0xc80000c8, 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcc0000cc, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, + 0xd10000d1, 0xd20000d2, 0xd30000d3, 0xd40000d4, 0xd50000d5, 0xd60000d6, 0xd70000d7, 0xd80000d8, + 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, + 0xe30000e3, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, 0xe80000e8, 0xe90000e9, 0xea0000ea, + 0xeb0000eb, 0xec0000ec, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, 0xf20000f2, 0xf30000f3, + 0xf40000f4, 0xf50000f5, 0xf60000f6, 0xf70000f7, 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, + 0xfc0000fc, 0xff0000ff, 0xd000011e, 0xf000011f, 0xdd000130, 0xfd000131, 0x8c000152, 0x9c000153, + 0xde00015e, 0xfe00015f, 0x8a000160, 0x9a000161, 0x9f000178, 0x83000192, 0x880002c6, 0x980002dc, + 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, + 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0x800020ac, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1255 is the Windows 1255 encoding. 
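+//
+// A note on the encode tables in this file: each uint32 packs the code-page
+// byte into its top 8 bits and the Unicode code point into its low 24 bits,
+// and the array is sorted by code point so an encoder can binary-search it
+// by rune. The last real pair is repeated to pad the array out to 256
+// entries, which is why several tables end in runs of 0x99002122 (byte
+// 0x99, U+2122). A minimal sketch of unpacking one entry, assuming this
+// layout:
+//
+//	v := windows1255.encode[i]
+//	b, r := byte(v>>24), rune(v&(1<<24-1)) // code-page byte, Unicode rune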
+var Windows1255 *Charmap = &windows1255 + +var windows1255 = Charmap{ + name: "Windows 1255", + mib: identifier.Windows1255, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {2, [3]byte{0xcb, 0x9c, 0x00}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xaa}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0x97, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xb7, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xd6, 0xb0, 0x00}}, {2, [3]byte{0xd6, 0xb1, 0x00}}, + {2, [3]byte{0xd6, 0xb2, 0x00}}, {2, [3]byte{0xd6, 0xb3, 0x00}}, + {2, [3]byte{0xd6, 0xb4, 0x00}}, {2, [3]byte{0xd6, 0xb5, 0x00}}, + {2, [3]byte{0xd6, 0xb6, 0x00}}, {2, [3]byte{0xd6, 0xb7, 0x00}}, + {2, [3]byte{0xd6, 0xb8, 0x00}}, {2, [3]byte{0xd6, 0xb9, 0x00}}, + {2, [3]byte{0xd6, 0xba, 0x00}}, {2, [3]byte{0xd6, 0xbb, 0x00}}, + {2, [3]byte{0xd6, 0xbc, 0x00}}, {2, [3]byte{0xd6, 0xbd, 0x00}}, + {2, [3]byte{0xd6, 0xbe, 0x00}}, {2, [3]byte{0xd6, 0xbf, 0x00}}, + {2, [3]byte{0xd7, 0x80, 
0x00}}, {2, [3]byte{0xd7, 0x81, 0x00}}, + {2, [3]byte{0xd7, 0x82, 0x00}}, {2, [3]byte{0xd7, 0x83, 0x00}}, + {2, [3]byte{0xd7, 0xb0, 0x00}}, {2, [3]byte{0xd7, 0xb1, 0x00}}, + {2, [3]byte{0xd7, 0xb2, 0x00}}, {2, [3]byte{0xd7, 0xb3, 0x00}}, + {2, [3]byte{0xd7, 0xb4, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xd7, 0x90, 0x00}}, {2, [3]byte{0xd7, 0x91, 0x00}}, + {2, [3]byte{0xd7, 0x92, 0x00}}, {2, [3]byte{0xd7, 0x93, 0x00}}, + {2, [3]byte{0xd7, 0x94, 0x00}}, {2, [3]byte{0xd7, 0x95, 0x00}}, + {2, [3]byte{0xd7, 0x96, 0x00}}, {2, [3]byte{0xd7, 0x97, 0x00}}, + {2, [3]byte{0xd7, 0x98, 0x00}}, {2, [3]byte{0xd7, 0x99, 0x00}}, + {2, [3]byte{0xd7, 0x9a, 0x00}}, {2, [3]byte{0xd7, 0x9b, 0x00}}, + {2, [3]byte{0xd7, 0x9c, 0x00}}, {2, [3]byte{0xd7, 0x9d, 0x00}}, + {2, [3]byte{0xd7, 0x9e, 0x00}}, {2, [3]byte{0xd7, 0x9f, 0x00}}, + {2, [3]byte{0xd7, 0xa0, 0x00}}, {2, [3]byte{0xd7, 0xa1, 0x00}}, + {2, [3]byte{0xd7, 0xa2, 0x00}}, {2, [3]byte{0xd7, 0xa3, 0x00}}, + {2, [3]byte{0xd7, 0xa4, 0x00}}, {2, [3]byte{0xd7, 0xa5, 0x00}}, + {2, [3]byte{0xd7, 0xa6, 0x00}}, {2, [3]byte{0xd7, 0xa7, 0x00}}, + {2, [3]byte{0xd7, 0xa8, 0x00}}, {2, [3]byte{0xd7, 0xa9, 0x00}}, + {2, [3]byte{0xd7, 0xaa, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x8e}}, + {3, [3]byte{0xe2, 0x80, 0x8f}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa50000a5, 0xa60000a6, 0xa70000a7, 0xa80000a8, + 0xa90000a9, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, 0xb00000b0, 0xb10000b1, + 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xb80000b8, 0xb90000b9, + 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, 0xaa0000d7, 0xba0000f7, 0x83000192, + 
0x880002c6, 0x980002dc, 0xc00005b0, 0xc10005b1, 0xc20005b2, 0xc30005b3, 0xc40005b4, 0xc50005b5, + 0xc60005b6, 0xc70005b7, 0xc80005b8, 0xc90005b9, 0xca0005ba, 0xcb0005bb, 0xcc0005bc, 0xcd0005bd, + 0xce0005be, 0xcf0005bf, 0xd00005c0, 0xd10005c1, 0xd20005c2, 0xd30005c3, 0xe00005d0, 0xe10005d1, + 0xe20005d2, 0xe30005d3, 0xe40005d4, 0xe50005d5, 0xe60005d6, 0xe70005d7, 0xe80005d8, 0xe90005d9, + 0xea0005da, 0xeb0005db, 0xec0005dc, 0xed0005dd, 0xee0005de, 0xef0005df, 0xf00005e0, 0xf10005e1, + 0xf20005e2, 0xf30005e3, 0xf40005e4, 0xf50005e5, 0xf60005e6, 0xf70005e7, 0xf80005e8, 0xf90005e9, + 0xfa0005ea, 0xd40005f0, 0xd50005f1, 0xd60005f2, 0xd70005f3, 0xd80005f4, 0xfd00200e, 0xfe00200f, + 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, + 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0xa40020aa, + 0x800020ac, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1256 is the Windows 1256 encoding. +var Windows1256 *Charmap = &windows1256 + +var windows1256 = Charmap{ + name: "Windows 1256", + mib: identifier.Windows1256, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {2, [3]byte{0xd9, 0xbe, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {2, [3]byte{0xd9, 0xb9, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {2, [3]byte{0xda, 0x86, 0x00}}, + {2, [3]byte{0xda, 0x98, 0x00}}, {2, [3]byte{0xda, 0x88, 0x00}}, + {2, [3]byte{0xda, 0xaf, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {2, [3]byte{0xda, 0xa9, 0x00}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {2, [3]byte{0xda, 0x91, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xc5, 0x93, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x8c}}, + {3, [3]byte{0xe2, 0x80, 0x8d}}, {2, [3]byte{0xda, 0xba, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xd8, 0x8c, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xda, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xab, 
0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xd8, 0x9b, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xd8, 0x9f, 0x00}}, + {2, [3]byte{0xdb, 0x81, 0x00}}, {2, [3]byte{0xd8, 0xa1, 0x00}}, + {2, [3]byte{0xd8, 0xa2, 0x00}}, {2, [3]byte{0xd8, 0xa3, 0x00}}, + {2, [3]byte{0xd8, 0xa4, 0x00}}, {2, [3]byte{0xd8, 0xa5, 0x00}}, + {2, [3]byte{0xd8, 0xa6, 0x00}}, {2, [3]byte{0xd8, 0xa7, 0x00}}, + {2, [3]byte{0xd8, 0xa8, 0x00}}, {2, [3]byte{0xd8, 0xa9, 0x00}}, + {2, [3]byte{0xd8, 0xaa, 0x00}}, {2, [3]byte{0xd8, 0xab, 0x00}}, + {2, [3]byte{0xd8, 0xac, 0x00}}, {2, [3]byte{0xd8, 0xad, 0x00}}, + {2, [3]byte{0xd8, 0xae, 0x00}}, {2, [3]byte{0xd8, 0xaf, 0x00}}, + {2, [3]byte{0xd8, 0xb0, 0x00}}, {2, [3]byte{0xd8, 0xb1, 0x00}}, + {2, [3]byte{0xd8, 0xb2, 0x00}}, {2, [3]byte{0xd8, 0xb3, 0x00}}, + {2, [3]byte{0xd8, 0xb4, 0x00}}, {2, [3]byte{0xd8, 0xb5, 0x00}}, + {2, [3]byte{0xd8, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xd8, 0xb7, 0x00}}, {2, [3]byte{0xd8, 0xb8, 0x00}}, + {2, [3]byte{0xd8, 0xb9, 0x00}}, {2, [3]byte{0xd8, 0xba, 0x00}}, + {2, [3]byte{0xd9, 0x80, 0x00}}, {2, [3]byte{0xd9, 0x81, 0x00}}, + {2, [3]byte{0xd9, 0x82, 0x00}}, {2, [3]byte{0xd9, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xd9, 0x84, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xd9, 0x85, 0x00}}, + {2, [3]byte{0xd9, 0x86, 0x00}}, {2, [3]byte{0xd9, 0x87, 0x00}}, + {2, [3]byte{0xd9, 0x88, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xd9, 0x89, 0x00}}, {2, [3]byte{0xd9, 0x8a, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xd9, 0x8b, 0x00}}, {2, [3]byte{0xd9, 0x8c, 0x00}}, + {2, [3]byte{0xd9, 0x8d, 0x00}}, {2, [3]byte{0xd9, 0x8e, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xd9, 0x8f, 0x00}}, + {2, [3]byte{0xd9, 0x90, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xd9, 0x91, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xd9, 0x92, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {3, [3]byte{0xe2, 0x80, 0x8e}}, + {3, [3]byte{0xe2, 0x80, 0x8f}}, {2, [3]byte{0xdb, 0x92, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, 0xa80000a8, + 0xa90000a9, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, 0xb00000b0, 0xb10000b1, + 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0xb80000b8, 0xb90000b9, + 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xd70000d7, 0xe00000e0, 0xe20000e2, 0xe70000e7, + 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xee0000ee, 0xef0000ef, 0xf40000f4, 0xf70000f7, + 0xf90000f9, 0xfb0000fb, 0xfc0000fc, 0x8c000152, 0x9c000153, 0x83000192, 0x880002c6, 0xa100060c, + 0xba00061b, 0xbf00061f, 0xc1000621, 0xc2000622, 0xc3000623, 0xc4000624, 0xc5000625, 0xc6000626, + 0xc7000627, 0xc8000628, 0xc9000629, 0xca00062a, 0xcb00062b, 0xcc00062c, 0xcd00062d, 0xce00062e, + 0xcf00062f, 0xd0000630, 0xd1000631, 0xd2000632, 0xd3000633, 0xd4000634, 0xd5000635, 0xd6000636, + 0xd8000637, 0xd9000638, 0xda000639, 0xdb00063a, 0xdc000640, 0xdd000641, 0xde000642, 0xdf000643, + 0xe1000644, 0xe3000645, 0xe4000646, 0xe5000647, 0xe6000648, 0xec000649, 0xed00064a, 0xf000064b, + 0xf100064c, 0xf200064d, 0xf300064e, 0xf500064f, 0xf6000650, 0xf8000651, 0xfa000652, 0x8a000679, + 0x8100067e, 0x8d000686, 0x8f000688, 0x9a000691, 0x8e000698, 0x980006a9, 0x900006af, 0x9f0006ba, + 0xaa0006be, 0xc00006c1, 0xff0006d2, 0x9d00200c, 0x9e00200d, 0xfd00200e, 0xfe00200f, 0x96002013, + 0x97002014, 0x91002018, 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, + 0x87002021, 0x95002022, 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0x800020ac, 0x99002122, + }, +} + +// Windows1257 is the Windows 1257 encoding. 
+var Windows1257 *Charmap = &windows1257 + +var windows1257 = Charmap{ + name: "Windows 1257", + mib: identifier.Windows1257, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, 
[3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc2, 0xa8, 0x00}}, + {2, [3]byte{0xcb, 0x87, 0x00}}, {2, [3]byte{0xc2, 0xb8, 0x00}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xcb, 0x9b, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0x96, 0x00}}, {2, [3]byte{0xc2, 0xab, 0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc3, 0x86, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc5, 0x97, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc3, 0xa6, 0x00}}, + {2, [3]byte{0xc4, 0x84, 0x00}}, {2, [3]byte{0xc4, 0xae, 0x00}}, + {2, [3]byte{0xc4, 0x80, 0x00}}, {2, [3]byte{0xc4, 0x86, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc4, 0x98, 0x00}}, {2, [3]byte{0xc4, 0x92, 0x00}}, + {2, [3]byte{0xc4, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc5, 0xb9, 0x00}}, {2, [3]byte{0xc4, 0x96, 0x00}}, + {2, [3]byte{0xc4, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0xb6, 0x00}}, + {2, [3]byte{0xc4, 0xaa, 0x00}}, {2, [3]byte{0xc4, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0xa0, 
0x00}}, {2, [3]byte{0xc5, 0x83, 0x00}}, + {2, [3]byte{0xc5, 0x85, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc5, 0x8c, 0x00}}, {2, [3]byte{0xc3, 0x95, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc5, 0xb2, 0x00}}, {2, [3]byte{0xc5, 0x81, 0x00}}, + {2, [3]byte{0xc5, 0x9a, 0x00}}, {2, [3]byte{0xc5, 0xaa, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc5, 0xbb, 0x00}}, + {2, [3]byte{0xc5, 0xbd, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc4, 0x85, 0x00}}, {2, [3]byte{0xc4, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x81, 0x00}}, {2, [3]byte{0xc4, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc4, 0x99, 0x00}}, {2, [3]byte{0xc4, 0x93, 0x00}}, + {2, [3]byte{0xc4, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc5, 0xba, 0x00}}, {2, [3]byte{0xc4, 0x97, 0x00}}, + {2, [3]byte{0xc4, 0xa3, 0x00}}, {2, [3]byte{0xc4, 0xb7, 0x00}}, + {2, [3]byte{0xc4, 0xab, 0x00}}, {2, [3]byte{0xc4, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0xa1, 0x00}}, {2, [3]byte{0xc5, 0x84, 0x00}}, + {2, [3]byte{0xc5, 0x86, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc5, 0x8d, 0x00}}, {2, [3]byte{0xc3, 0xb5, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc5, 0xb3, 0x00}}, {2, [3]byte{0xc5, 0x82, 0x00}}, + {2, [3]byte{0xc5, 0x9b, 0x00}}, {2, [3]byte{0xc5, 0xab, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc5, 0xbc, 0x00}}, + {2, [3]byte{0xc5, 0xbe, 0x00}}, {2, [3]byte{0xcb, 0x99, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa60000a6, 0xa70000a7, 0x8d0000a8, 0xa90000a9, + 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0x9d0000af, 0xb00000b0, 0xb10000b1, 0xb20000b2, + 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, 0x8f0000b8, 0xb90000b9, 0xbb0000bb, + 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xc40000c4, 0xc50000c5, 0xaf0000c6, 0xc90000c9, 0xd30000d3, + 
0xd50000d5, 0xd60000d6, 0xd70000d7, 0xa80000d8, 0xdc0000dc, 0xdf0000df, 0xe40000e4, 0xe50000e5, + 0xbf0000e6, 0xe90000e9, 0xf30000f3, 0xf50000f5, 0xf60000f6, 0xf70000f7, 0xb80000f8, 0xfc0000fc, + 0xc2000100, 0xe2000101, 0xc0000104, 0xe0000105, 0xc3000106, 0xe3000107, 0xc800010c, 0xe800010d, + 0xc7000112, 0xe7000113, 0xcb000116, 0xeb000117, 0xc6000118, 0xe6000119, 0xcc000122, 0xec000123, + 0xce00012a, 0xee00012b, 0xc100012e, 0xe100012f, 0xcd000136, 0xed000137, 0xcf00013b, 0xef00013c, + 0xd9000141, 0xf9000142, 0xd1000143, 0xf1000144, 0xd2000145, 0xf2000146, 0xd400014c, 0xf400014d, + 0xaa000156, 0xba000157, 0xda00015a, 0xfa00015b, 0xd0000160, 0xf0000161, 0xdb00016a, 0xfb00016b, + 0xd8000172, 0xf8000173, 0xca000179, 0xea00017a, 0xdd00017b, 0xfd00017c, 0xde00017d, 0xfe00017e, + 0x8e0002c7, 0xff0002d9, 0x9e0002db, 0x96002013, 0x97002014, 0x91002018, 0x92002019, 0x8200201a, + 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, 0x95002022, 0x85002026, 0x89002030, + 0x8b002039, 0x9b00203a, 0x800020ac, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// Windows1258 is the Windows 1258 encoding. +var Windows1258 *Charmap = &windows1258 + +var windows1258 = Charmap{ + name: "Windows 1258", + mib: identifier.Windows1258, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, 
[3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, {1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xac}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xe2, 0x80, 0x9a}}, {2, [3]byte{0xc6, 0x92, 0x00}}, + {3, [3]byte{0xe2, 0x80, 0x9e}}, {3, [3]byte{0xe2, 0x80, 0xa6}}, + {3, [3]byte{0xe2, 0x80, 0xa0}}, {3, [3]byte{0xe2, 0x80, 0xa1}}, + {2, [3]byte{0xcb, 0x86, 0x00}}, {3, [3]byte{0xe2, 0x80, 0xb0}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xb9}}, + {2, [3]byte{0xc5, 0x92, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0x98}}, + {3, [3]byte{0xe2, 0x80, 0x99}}, {3, [3]byte{0xe2, 0x80, 0x9c}}, + {3, [3]byte{0xe2, 0x80, 0x9d}}, {3, [3]byte{0xe2, 0x80, 0xa2}}, + {3, [3]byte{0xe2, 0x80, 0x93}}, {3, [3]byte{0xe2, 0x80, 0x94}}, + {2, [3]byte{0xcb, 0x9c, 0x00}}, {3, [3]byte{0xe2, 0x84, 0xa2}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {3, [3]byte{0xe2, 0x80, 0xba}}, + {2, [3]byte{0xc5, 0x93, 0x00}}, {3, [3]byte{0xef, 0xbf, 0xbd}}, + {3, [3]byte{0xef, 0xbf, 0xbd}}, {2, [3]byte{0xc5, 0xb8, 0x00}}, + {2, [3]byte{0xc2, 0xa0, 0x00}}, {2, [3]byte{0xc2, 0xa1, 0x00}}, + {2, [3]byte{0xc2, 0xa2, 0x00}}, {2, [3]byte{0xc2, 0xa3, 0x00}}, + {2, [3]byte{0xc2, 0xa4, 0x00}}, {2, [3]byte{0xc2, 0xa5, 0x00}}, + {2, [3]byte{0xc2, 0xa6, 0x00}}, {2, [3]byte{0xc2, 0xa7, 0x00}}, + {2, [3]byte{0xc2, 0xa8, 0x00}}, {2, [3]byte{0xc2, 0xa9, 0x00}}, + {2, [3]byte{0xc2, 0xaa, 0x00}}, {2, [3]byte{0xc2, 0xab, 
0x00}}, + {2, [3]byte{0xc2, 0xac, 0x00}}, {2, [3]byte{0xc2, 0xad, 0x00}}, + {2, [3]byte{0xc2, 0xae, 0x00}}, {2, [3]byte{0xc2, 0xaf, 0x00}}, + {2, [3]byte{0xc2, 0xb0, 0x00}}, {2, [3]byte{0xc2, 0xb1, 0x00}}, + {2, [3]byte{0xc2, 0xb2, 0x00}}, {2, [3]byte{0xc2, 0xb3, 0x00}}, + {2, [3]byte{0xc2, 0xb4, 0x00}}, {2, [3]byte{0xc2, 0xb5, 0x00}}, + {2, [3]byte{0xc2, 0xb6, 0x00}}, {2, [3]byte{0xc2, 0xb7, 0x00}}, + {2, [3]byte{0xc2, 0xb8, 0x00}}, {2, [3]byte{0xc2, 0xb9, 0x00}}, + {2, [3]byte{0xc2, 0xba, 0x00}}, {2, [3]byte{0xc2, 0xbb, 0x00}}, + {2, [3]byte{0xc2, 0xbc, 0x00}}, {2, [3]byte{0xc2, 0xbd, 0x00}}, + {2, [3]byte{0xc2, 0xbe, 0x00}}, {2, [3]byte{0xc2, 0xbf, 0x00}}, + {2, [3]byte{0xc3, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x81, 0x00}}, + {2, [3]byte{0xc3, 0x82, 0x00}}, {2, [3]byte{0xc4, 0x82, 0x00}}, + {2, [3]byte{0xc3, 0x84, 0x00}}, {2, [3]byte{0xc3, 0x85, 0x00}}, + {2, [3]byte{0xc3, 0x86, 0x00}}, {2, [3]byte{0xc3, 0x87, 0x00}}, + {2, [3]byte{0xc3, 0x88, 0x00}}, {2, [3]byte{0xc3, 0x89, 0x00}}, + {2, [3]byte{0xc3, 0x8a, 0x00}}, {2, [3]byte{0xc3, 0x8b, 0x00}}, + {2, [3]byte{0xcc, 0x80, 0x00}}, {2, [3]byte{0xc3, 0x8d, 0x00}}, + {2, [3]byte{0xc3, 0x8e, 0x00}}, {2, [3]byte{0xc3, 0x8f, 0x00}}, + {2, [3]byte{0xc4, 0x90, 0x00}}, {2, [3]byte{0xc3, 0x91, 0x00}}, + {2, [3]byte{0xcc, 0x89, 0x00}}, {2, [3]byte{0xc3, 0x93, 0x00}}, + {2, [3]byte{0xc3, 0x94, 0x00}}, {2, [3]byte{0xc6, 0xa0, 0x00}}, + {2, [3]byte{0xc3, 0x96, 0x00}}, {2, [3]byte{0xc3, 0x97, 0x00}}, + {2, [3]byte{0xc3, 0x98, 0x00}}, {2, [3]byte{0xc3, 0x99, 0x00}}, + {2, [3]byte{0xc3, 0x9a, 0x00}}, {2, [3]byte{0xc3, 0x9b, 0x00}}, + {2, [3]byte{0xc3, 0x9c, 0x00}}, {2, [3]byte{0xc6, 0xaf, 0x00}}, + {2, [3]byte{0xcc, 0x83, 0x00}}, {2, [3]byte{0xc3, 0x9f, 0x00}}, + {2, [3]byte{0xc3, 0xa0, 0x00}}, {2, [3]byte{0xc3, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xa2, 0x00}}, {2, [3]byte{0xc4, 0x83, 0x00}}, + {2, [3]byte{0xc3, 0xa4, 0x00}}, {2, [3]byte{0xc3, 0xa5, 0x00}}, + {2, [3]byte{0xc3, 0xa6, 0x00}}, {2, [3]byte{0xc3, 0xa7, 0x00}}, + {2, [3]byte{0xc3, 0xa8, 0x00}}, {2, [3]byte{0xc3, 0xa9, 0x00}}, + {2, [3]byte{0xc3, 0xaa, 0x00}}, {2, [3]byte{0xc3, 0xab, 0x00}}, + {2, [3]byte{0xcc, 0x81, 0x00}}, {2, [3]byte{0xc3, 0xad, 0x00}}, + {2, [3]byte{0xc3, 0xae, 0x00}}, {2, [3]byte{0xc3, 0xaf, 0x00}}, + {2, [3]byte{0xc4, 0x91, 0x00}}, {2, [3]byte{0xc3, 0xb1, 0x00}}, + {2, [3]byte{0xcc, 0xa3, 0x00}}, {2, [3]byte{0xc3, 0xb3, 0x00}}, + {2, [3]byte{0xc3, 0xb4, 0x00}}, {2, [3]byte{0xc6, 0xa1, 0x00}}, + {2, [3]byte{0xc3, 0xb6, 0x00}}, {2, [3]byte{0xc3, 0xb7, 0x00}}, + {2, [3]byte{0xc3, 0xb8, 0x00}}, {2, [3]byte{0xc3, 0xb9, 0x00}}, + {2, [3]byte{0xc3, 0xba, 0x00}}, {2, [3]byte{0xc3, 0xbb, 0x00}}, + {2, [3]byte{0xc3, 0xbc, 0x00}}, {2, [3]byte{0xc6, 0xb0, 0x00}}, + {3, [3]byte{0xe2, 0x82, 0xab}}, {2, [3]byte{0xc3, 0xbf, 0x00}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 
0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0xa00000a0, 0xa10000a1, 0xa20000a2, 0xa30000a3, 0xa40000a4, 0xa50000a5, 0xa60000a6, 0xa70000a7, + 0xa80000a8, 0xa90000a9, 0xaa0000aa, 0xab0000ab, 0xac0000ac, 0xad0000ad, 0xae0000ae, 0xaf0000af, + 0xb00000b0, 0xb10000b1, 0xb20000b2, 0xb30000b3, 0xb40000b4, 0xb50000b5, 0xb60000b6, 0xb70000b7, + 0xb80000b8, 0xb90000b9, 0xba0000ba, 0xbb0000bb, 0xbc0000bc, 0xbd0000bd, 0xbe0000be, 0xbf0000bf, + 0xc00000c0, 0xc10000c1, 0xc20000c2, 0xc40000c4, 0xc50000c5, 0xc60000c6, 0xc70000c7, 0xc80000c8, + 0xc90000c9, 0xca0000ca, 0xcb0000cb, 0xcd0000cd, 0xce0000ce, 0xcf0000cf, 0xd10000d1, 0xd30000d3, + 0xd40000d4, 0xd60000d6, 0xd70000d7, 0xd80000d8, 0xd90000d9, 0xda0000da, 0xdb0000db, 0xdc0000dc, + 0xdf0000df, 0xe00000e0, 0xe10000e1, 0xe20000e2, 0xe40000e4, 0xe50000e5, 0xe60000e6, 0xe70000e7, + 0xe80000e8, 0xe90000e9, 0xea0000ea, 0xeb0000eb, 0xed0000ed, 0xee0000ee, 0xef0000ef, 0xf10000f1, + 0xf30000f3, 0xf40000f4, 0xf60000f6, 0xf70000f7, 0xf80000f8, 0xf90000f9, 0xfa0000fa, 0xfb0000fb, + 0xfc0000fc, 0xff0000ff, 0xc3000102, 0xe3000103, 0xd0000110, 0xf0000111, 0x8c000152, 0x9c000153, + 0x9f000178, 0x83000192, 0xd50001a0, 0xf50001a1, 0xdd0001af, 0xfd0001b0, 0x880002c6, 0x980002dc, + 0xcc000300, 0xec000301, 0xde000303, 0xd2000309, 0xf2000323, 0x96002013, 0x97002014, 0x91002018, + 0x92002019, 0x8200201a, 0x9300201c, 0x9400201d, 0x8400201e, 0x86002020, 0x87002021, 0x95002022, + 0x85002026, 0x89002030, 0x8b002039, 0x9b00203a, 0xfe0020ab, 0x800020ac, 0x99002122, 0x99002122, + 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, 0x99002122, + }, +} + +// XUserDefined is the X-User-Defined encoding. 
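+//
+// (Editorial note, a sketch rather than upstream text.) Bytes 0x00-0x7f decode
+// as ASCII, while bytes 0x80-0xff map one-to-one onto the private-use runes
+// U+F780 through U+F7FF, so the original byte is always recoverable as
+// byte(r - 0xf700).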
+// +// It is defined at http://encoding.spec.whatwg.org/#x-user-defined +var XUserDefined *Charmap = &xUserDefined + +var xUserDefined = Charmap{ + name: "X-User-Defined", + mib: identifier.XUserDefined, + asciiSuperset: true, + low: 0x80, + replacement: 0x1a, + decode: [256]utf8Enc{ + {1, [3]byte{0x00, 0x00, 0x00}}, {1, [3]byte{0x01, 0x00, 0x00}}, + {1, [3]byte{0x02, 0x00, 0x00}}, {1, [3]byte{0x03, 0x00, 0x00}}, + {1, [3]byte{0x04, 0x00, 0x00}}, {1, [3]byte{0x05, 0x00, 0x00}}, + {1, [3]byte{0x06, 0x00, 0x00}}, {1, [3]byte{0x07, 0x00, 0x00}}, + {1, [3]byte{0x08, 0x00, 0x00}}, {1, [3]byte{0x09, 0x00, 0x00}}, + {1, [3]byte{0x0a, 0x00, 0x00}}, {1, [3]byte{0x0b, 0x00, 0x00}}, + {1, [3]byte{0x0c, 0x00, 0x00}}, {1, [3]byte{0x0d, 0x00, 0x00}}, + {1, [3]byte{0x0e, 0x00, 0x00}}, {1, [3]byte{0x0f, 0x00, 0x00}}, + {1, [3]byte{0x10, 0x00, 0x00}}, {1, [3]byte{0x11, 0x00, 0x00}}, + {1, [3]byte{0x12, 0x00, 0x00}}, {1, [3]byte{0x13, 0x00, 0x00}}, + {1, [3]byte{0x14, 0x00, 0x00}}, {1, [3]byte{0x15, 0x00, 0x00}}, + {1, [3]byte{0x16, 0x00, 0x00}}, {1, [3]byte{0x17, 0x00, 0x00}}, + {1, [3]byte{0x18, 0x00, 0x00}}, {1, [3]byte{0x19, 0x00, 0x00}}, + {1, [3]byte{0x1a, 0x00, 0x00}}, {1, [3]byte{0x1b, 0x00, 0x00}}, + {1, [3]byte{0x1c, 0x00, 0x00}}, {1, [3]byte{0x1d, 0x00, 0x00}}, + {1, [3]byte{0x1e, 0x00, 0x00}}, {1, [3]byte{0x1f, 0x00, 0x00}}, + {1, [3]byte{0x20, 0x00, 0x00}}, {1, [3]byte{0x21, 0x00, 0x00}}, + {1, [3]byte{0x22, 0x00, 0x00}}, {1, [3]byte{0x23, 0x00, 0x00}}, + {1, [3]byte{0x24, 0x00, 0x00}}, {1, [3]byte{0x25, 0x00, 0x00}}, + {1, [3]byte{0x26, 0x00, 0x00}}, {1, [3]byte{0x27, 0x00, 0x00}}, + {1, [3]byte{0x28, 0x00, 0x00}}, {1, [3]byte{0x29, 0x00, 0x00}}, + {1, [3]byte{0x2a, 0x00, 0x00}}, {1, [3]byte{0x2b, 0x00, 0x00}}, + {1, [3]byte{0x2c, 0x00, 0x00}}, {1, [3]byte{0x2d, 0x00, 0x00}}, + {1, [3]byte{0x2e, 0x00, 0x00}}, {1, [3]byte{0x2f, 0x00, 0x00}}, + {1, [3]byte{0x30, 0x00, 0x00}}, {1, [3]byte{0x31, 0x00, 0x00}}, + {1, [3]byte{0x32, 0x00, 0x00}}, {1, [3]byte{0x33, 0x00, 0x00}}, + {1, [3]byte{0x34, 0x00, 0x00}}, {1, [3]byte{0x35, 0x00, 0x00}}, + {1, [3]byte{0x36, 0x00, 0x00}}, {1, [3]byte{0x37, 0x00, 0x00}}, + {1, [3]byte{0x38, 0x00, 0x00}}, {1, [3]byte{0x39, 0x00, 0x00}}, + {1, [3]byte{0x3a, 0x00, 0x00}}, {1, [3]byte{0x3b, 0x00, 0x00}}, + {1, [3]byte{0x3c, 0x00, 0x00}}, {1, [3]byte{0x3d, 0x00, 0x00}}, + {1, [3]byte{0x3e, 0x00, 0x00}}, {1, [3]byte{0x3f, 0x00, 0x00}}, + {1, [3]byte{0x40, 0x00, 0x00}}, {1, [3]byte{0x41, 0x00, 0x00}}, + {1, [3]byte{0x42, 0x00, 0x00}}, {1, [3]byte{0x43, 0x00, 0x00}}, + {1, [3]byte{0x44, 0x00, 0x00}}, {1, [3]byte{0x45, 0x00, 0x00}}, + {1, [3]byte{0x46, 0x00, 0x00}}, {1, [3]byte{0x47, 0x00, 0x00}}, + {1, [3]byte{0x48, 0x00, 0x00}}, {1, [3]byte{0x49, 0x00, 0x00}}, + {1, [3]byte{0x4a, 0x00, 0x00}}, {1, [3]byte{0x4b, 0x00, 0x00}}, + {1, [3]byte{0x4c, 0x00, 0x00}}, {1, [3]byte{0x4d, 0x00, 0x00}}, + {1, [3]byte{0x4e, 0x00, 0x00}}, {1, [3]byte{0x4f, 0x00, 0x00}}, + {1, [3]byte{0x50, 0x00, 0x00}}, {1, [3]byte{0x51, 0x00, 0x00}}, + {1, [3]byte{0x52, 0x00, 0x00}}, {1, [3]byte{0x53, 0x00, 0x00}}, + {1, [3]byte{0x54, 0x00, 0x00}}, {1, [3]byte{0x55, 0x00, 0x00}}, + {1, [3]byte{0x56, 0x00, 0x00}}, {1, [3]byte{0x57, 0x00, 0x00}}, + {1, [3]byte{0x58, 0x00, 0x00}}, {1, [3]byte{0x59, 0x00, 0x00}}, + {1, [3]byte{0x5a, 0x00, 0x00}}, {1, [3]byte{0x5b, 0x00, 0x00}}, + {1, [3]byte{0x5c, 0x00, 0x00}}, {1, [3]byte{0x5d, 0x00, 0x00}}, + {1, [3]byte{0x5e, 0x00, 0x00}}, {1, [3]byte{0x5f, 0x00, 0x00}}, + {1, [3]byte{0x60, 0x00, 0x00}}, {1, [3]byte{0x61, 0x00, 0x00}}, + {1, [3]byte{0x62, 0x00, 0x00}}, 
{1, [3]byte{0x63, 0x00, 0x00}}, + {1, [3]byte{0x64, 0x00, 0x00}}, {1, [3]byte{0x65, 0x00, 0x00}}, + {1, [3]byte{0x66, 0x00, 0x00}}, {1, [3]byte{0x67, 0x00, 0x00}}, + {1, [3]byte{0x68, 0x00, 0x00}}, {1, [3]byte{0x69, 0x00, 0x00}}, + {1, [3]byte{0x6a, 0x00, 0x00}}, {1, [3]byte{0x6b, 0x00, 0x00}}, + {1, [3]byte{0x6c, 0x00, 0x00}}, {1, [3]byte{0x6d, 0x00, 0x00}}, + {1, [3]byte{0x6e, 0x00, 0x00}}, {1, [3]byte{0x6f, 0x00, 0x00}}, + {1, [3]byte{0x70, 0x00, 0x00}}, {1, [3]byte{0x71, 0x00, 0x00}}, + {1, [3]byte{0x72, 0x00, 0x00}}, {1, [3]byte{0x73, 0x00, 0x00}}, + {1, [3]byte{0x74, 0x00, 0x00}}, {1, [3]byte{0x75, 0x00, 0x00}}, + {1, [3]byte{0x76, 0x00, 0x00}}, {1, [3]byte{0x77, 0x00, 0x00}}, + {1, [3]byte{0x78, 0x00, 0x00}}, {1, [3]byte{0x79, 0x00, 0x00}}, + {1, [3]byte{0x7a, 0x00, 0x00}}, {1, [3]byte{0x7b, 0x00, 0x00}}, + {1, [3]byte{0x7c, 0x00, 0x00}}, {1, [3]byte{0x7d, 0x00, 0x00}}, + {1, [3]byte{0x7e, 0x00, 0x00}}, {1, [3]byte{0x7f, 0x00, 0x00}}, + {3, [3]byte{0xef, 0x9e, 0x80}}, {3, [3]byte{0xef, 0x9e, 0x81}}, + {3, [3]byte{0xef, 0x9e, 0x82}}, {3, [3]byte{0xef, 0x9e, 0x83}}, + {3, [3]byte{0xef, 0x9e, 0x84}}, {3, [3]byte{0xef, 0x9e, 0x85}}, + {3, [3]byte{0xef, 0x9e, 0x86}}, {3, [3]byte{0xef, 0x9e, 0x87}}, + {3, [3]byte{0xef, 0x9e, 0x88}}, {3, [3]byte{0xef, 0x9e, 0x89}}, + {3, [3]byte{0xef, 0x9e, 0x8a}}, {3, [3]byte{0xef, 0x9e, 0x8b}}, + {3, [3]byte{0xef, 0x9e, 0x8c}}, {3, [3]byte{0xef, 0x9e, 0x8d}}, + {3, [3]byte{0xef, 0x9e, 0x8e}}, {3, [3]byte{0xef, 0x9e, 0x8f}}, + {3, [3]byte{0xef, 0x9e, 0x90}}, {3, [3]byte{0xef, 0x9e, 0x91}}, + {3, [3]byte{0xef, 0x9e, 0x92}}, {3, [3]byte{0xef, 0x9e, 0x93}}, + {3, [3]byte{0xef, 0x9e, 0x94}}, {3, [3]byte{0xef, 0x9e, 0x95}}, + {3, [3]byte{0xef, 0x9e, 0x96}}, {3, [3]byte{0xef, 0x9e, 0x97}}, + {3, [3]byte{0xef, 0x9e, 0x98}}, {3, [3]byte{0xef, 0x9e, 0x99}}, + {3, [3]byte{0xef, 0x9e, 0x9a}}, {3, [3]byte{0xef, 0x9e, 0x9b}}, + {3, [3]byte{0xef, 0x9e, 0x9c}}, {3, [3]byte{0xef, 0x9e, 0x9d}}, + {3, [3]byte{0xef, 0x9e, 0x9e}}, {3, [3]byte{0xef, 0x9e, 0x9f}}, + {3, [3]byte{0xef, 0x9e, 0xa0}}, {3, [3]byte{0xef, 0x9e, 0xa1}}, + {3, [3]byte{0xef, 0x9e, 0xa2}}, {3, [3]byte{0xef, 0x9e, 0xa3}}, + {3, [3]byte{0xef, 0x9e, 0xa4}}, {3, [3]byte{0xef, 0x9e, 0xa5}}, + {3, [3]byte{0xef, 0x9e, 0xa6}}, {3, [3]byte{0xef, 0x9e, 0xa7}}, + {3, [3]byte{0xef, 0x9e, 0xa8}}, {3, [3]byte{0xef, 0x9e, 0xa9}}, + {3, [3]byte{0xef, 0x9e, 0xaa}}, {3, [3]byte{0xef, 0x9e, 0xab}}, + {3, [3]byte{0xef, 0x9e, 0xac}}, {3, [3]byte{0xef, 0x9e, 0xad}}, + {3, [3]byte{0xef, 0x9e, 0xae}}, {3, [3]byte{0xef, 0x9e, 0xaf}}, + {3, [3]byte{0xef, 0x9e, 0xb0}}, {3, [3]byte{0xef, 0x9e, 0xb1}}, + {3, [3]byte{0xef, 0x9e, 0xb2}}, {3, [3]byte{0xef, 0x9e, 0xb3}}, + {3, [3]byte{0xef, 0x9e, 0xb4}}, {3, [3]byte{0xef, 0x9e, 0xb5}}, + {3, [3]byte{0xef, 0x9e, 0xb6}}, {3, [3]byte{0xef, 0x9e, 0xb7}}, + {3, [3]byte{0xef, 0x9e, 0xb8}}, {3, [3]byte{0xef, 0x9e, 0xb9}}, + {3, [3]byte{0xef, 0x9e, 0xba}}, {3, [3]byte{0xef, 0x9e, 0xbb}}, + {3, [3]byte{0xef, 0x9e, 0xbc}}, {3, [3]byte{0xef, 0x9e, 0xbd}}, + {3, [3]byte{0xef, 0x9e, 0xbe}}, {3, [3]byte{0xef, 0x9e, 0xbf}}, + {3, [3]byte{0xef, 0x9f, 0x80}}, {3, [3]byte{0xef, 0x9f, 0x81}}, + {3, [3]byte{0xef, 0x9f, 0x82}}, {3, [3]byte{0xef, 0x9f, 0x83}}, + {3, [3]byte{0xef, 0x9f, 0x84}}, {3, [3]byte{0xef, 0x9f, 0x85}}, + {3, [3]byte{0xef, 0x9f, 0x86}}, {3, [3]byte{0xef, 0x9f, 0x87}}, + {3, [3]byte{0xef, 0x9f, 0x88}}, {3, [3]byte{0xef, 0x9f, 0x89}}, + {3, [3]byte{0xef, 0x9f, 0x8a}}, {3, [3]byte{0xef, 0x9f, 0x8b}}, + {3, [3]byte{0xef, 0x9f, 0x8c}}, {3, [3]byte{0xef, 0x9f, 0x8d}}, + {3, [3]byte{0xef, 
0x9f, 0x8e}}, {3, [3]byte{0xef, 0x9f, 0x8f}}, + {3, [3]byte{0xef, 0x9f, 0x90}}, {3, [3]byte{0xef, 0x9f, 0x91}}, + {3, [3]byte{0xef, 0x9f, 0x92}}, {3, [3]byte{0xef, 0x9f, 0x93}}, + {3, [3]byte{0xef, 0x9f, 0x94}}, {3, [3]byte{0xef, 0x9f, 0x95}}, + {3, [3]byte{0xef, 0x9f, 0x96}}, {3, [3]byte{0xef, 0x9f, 0x97}}, + {3, [3]byte{0xef, 0x9f, 0x98}}, {3, [3]byte{0xef, 0x9f, 0x99}}, + {3, [3]byte{0xef, 0x9f, 0x9a}}, {3, [3]byte{0xef, 0x9f, 0x9b}}, + {3, [3]byte{0xef, 0x9f, 0x9c}}, {3, [3]byte{0xef, 0x9f, 0x9d}}, + {3, [3]byte{0xef, 0x9f, 0x9e}}, {3, [3]byte{0xef, 0x9f, 0x9f}}, + {3, [3]byte{0xef, 0x9f, 0xa0}}, {3, [3]byte{0xef, 0x9f, 0xa1}}, + {3, [3]byte{0xef, 0x9f, 0xa2}}, {3, [3]byte{0xef, 0x9f, 0xa3}}, + {3, [3]byte{0xef, 0x9f, 0xa4}}, {3, [3]byte{0xef, 0x9f, 0xa5}}, + {3, [3]byte{0xef, 0x9f, 0xa6}}, {3, [3]byte{0xef, 0x9f, 0xa7}}, + {3, [3]byte{0xef, 0x9f, 0xa8}}, {3, [3]byte{0xef, 0x9f, 0xa9}}, + {3, [3]byte{0xef, 0x9f, 0xaa}}, {3, [3]byte{0xef, 0x9f, 0xab}}, + {3, [3]byte{0xef, 0x9f, 0xac}}, {3, [3]byte{0xef, 0x9f, 0xad}}, + {3, [3]byte{0xef, 0x9f, 0xae}}, {3, [3]byte{0xef, 0x9f, 0xaf}}, + {3, [3]byte{0xef, 0x9f, 0xb0}}, {3, [3]byte{0xef, 0x9f, 0xb1}}, + {3, [3]byte{0xef, 0x9f, 0xb2}}, {3, [3]byte{0xef, 0x9f, 0xb3}}, + {3, [3]byte{0xef, 0x9f, 0xb4}}, {3, [3]byte{0xef, 0x9f, 0xb5}}, + {3, [3]byte{0xef, 0x9f, 0xb6}}, {3, [3]byte{0xef, 0x9f, 0xb7}}, + {3, [3]byte{0xef, 0x9f, 0xb8}}, {3, [3]byte{0xef, 0x9f, 0xb9}}, + {3, [3]byte{0xef, 0x9f, 0xba}}, {3, [3]byte{0xef, 0x9f, 0xbb}}, + {3, [3]byte{0xef, 0x9f, 0xbc}}, {3, [3]byte{0xef, 0x9f, 0xbd}}, + {3, [3]byte{0xef, 0x9f, 0xbe}}, {3, [3]byte{0xef, 0x9f, 0xbf}}, + }, + encode: [256]uint32{ + 0x00000000, 0x01000001, 0x02000002, 0x03000003, 0x04000004, 0x05000005, 0x06000006, 0x07000007, + 0x08000008, 0x09000009, 0x0a00000a, 0x0b00000b, 0x0c00000c, 0x0d00000d, 0x0e00000e, 0x0f00000f, + 0x10000010, 0x11000011, 0x12000012, 0x13000013, 0x14000014, 0x15000015, 0x16000016, 0x17000017, + 0x18000018, 0x19000019, 0x1a00001a, 0x1b00001b, 0x1c00001c, 0x1d00001d, 0x1e00001e, 0x1f00001f, + 0x20000020, 0x21000021, 0x22000022, 0x23000023, 0x24000024, 0x25000025, 0x26000026, 0x27000027, + 0x28000028, 0x29000029, 0x2a00002a, 0x2b00002b, 0x2c00002c, 0x2d00002d, 0x2e00002e, 0x2f00002f, + 0x30000030, 0x31000031, 0x32000032, 0x33000033, 0x34000034, 0x35000035, 0x36000036, 0x37000037, + 0x38000038, 0x39000039, 0x3a00003a, 0x3b00003b, 0x3c00003c, 0x3d00003d, 0x3e00003e, 0x3f00003f, + 0x40000040, 0x41000041, 0x42000042, 0x43000043, 0x44000044, 0x45000045, 0x46000046, 0x47000047, + 0x48000048, 0x49000049, 0x4a00004a, 0x4b00004b, 0x4c00004c, 0x4d00004d, 0x4e00004e, 0x4f00004f, + 0x50000050, 0x51000051, 0x52000052, 0x53000053, 0x54000054, 0x55000055, 0x56000056, 0x57000057, + 0x58000058, 0x59000059, 0x5a00005a, 0x5b00005b, 0x5c00005c, 0x5d00005d, 0x5e00005e, 0x5f00005f, + 0x60000060, 0x61000061, 0x62000062, 0x63000063, 0x64000064, 0x65000065, 0x66000066, 0x67000067, + 0x68000068, 0x69000069, 0x6a00006a, 0x6b00006b, 0x6c00006c, 0x6d00006d, 0x6e00006e, 0x6f00006f, + 0x70000070, 0x71000071, 0x72000072, 0x73000073, 0x74000074, 0x75000075, 0x76000076, 0x77000077, + 0x78000078, 0x79000079, 0x7a00007a, 0x7b00007b, 0x7c00007c, 0x7d00007d, 0x7e00007e, 0x7f00007f, + 0x8000f780, 0x8100f781, 0x8200f782, 0x8300f783, 0x8400f784, 0x8500f785, 0x8600f786, 0x8700f787, + 0x8800f788, 0x8900f789, 0x8a00f78a, 0x8b00f78b, 0x8c00f78c, 0x8d00f78d, 0x8e00f78e, 0x8f00f78f, + 0x9000f790, 0x9100f791, 0x9200f792, 0x9300f793, 0x9400f794, 0x9500f795, 0x9600f796, 0x9700f797, + 0x9800f798, 0x9900f799, 
0x9a00f79a, 0x9b00f79b, 0x9c00f79c, 0x9d00f79d, 0x9e00f79e, 0x9f00f79f, + 0xa000f7a0, 0xa100f7a1, 0xa200f7a2, 0xa300f7a3, 0xa400f7a4, 0xa500f7a5, 0xa600f7a6, 0xa700f7a7, + 0xa800f7a8, 0xa900f7a9, 0xaa00f7aa, 0xab00f7ab, 0xac00f7ac, 0xad00f7ad, 0xae00f7ae, 0xaf00f7af, + 0xb000f7b0, 0xb100f7b1, 0xb200f7b2, 0xb300f7b3, 0xb400f7b4, 0xb500f7b5, 0xb600f7b6, 0xb700f7b7, + 0xb800f7b8, 0xb900f7b9, 0xba00f7ba, 0xbb00f7bb, 0xbc00f7bc, 0xbd00f7bd, 0xbe00f7be, 0xbf00f7bf, + 0xc000f7c0, 0xc100f7c1, 0xc200f7c2, 0xc300f7c3, 0xc400f7c4, 0xc500f7c5, 0xc600f7c6, 0xc700f7c7, + 0xc800f7c8, 0xc900f7c9, 0xca00f7ca, 0xcb00f7cb, 0xcc00f7cc, 0xcd00f7cd, 0xce00f7ce, 0xcf00f7cf, + 0xd000f7d0, 0xd100f7d1, 0xd200f7d2, 0xd300f7d3, 0xd400f7d4, 0xd500f7d5, 0xd600f7d6, 0xd700f7d7, + 0xd800f7d8, 0xd900f7d9, 0xda00f7da, 0xdb00f7db, 0xdc00f7dc, 0xdd00f7dd, 0xde00f7de, 0xdf00f7df, + 0xe000f7e0, 0xe100f7e1, 0xe200f7e2, 0xe300f7e3, 0xe400f7e4, 0xe500f7e5, 0xe600f7e6, 0xe700f7e7, + 0xe800f7e8, 0xe900f7e9, 0xea00f7ea, 0xeb00f7eb, 0xec00f7ec, 0xed00f7ed, 0xee00f7ee, 0xef00f7ef, + 0xf000f7f0, 0xf100f7f1, 0xf200f7f2, 0xf300f7f3, 0xf400f7f4, 0xf500f7f5, 0xf600f7f6, 0xf700f7f7, + 0xf800f7f8, 0xf900f7f9, 0xfa00f7fa, 0xfb00f7fb, 0xfc00f7fc, 0xfd00f7fd, 0xfe00f7fe, 0xff00f7ff, + }, +} +var listAll = []encoding.Encoding{ + CodePage037, + CodePage437, + CodePage850, + CodePage852, + CodePage855, + CodePage858, + CodePage860, + CodePage862, + CodePage863, + CodePage865, + CodePage866, + CodePage1047, + CodePage1140, + ISO8859_1, + ISO8859_2, + ISO8859_3, + ISO8859_4, + ISO8859_5, + ISO8859_6, + ISO8859_6E, + ISO8859_6I, + ISO8859_7, + ISO8859_8, + ISO8859_8E, + ISO8859_8I, + ISO8859_9, + ISO8859_10, + ISO8859_13, + ISO8859_14, + ISO8859_15, + ISO8859_16, + KOI8R, + KOI8U, + Macintosh, + MacintoshCyrillic, + Windows874, + Windows1250, + Windows1251, + Windows1252, + Windows1253, + Windows1254, + Windows1255, + Windows1256, + Windows1257, + Windows1258, + XUserDefined, +} + +// Total table size 87024 bytes (84KiB); checksum: 811C9DC5 diff --git a/vendor/golang.org/x/text/encoding/encoding.go b/vendor/golang.org/x/text/encoding/encoding.go new file mode 100644 index 0000000000..221f175c01 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/encoding.go @@ -0,0 +1,335 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package encoding defines an interface for character encodings, such as Shift +// JIS and Windows 1252, that can convert to and from UTF-8. +// +// Encoding implementations are provided in other packages, such as +// golang.org/x/text/encoding/charmap and +// golang.org/x/text/encoding/japanese. +package encoding // import "golang.org/x/text/encoding" + +import ( + "errors" + "io" + "strconv" + "unicode/utf8" + + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/transform" +) + +// TODO: +// - There seems to be some inconsistency in when decoders return errors +// and when not. Also documentation seems to suggest they shouldn't return +// errors at all (except for UTF-16). +// - Encoders seem to rely on or at least benefit from the input being in NFC +// normal form. Perhaps add an example how users could prepare their output. + +// Encoding is a character set encoding that can be transformed to and from +// UTF-8. +type Encoding interface { + // NewDecoder returns a Decoder. + NewDecoder() *Decoder + + // NewEncoder returns an Encoder. 
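+	//
+	// (Editorial sketch, not upstream text; "legacyBytes" is a placeholder
+	// input and the sibling charmap package is assumed to be vendored as
+	// well.) A typical round trip through an Encoding:
+	//
+	//	dec := charmap.Windows1256.NewDecoder()
+	//	u, err := dec.Bytes(legacyBytes)  // legacy bytes -> UTF-8
+	//	enc := charmap.Windows1256.NewEncoder()
+	//	b, err2 := enc.Bytes(u)           // UTF-8 -> legacy bytes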
+	NewEncoder() *Encoder
+}
+
+// A Decoder converts bytes to UTF-8. It implements transform.Transformer.
+//
+// Transforming source bytes that are not of that encoding will not result in an
+// error per se. Each byte that cannot be transcoded will be represented in the
+// output by the UTF-8 encoding of '\uFFFD', the replacement rune.
+type Decoder struct {
+	transform.Transformer
+
+	// This forces external creators of Decoders to use names in struct
+	// initializers, allowing for future extensibility without having to break
+	// code.
+	_ struct{}
+}
+
+// Bytes converts the given encoded bytes to UTF-8. It returns the converted
+// bytes or nil, err if any error occurred.
+func (d *Decoder) Bytes(b []byte) ([]byte, error) {
+	b, _, err := transform.Bytes(d, b)
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// String converts the given encoded string to UTF-8. It returns the converted
+// string or "", err if any error occurred.
+func (d *Decoder) String(s string) (string, error) {
+	s, _, err := transform.String(d, s)
+	if err != nil {
+		return "", err
+	}
+	return s, nil
+}
+
+// Reader wraps another Reader to decode its bytes.
+//
+// The Decoder may not be used for any other operation as long as the returned
+// Reader is in use.
+func (d *Decoder) Reader(r io.Reader) io.Reader {
+	return transform.NewReader(r, d)
+}
+
+// An Encoder converts bytes from UTF-8. It implements transform.Transformer.
+//
+// Each rune that cannot be transcoded will result in an error. In this case,
+// the transform will consume all source bytes up to, but not including, the
+// offending rune. Source bytes that are not valid UTF-8 will be replaced by
+// `\uFFFD`. To return early with an error instead, use transform.Chain to
+// preprocess the data with a UTF8Validator.
+type Encoder struct {
+	transform.Transformer
+
+	// This forces external creators of Encoders to use names in struct
+	// initializers, allowing for future extensibility without having to break
+	// code.
+	_ struct{}
+}
+
+// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if
+// any error occurred.
+func (e *Encoder) Bytes(b []byte) ([]byte, error) {
+	b, _, err := transform.Bytes(e, b)
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// String converts a string from UTF-8. It returns the converted string or
+// "", err if any error occurred.
+func (e *Encoder) String(s string) (string, error) {
+	s, _, err := transform.String(e, s)
+	if err != nil {
+		return "", err
+	}
+	return s, nil
+}
+
+// Writer wraps another Writer to encode its UTF-8 output.
+//
+// The Encoder may not be used for any other operation as long as the returned
+// Writer is in use.
+func (e *Encoder) Writer(w io.Writer) io.Writer {
+	return transform.NewWriter(w, e)
+}
+
+// ASCIISub is the ASCII substitute character, as recommended by
+// http://unicode.org/reports/tr36/#Text_Comparison
+const ASCIISub = '\x1a'
+
+// Nop is the nop encoding. Its transformed bytes are the same as the source
+// bytes; it does not replace invalid UTF-8 sequences.
+var Nop Encoding = nop{}
+
+type nop struct{}
+
+func (nop) NewDecoder() *Decoder {
+	return &Decoder{Transformer: transform.Nop}
+}
+func (nop) NewEncoder() *Encoder {
+	return &Encoder{Transformer: transform.Nop}
+}
+
+// Replacement is the replacement encoding. Decoding from the replacement
+// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to
+// the replacement encoding yields the same as the source bytes except that
+// invalid UTF-8 is converted to '\uFFFD'.
+//
+// It is defined at http://encoding.spec.whatwg.org/#replacement
+var Replacement Encoding = replacement{}
+
+type replacement struct{}
+
+func (replacement) NewDecoder() *Decoder {
+	return &Decoder{Transformer: replacementDecoder{}}
+}
+
+func (replacement) NewEncoder() *Encoder {
+	return &Encoder{Transformer: replacementEncoder{}}
+}
+
+func (replacement) ID() (mib identifier.MIB, other string) {
+	return identifier.Replacement, ""
+}
+
+type replacementDecoder struct{ transform.NopResetter }
+
+func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	if len(dst) < 3 {
+		return 0, 0, transform.ErrShortDst
+	}
+	if atEOF {
+		const fffd = "\ufffd"
+		dst[0] = fffd[0]
+		dst[1] = fffd[1]
+		dst[2] = fffd[2]
+		nDst = 3
+	}
+	return nDst, len(src), nil
+}
+
+type replacementEncoder struct{ transform.NopResetter }
+
+func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	r, size := rune(0), 0
+
+	for ; nSrc < len(src); nSrc += size {
+		r = rune(src[nSrc])
+
+		// Decode a 1-byte rune.
+		if r < utf8.RuneSelf {
+			size = 1
+
+		} else {
+			// Decode a multi-byte rune.
+			r, size = utf8.DecodeRune(src[nSrc:])
+			if size == 1 {
+				// All valid runes of size 1 (those below utf8.RuneSelf) were
+				// handled above. We have invalid UTF-8 or we haven't seen the
+				// full character yet.
+				if !atEOF && !utf8.FullRune(src[nSrc:]) {
+					err = transform.ErrShortSrc
+					break
+				}
+				r = '\ufffd'
+			}
+		}
+
+		if nDst+utf8.RuneLen(r) > len(dst) {
+			err = transform.ErrShortDst
+			break
+		}
+		nDst += utf8.EncodeRune(dst[nDst:], r)
+	}
+	return nDst, nSrc, err
+}
+
+// HTMLEscapeUnsupported wraps encoders to replace source runes outside the
+// repertoire of the destination encoding with HTML escape sequences.
+//
+// This wrapper exists to comply with URL and HTML forms requiring a
+// non-terminating legacy encoder. The produced sequences may lead to data
+// loss as they are indistinguishable from legitimate input. To avoid this
+// issue, use UTF-8 encodings whenever possible.
+func HTMLEscapeUnsupported(e *Encoder) *Encoder {
+	return &Encoder{Transformer: &errorHandler{e, errorToHTML}}
+}
+
+// ReplaceUnsupported wraps encoders to replace source runes outside the
+// repertoire of the destination encoding with an encoding-specific
+// replacement.
+//
+// This wrapper is only provided for backwards compatibility and legacy
+// handling. Its use is strongly discouraged. Use UTF-8 whenever possible.
+func ReplaceUnsupported(e *Encoder) *Encoder {
+	return &Encoder{Transformer: &errorHandler{e, errorToReplacement}}
+}
+
+type errorHandler struct {
+	*Encoder
+	handler func(dst []byte, r rune, err repertoireError) (n int, ok bool)
+}
+
+// TODO: consider making this error public in some form.
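+//
+// (Editorial sketch, not upstream text.) From a caller's perspective, the two
+// wrappers above are applied to an Encoder before use, for example:
+//
+//	enc := encoding.HTMLEscapeUnsupported(charmap.Windows1252.NewEncoder())
+//	out, err := enc.Bytes([]byte("snowman: \u2603"))
+//	// out is "snowman: &#9731;" since U+2603 is outside Windows-1252.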
+type repertoireError interface { + Replacement() byte +} + +func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF) + for err != nil { + rerr, ok := err.(repertoireError) + if !ok { + return nDst, nSrc, err + } + r, sz := utf8.DecodeRune(src[nSrc:]) + n, ok := h.handler(dst[nDst:], r, rerr) + if !ok { + return nDst, nSrc, transform.ErrShortDst + } + err = nil + nDst += n + if nSrc += sz; nSrc < len(src) { + var dn, sn int + dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF) + nDst += dn + nSrc += sn + } + } + return nDst, nSrc, err +} + +func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) { + buf := [8]byte{} + b := strconv.AppendUint(buf[:0], uint64(r), 10) + if n = len(b) + len("&#;"); n >= len(dst) { + return 0, false + } + dst[0] = '&' + dst[1] = '#' + dst[copy(dst[2:], b)+2] = ';' + return n, true +} + +func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) { + if len(dst) == 0 { + return 0, false + } + dst[0] = err.Replacement() + return 1, true +} + +// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8. +var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8") + +// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first +// input byte that is not valid UTF-8. +var UTF8Validator transform.Transformer = utf8Validator{} + +type utf8Validator struct{ transform.NopResetter } + +func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + n := len(src) + if n > len(dst) { + n = len(dst) + } + for i := 0; i < n; { + if c := src[i]; c < utf8.RuneSelf { + dst[i] = c + i++ + continue + } + _, size := utf8.DecodeRune(src[i:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet. + err = ErrInvalidUTF8 + if !atEOF && !utf8.FullRune(src[i:]) { + err = transform.ErrShortSrc + } + return i, i, err + } + if i+size > len(dst) { + return i, i, transform.ErrShortDst + } + for ; size > 0; size-- { + dst[i] = src[i] + i++ + } + } + if len(src) > len(dst) { + err = transform.ErrShortDst + } + return n, n, err +} diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/gen.go b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go new file mode 100644 index 0000000000..0c8eba7e52 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/identifier/gen.go @@ -0,0 +1,137 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
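+
+// (Editorial note, not upstream text.) This file is a generator: the "ignore"
+// build tag below keeps it out of normal builds, and the //go:generate
+// directive in identifier.go runs it to regenerate mib.go from the IANA
+// character-sets registry.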
+ +// +build ignore + +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "log" + "strings" + + "golang.org/x/text/internal/gen" +) + +type registry struct { + XMLName xml.Name `xml:"registry"` + Updated string `xml:"updated"` + Registry []struct { + ID string `xml:"id,attr"` + Record []struct { + Name string `xml:"name"` + Xref []struct { + Type string `xml:"type,attr"` + Data string `xml:"data,attr"` + } `xml:"xref"` + Desc struct { + Data string `xml:",innerxml"` + // Any []struct { + // Data string `xml:",chardata"` + // } `xml:",any"` + // Data string `xml:",chardata"` + } `xml:"description,"` + MIB string `xml:"value"` + Alias []string `xml:"alias"` + MIME string `xml:"preferred_alias"` + } `xml:"record"` + } `xml:"registry"` +} + +func main() { + r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml") + reg := ®istry{} + if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF { + log.Fatalf("Error decoding charset registry: %v", err) + } + if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" { + log.Fatalf("Unexpected ID %s", reg.Registry[0].ID) + } + + w := &bytes.Buffer{} + fmt.Fprintf(w, "const (\n") + for _, rec := range reg.Registry[0].Record { + constName := "" + for _, a := range rec.Alias { + if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 { + // Some of the constant definitions have comments in them. Strip those. + constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0]) + } + } + if constName == "" { + switch rec.MIB { + case "2085": + constName = "HZGB2312" // Not listed as alias for some reason. + default: + log.Fatalf("No cs alias defined for %s.", rec.MIB) + } + } + if rec.MIME != "" { + rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME) + } + fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME) + if len(rec.Desc.Data) > 0 { + fmt.Fprint(w, "// ") + d := xml.NewDecoder(strings.NewReader(rec.Desc.Data)) + inElem := true + attr := "" + for { + t, err := d.Token() + if err != nil { + if err != io.EOF { + log.Fatal(err) + } + break + } + switch x := t.(type) { + case xml.CharData: + attr = "" // Don't need attribute info. + a := bytes.Split([]byte(x), []byte("\n")) + for i, b := range a { + if b = bytes.TrimSpace(b); len(b) != 0 { + if !inElem && i > 0 { + fmt.Fprint(w, "\n// ") + } + inElem = false + fmt.Fprintf(w, "%s ", string(b)) + } + } + case xml.StartElement: + if x.Name.Local == "xref" { + inElem = true + use := false + for _, a := range x.Attr { + if a.Name.Local == "type" { + use = use || a.Value != "person" + } + if a.Name.Local == "data" && use { + attr = a.Value + " " + } + } + } + case xml.EndElement: + inElem = false + fmt.Fprint(w, attr) + } + } + fmt.Fprint(w, "\n") + } + for _, x := range rec.Xref { + switch x.Type { + case "rfc": + fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data)) + case "uri": + fmt.Fprintf(w, "// Reference: %s\n", x.Data) + } + } + fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB) + fmt.Fprintln(w) + } + fmt.Fprintln(w, ")") + + gen.WriteGoFile("mib.go", "identifier", w.Bytes()) +} diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go b/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go new file mode 100644 index 0000000000..7351b4ef8a --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go @@ -0,0 +1,81 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go
+
+// Package identifier defines the contract between implementations of Encoding
+// and Index by defining identifiers that uniquely identify standardized coded
+// character sets (CCS) and character encoding schemes (CES), which we will
+// together refer to as encodings, for which Encoding implementations provide
+// converters to and from UTF-8. This package is typically only of concern to
+// implementers of Indexes and Encodings.
+//
+// One part of the identifier is the MIB code, which is defined by IANA and
+// uniquely identifies a CCS or CES. Each code is associated with data that
+// references authorities, official documentation as well as aliases and MIME
+// names.
+//
+// Not all CESs are covered by the IANA registry. The "other" string that is
+// returned by ID can be used to identify other character sets or versions of
+// existing ones.
+//
+// It is recommended that each package that provides a set of Encodings provide
+// the All and Common variables to reference all supported encodings and a
+// commonly used subset. This allows Index implementations to include all
+// available encodings without explicitly referencing or knowing about them.
+package identifier
+
+// Note: this package is internal, but could be made public if there is a need
+// for writing third-party Indexes and Encodings.
+
+// References:
+// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt
+// - http://www.iana.org/assignments/character-sets/character-sets.xhtml
+// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib
+// - http://www.ietf.org/rfc/rfc2978.txt
+// - http://www.unicode.org/reports/tr22/
+// - http://www.w3.org/TR/encoding/
+// - https://encoding.spec.whatwg.org/
+// - https://encoding.spec.whatwg.org/encodings.json
+// - https://tools.ietf.org/html/rfc6657#section-5
+
+// Interface can be implemented by Encodings to define the CCS or CES for which
+// it implements conversions.
+type Interface interface {
+	// ID returns an encoding identifier. Exactly one of the mib and other
+	// values should be non-zero.
+	//
+	// In the usual case it is only necessary to indicate the MIB code. The
+	// other string can be used to specify encodings for which there is no MIB,
+	// such as "x-mac-dingbat".
+	//
+	// The other string may only contain the characters a-z, A-Z, 0-9, - and _.
+	ID() (mib MIB, other string)
+
+	// NOTE: the restrictions on the encoding are to allow extending the syntax
+	// with additional information such as versions, vendors and other variants.
+}
+
+// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds
+// some identifiers for some encodings that are not covered by the IANA
+// standard.
+//
+// See http://www.iana.org/assignments/ianacharset-mib.
+type MIB uint16
+
+// These additional MIB types are not defined in IANA. They are added because
+// they are common and defined within the text repo.
+const (
+	// Unofficial marks the start of encodings not registered by IANA.
+	Unofficial MIB = 10000 + iota
+
+	// Replacement is the WhatWG replacement encoding.
+	Replacement
+
+	// XUserDefined is the code for x-user-defined.
+	XUserDefined
+
+	// MacintoshCyrillic is the code for x-mac-cyrillic.
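+	//
+	// (Editorial sketch, not upstream text.) An Encoding with no assigned MIB
+	// identifies itself through the other string instead, e.g.:
+	//
+	//	func (e macDingbat) ID() (mib MIB, other string) {
+	//		return 0, "x-mac-dingbat" // hypothetical type; no MIB assigned
+	//	}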
+ MacintoshCyrillic +) diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go new file mode 100644 index 0000000000..768842b0a5 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go @@ -0,0 +1,1621 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package identifier + +const ( + // ASCII is the MIB identifier with IANA name US-ASCII (MIME: US-ASCII). + // + // ANSI X3.4-1986 + // Reference: RFC2046 + ASCII MIB = 3 + + // ISOLatin1 is the MIB identifier with IANA name ISO_8859-1:1987 (MIME: ISO-8859-1). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin1 MIB = 4 + + // ISOLatin2 is the MIB identifier with IANA name ISO_8859-2:1987 (MIME: ISO-8859-2). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin2 MIB = 5 + + // ISOLatin3 is the MIB identifier with IANA name ISO_8859-3:1988 (MIME: ISO-8859-3). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin3 MIB = 6 + + // ISOLatin4 is the MIB identifier with IANA name ISO_8859-4:1988 (MIME: ISO-8859-4). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin4 MIB = 7 + + // ISOLatinCyrillic is the MIB identifier with IANA name ISO_8859-5:1988 (MIME: ISO-8859-5). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinCyrillic MIB = 8 + + // ISOLatinArabic is the MIB identifier with IANA name ISO_8859-6:1987 (MIME: ISO-8859-6). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinArabic MIB = 9 + + // ISOLatinGreek is the MIB identifier with IANA name ISO_8859-7:1987 (MIME: ISO-8859-7). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1947 + // Reference: RFC1345 + ISOLatinGreek MIB = 10 + + // ISOLatinHebrew is the MIB identifier with IANA name ISO_8859-8:1988 (MIME: ISO-8859-8). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinHebrew MIB = 11 + + // ISOLatin5 is the MIB identifier with IANA name ISO_8859-9:1989 (MIME: ISO-8859-9). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin5 MIB = 12 + + // ISOLatin6 is the MIB identifier with IANA name ISO-8859-10 (MIME: ISO-8859-10). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin6 MIB = 13 + + // ISOTextComm is the MIB identifier with IANA name ISO_6937-2-add. + // + // ISO-IR: International Register of Escape Sequences and ISO 6937-2:1983 + // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
+ // Reference: RFC1345 + ISOTextComm MIB = 14 + + // HalfWidthKatakana is the MIB identifier with IANA name JIS_X0201. + // + // JIS X 0201-1976. One byte only, this is equivalent to + // JIS/Roman (similar to ASCII) plus eight-bit half-width + // Katakana + // Reference: RFC1345 + HalfWidthKatakana MIB = 15 + + // JISEncoding is the MIB identifier with IANA name JIS_Encoding. + // + // JIS X 0202-1991. Uses ISO 2022 escape sequences to + // shift code sets as documented in JIS X 0202-1991. + JISEncoding MIB = 16 + + // ShiftJIS is the MIB identifier with IANA name Shift_JIS (MIME: Shift_JIS). + // + // This charset is an extension of csHalfWidthKatakana by + // adding graphic characters in JIS X 0208. The CCS's are + // JIS X0201:1997 and JIS X0208:1997. The + // complete definition is shown in Appendix 1 of JIS + // X0208:1997. + // This charset can be used for the top-level media type "text". + ShiftJIS MIB = 17 + + // EUCPkdFmtJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Packed_Format_for_Japanese (MIME: EUC-JP). + // + // Standardized by OSF, UNIX International, and UNIX Systems + // Laboratories Pacific. Uses ISO 2022 rules to select + // code set 0: US-ASCII (a single 7-bit byte set) + // code set 1: JIS X0208-1990 (a double 8-bit byte set) + // restricted to A0-FF in both bytes + // code set 2: Half Width Katakana (a single 7-bit byte set) + // requiring SS2 as the character prefix + // code set 3: JIS X0212-1990 (a double 7-bit byte set) + // restricted to A0-FF in both bytes + // requiring SS3 as the character prefix + EUCPkdFmtJapanese MIB = 18 + + // EUCFixWidJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Fixed_Width_for_Japanese. + // + // Used in Japan. Each character is 2 octets. + // code set 0: US-ASCII (a single 7-bit byte set) + // 1st byte = 00 + // 2nd byte = 20-7E + // code set 1: JIS X0208-1990 (a double 7-bit byte set) + // restricted to A0-FF in both bytes + // code set 2: Half Width Katakana (a single 7-bit byte set) + // 1st byte = 00 + // 2nd byte = A0-FF + // code set 3: JIS X0212-1990 (a double 7-bit byte set) + // restricted to A0-FF in + // the first byte + // and 21-7E in the second byte + EUCFixWidJapanese MIB = 19 + + // ISO4UnitedKingdom is the MIB identifier with IANA name BS_4730. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO4UnitedKingdom MIB = 20 + + // ISO11SwedishForNames is the MIB identifier with IANA name SEN_850200_C. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO11SwedishForNames MIB = 21 + + // ISO15Italian is the MIB identifier with IANA name IT. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO15Italian MIB = 22 + + // ISO17Spanish is the MIB identifier with IANA name ES. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO17Spanish MIB = 23 + + // ISO21German is the MIB identifier with IANA name DIN_66003. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO21German MIB = 24 + + // ISO60Norwegian1 is the MIB identifier with IANA name NS_4551-1. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO60Norwegian1 MIB = 25 + + // ISO69French is the MIB identifier with IANA name NF_Z_62-010. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO69French MIB = 26 + + // ISO10646UTF1 is the MIB identifier with IANA name ISO-10646-UTF-1. + // + // Universal Transfer Format (1), this is the multibyte + // encoding, that subsets ASCII-7. It does not have byte + // ordering issues. + ISO10646UTF1 MIB = 27 + + // ISO646basic1983 is the MIB identifier with IANA name ISO_646.basic:1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO646basic1983 MIB = 28 + + // INVARIANT is the MIB identifier with IANA name INVARIANT. + // + // Reference: RFC1345 + INVARIANT MIB = 29 + + // ISO2IntlRefVersion is the MIB identifier with IANA name ISO_646.irv:1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO2IntlRefVersion MIB = 30 + + // NATSSEFI is the MIB identifier with IANA name NATS-SEFI. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSSEFI MIB = 31 + + // NATSSEFIADD is the MIB identifier with IANA name NATS-SEFI-ADD. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSSEFIADD MIB = 32 + + // NATSDANO is the MIB identifier with IANA name NATS-DANO. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSDANO MIB = 33 + + // NATSDANOADD is the MIB identifier with IANA name NATS-DANO-ADD. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSDANOADD MIB = 34 + + // ISO10Swedish is the MIB identifier with IANA name SEN_850200_B. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO10Swedish MIB = 35 + + // KSC56011987 is the MIB identifier with IANA name KS_C_5601-1987. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + KSC56011987 MIB = 36 + + // ISO2022KR is the MIB identifier with IANA name ISO-2022-KR (MIME: ISO-2022-KR). + // + // rfc1557 (see also KS_C_5601-1987) + // Reference: RFC1557 + ISO2022KR MIB = 37 + + // EUCKR is the MIB identifier with IANA name EUC-KR (MIME: EUC-KR). + // + // rfc1557 (see also KS_C_5861-1992) + // Reference: RFC1557 + EUCKR MIB = 38 + + // ISO2022JP is the MIB identifier with IANA name ISO-2022-JP (MIME: ISO-2022-JP). + // + // rfc1468 (see also rfc2237 ) + // Reference: RFC1468 + ISO2022JP MIB = 39 + + // ISO2022JP2 is the MIB identifier with IANA name ISO-2022-JP-2 (MIME: ISO-2022-JP-2). + // + // rfc1554 + // Reference: RFC1554 + ISO2022JP2 MIB = 40 + + // ISO13JISC6220jp is the MIB identifier with IANA name JIS_C6220-1969-jp. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO13JISC6220jp MIB = 41 + + // ISO14JISC6220ro is the MIB identifier with IANA name JIS_C6220-1969-ro. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO14JISC6220ro MIB = 42 + + // ISO16Portuguese is the MIB identifier with IANA name PT. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO16Portuguese MIB = 43 + + // ISO18Greek7Old is the MIB identifier with IANA name greek7-old. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO18Greek7Old MIB = 44 + + // ISO19LatinGreek is the MIB identifier with IANA name latin-greek. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO19LatinGreek MIB = 45 + + // ISO25French is the MIB identifier with IANA name NF_Z_62-010_(1973). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO25French MIB = 46 + + // ISO27LatinGreek1 is the MIB identifier with IANA name Latin-greek-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO27LatinGreek1 MIB = 47 + + // ISO5427Cyrillic is the MIB identifier with IANA name ISO_5427. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO5427Cyrillic MIB = 48 + + // ISO42JISC62261978 is the MIB identifier with IANA name JIS_C6226-1978. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO42JISC62261978 MIB = 49 + + // ISO47BSViewdata is the MIB identifier with IANA name BS_viewdata. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO47BSViewdata MIB = 50 + + // ISO49INIS is the MIB identifier with IANA name INIS. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO49INIS MIB = 51 + + // ISO50INIS8 is the MIB identifier with IANA name INIS-8. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO50INIS8 MIB = 52 + + // ISO51INISCyrillic is the MIB identifier with IANA name INIS-cyrillic. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO51INISCyrillic MIB = 53 + + // ISO54271981 is the MIB identifier with IANA name ISO_5427:1981. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO54271981 MIB = 54 + + // ISO5428Greek is the MIB identifier with IANA name ISO_5428:1980. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO5428Greek MIB = 55 + + // ISO57GB1988 is the MIB identifier with IANA name GB_1988-80. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO57GB1988 MIB = 56 + + // ISO58GB231280 is the MIB identifier with IANA name GB_2312-80. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO58GB231280 MIB = 57 + + // ISO61Norwegian2 is the MIB identifier with IANA name NS_4551-2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO61Norwegian2 MIB = 58 + + // ISO70VideotexSupp1 is the MIB identifier with IANA name videotex-suppl. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO70VideotexSupp1 MIB = 59 + + // ISO84Portuguese2 is the MIB identifier with IANA name PT2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO84Portuguese2 MIB = 60 + + // ISO85Spanish2 is the MIB identifier with IANA name ES2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO85Spanish2 MIB = 61 + + // ISO86Hungarian is the MIB identifier with IANA name MSZ_7795.3. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO86Hungarian MIB = 62 + + // ISO87JISX0208 is the MIB identifier with IANA name JIS_C6226-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO87JISX0208 MIB = 63 + + // ISO88Greek7 is the MIB identifier with IANA name greek7. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO88Greek7 MIB = 64 + + // ISO89ASMO449 is the MIB identifier with IANA name ASMO_449. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO89ASMO449 MIB = 65 + + // ISO90 is the MIB identifier with IANA name iso-ir-90. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO90 MIB = 66 + + // ISO91JISC62291984a is the MIB identifier with IANA name JIS_C6229-1984-a. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO91JISC62291984a MIB = 67 + + // ISO92JISC62991984b is the MIB identifier with IANA name JIS_C6229-1984-b. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO92JISC62991984b MIB = 68 + + // ISO93JIS62291984badd is the MIB identifier with IANA name JIS_C6229-1984-b-add. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO93JIS62291984badd MIB = 69 + + // ISO94JIS62291984hand is the MIB identifier with IANA name JIS_C6229-1984-hand. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO94JIS62291984hand MIB = 70 + + // ISO95JIS62291984handadd is the MIB identifier with IANA name JIS_C6229-1984-hand-add. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO95JIS62291984handadd MIB = 71 + + // ISO96JISC62291984kana is the MIB identifier with IANA name JIS_C6229-1984-kana. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO96JISC62291984kana MIB = 72 + + // ISO2033 is the MIB identifier with IANA name ISO_2033-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO2033 MIB = 73 + + // ISO99NAPLPS is the MIB identifier with IANA name ANSI_X3.110-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO99NAPLPS MIB = 74 + + // ISO102T617bit is the MIB identifier with IANA name T.61-7bit. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO102T617bit MIB = 75 + + // ISO103T618bit is the MIB identifier with IANA name T.61-8bit. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO103T618bit MIB = 76 + + // ISO111ECMACyrillic is the MIB identifier with IANA name ECMA-cyrillic. + // + // ISO registry + // (formerly ECMA + // registry ) + ISO111ECMACyrillic MIB = 77 + + // ISO121Canadian1 is the MIB identifier with IANA name CSA_Z243.4-1985-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO121Canadian1 MIB = 78 + + // ISO122Canadian2 is the MIB identifier with IANA name CSA_Z243.4-1985-2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO122Canadian2 MIB = 79 + + // ISO123CSAZ24341985gr is the MIB identifier with IANA name CSA_Z243.4-1985-gr. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO123CSAZ24341985gr MIB = 80 + + // ISO88596E is the MIB identifier with IANA name ISO_8859-6-E (MIME: ISO-8859-6-E). + // + // rfc1556 + // Reference: RFC1556 + ISO88596E MIB = 81 + + // ISO88596I is the MIB identifier with IANA name ISO_8859-6-I (MIME: ISO-8859-6-I). + // + // rfc1556 + // Reference: RFC1556 + ISO88596I MIB = 82 + + // ISO128T101G2 is the MIB identifier with IANA name T.101-G2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
+ // Reference: RFC1345 + ISO128T101G2 MIB = 83 + + // ISO88598E is the MIB identifier with IANA name ISO_8859-8-E (MIME: ISO-8859-8-E). + // + // rfc1556 + // Reference: RFC1556 + ISO88598E MIB = 84 + + // ISO88598I is the MIB identifier with IANA name ISO_8859-8-I (MIME: ISO-8859-8-I). + // + // rfc1556 + // Reference: RFC1556 + ISO88598I MIB = 85 + + // ISO139CSN369103 is the MIB identifier with IANA name CSN_369103. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO139CSN369103 MIB = 86 + + // ISO141JUSIB1002 is the MIB identifier with IANA name JUS_I.B1.002. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO141JUSIB1002 MIB = 87 + + // ISO143IECP271 is the MIB identifier with IANA name IEC_P27-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO143IECP271 MIB = 88 + + // ISO146Serbian is the MIB identifier with IANA name JUS_I.B1.003-serb. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO146Serbian MIB = 89 + + // ISO147Macedonian is the MIB identifier with IANA name JUS_I.B1.003-mac. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO147Macedonian MIB = 90 + + // ISO150GreekCCITT is the MIB identifier with IANA name greek-ccitt. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO150GreekCCITT MIB = 91 + + // ISO151Cuba is the MIB identifier with IANA name NC_NC00-10:81. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO151Cuba MIB = 92 + + // ISO6937Add is the MIB identifier with IANA name ISO_6937-2-25. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO6937Add MIB = 93 + + // ISO153GOST1976874 is the MIB identifier with IANA name GOST_19768-74. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO153GOST1976874 MIB = 94 + + // ISO8859Supp is the MIB identifier with IANA name ISO_8859-supp. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO8859Supp MIB = 95 + + // ISO10367Box is the MIB identifier with IANA name ISO_10367-box. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO10367Box MIB = 96 + + // ISO158Lap is the MIB identifier with IANA name latin-lap. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO158Lap MIB = 97 + + // ISO159JISX02121990 is the MIB identifier with IANA name JIS_X0212-1990. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO159JISX02121990 MIB = 98 + + // ISO646Danish is the MIB identifier with IANA name DS_2089. + // + // Danish Standard, DS 2089, February 1974 + // Reference: RFC1345 + ISO646Danish MIB = 99 + + // USDK is the MIB identifier with IANA name us-dk. + // + // Reference: RFC1345 + USDK MIB = 100 + + // DKUS is the MIB identifier with IANA name dk-us. + // + // Reference: RFC1345 + DKUS MIB = 101 + + // KSC5636 is the MIB identifier with IANA name KSC5636. + // + // Reference: RFC1345 + KSC5636 MIB = 102 + + // Unicode11UTF7 is the MIB identifier with IANA name UNICODE-1-1-UTF-7. + // + // rfc1642 + // Reference: RFC1642 + Unicode11UTF7 MIB = 103 + + // ISO2022CN is the MIB identifier with IANA name ISO-2022-CN. + // + // rfc1922 + // Reference: RFC1922 + ISO2022CN MIB = 104 + + // ISO2022CNEXT is the MIB identifier with IANA name ISO-2022-CN-EXT. + // + // rfc1922 + // Reference: RFC1922 + ISO2022CNEXT MIB = 105 + + // UTF8 is the MIB identifier with IANA name UTF-8. + // + // rfc3629 + // Reference: RFC3629 + UTF8 MIB = 106 + + // ISO885913 is the MIB identifier with IANA name ISO-8859-13. + // + // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-13 http://www.iana.org/assignments/charset-reg/ISO-8859-13 + ISO885913 MIB = 109 + + // ISO885914 is the MIB identifier with IANA name ISO-8859-14. + // + // ISO See http://www.iana.org/assignments/charset-reg/ISO-8859-14 + ISO885914 MIB = 110 + + // ISO885915 is the MIB identifier with IANA name ISO-8859-15. + // + // ISO + // Please see: http://www.iana.org/assignments/charset-reg/ISO-8859-15 + ISO885915 MIB = 111 + + // ISO885916 is the MIB identifier with IANA name ISO-8859-16. + // + // ISO + ISO885916 MIB = 112 + + // GBK is the MIB identifier with IANA name GBK. + // + // Chinese IT Standardization Technical Committee + // Please see: http://www.iana.org/assignments/charset-reg/GBK + GBK MIB = 113 + + // GB18030 is the MIB identifier with IANA name GB18030. + // + // Chinese IT Standardization Technical Committee + // Please see: http://www.iana.org/assignments/charset-reg/GB18030 + GB18030 MIB = 114 + + // OSDEBCDICDF0415 is the MIB identifier with IANA name OSD_EBCDIC_DF04_15. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15 + OSDEBCDICDF0415 MIB = 115 + + // OSDEBCDICDF03IRV is the MIB identifier with IANA name OSD_EBCDIC_DF03_IRV. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV + OSDEBCDICDF03IRV MIB = 116 + + // OSDEBCDICDF041 is the MIB identifier with IANA name OSD_EBCDIC_DF04_1. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: http://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1 + OSDEBCDICDF041 MIB = 117 + + // ISO115481 is the MIB identifier with IANA name ISO-11548-1. + // + // See http://www.iana.org/assignments/charset-reg/ISO-11548-1 + ISO115481 MIB = 118 + + // KZ1048 is the MIB identifier with IANA name KZ-1048. + // + // See http://www.iana.org/assignments/charset-reg/KZ-1048 + KZ1048 MIB = 119 + + // Unicode is the MIB identifier with IANA name ISO-10646-UCS-2. 
+ // + // the 2-octet Basic Multilingual Plane, aka Unicode + // this needs to specify network byte order: the standard + // does not specify (it is a 16-bit integer space) + Unicode MIB = 1000 + + // UCS4 is the MIB identifier with IANA name ISO-10646-UCS-4. + // + // the full code space. (same comment about byte order, + // these are 31-bit numbers. + UCS4 MIB = 1001 + + // UnicodeASCII is the MIB identifier with IANA name ISO-10646-UCS-Basic. + // + // ASCII subset of Unicode. Basic Latin = collection 1 + // See ISO 10646, Appendix A + UnicodeASCII MIB = 1002 + + // UnicodeLatin1 is the MIB identifier with IANA name ISO-10646-Unicode-Latin1. + // + // ISO Latin-1 subset of Unicode. Basic Latin and Latin-1 + // Supplement = collections 1 and 2. See ISO 10646, + // Appendix A. See rfc1815 . + UnicodeLatin1 MIB = 1003 + + // UnicodeJapanese is the MIB identifier with IANA name ISO-10646-J-1. + // + // ISO 10646 Japanese, see rfc1815 . + UnicodeJapanese MIB = 1004 + + // UnicodeIBM1261 is the MIB identifier with IANA name ISO-Unicode-IBM-1261. + // + // IBM Latin-2, -3, -5, Extended Presentation Set, GCSGID: 1261 + UnicodeIBM1261 MIB = 1005 + + // UnicodeIBM1268 is the MIB identifier with IANA name ISO-Unicode-IBM-1268. + // + // IBM Latin-4 Extended Presentation Set, GCSGID: 1268 + UnicodeIBM1268 MIB = 1006 + + // UnicodeIBM1276 is the MIB identifier with IANA name ISO-Unicode-IBM-1276. + // + // IBM Cyrillic Greek Extended Presentation Set, GCSGID: 1276 + UnicodeIBM1276 MIB = 1007 + + // UnicodeIBM1264 is the MIB identifier with IANA name ISO-Unicode-IBM-1264. + // + // IBM Arabic Presentation Set, GCSGID: 1264 + UnicodeIBM1264 MIB = 1008 + + // UnicodeIBM1265 is the MIB identifier with IANA name ISO-Unicode-IBM-1265. + // + // IBM Hebrew Presentation Set, GCSGID: 1265 + UnicodeIBM1265 MIB = 1009 + + // Unicode11 is the MIB identifier with IANA name UNICODE-1-1. + // + // rfc1641 + // Reference: RFC1641 + Unicode11 MIB = 1010 + + // SCSU is the MIB identifier with IANA name SCSU. + // + // SCSU See http://www.iana.org/assignments/charset-reg/SCSU + SCSU MIB = 1011 + + // UTF7 is the MIB identifier with IANA name UTF-7. + // + // rfc2152 + // Reference: RFC2152 + UTF7 MIB = 1012 + + // UTF16BE is the MIB identifier with IANA name UTF-16BE. + // + // rfc2781 + // Reference: RFC2781 + UTF16BE MIB = 1013 + + // UTF16LE is the MIB identifier with IANA name UTF-16LE. + // + // rfc2781 + // Reference: RFC2781 + UTF16LE MIB = 1014 + + // UTF16 is the MIB identifier with IANA name UTF-16. + // + // rfc2781 + // Reference: RFC2781 + UTF16 MIB = 1015 + + // CESU8 is the MIB identifier with IANA name CESU-8. + // + // http://www.unicode.org/unicode/reports/tr26 + CESU8 MIB = 1016 + + // UTF32 is the MIB identifier with IANA name UTF-32. + // + // http://www.unicode.org/unicode/reports/tr19/ + UTF32 MIB = 1017 + + // UTF32BE is the MIB identifier with IANA name UTF-32BE. + // + // http://www.unicode.org/unicode/reports/tr19/ + UTF32BE MIB = 1018 + + // UTF32LE is the MIB identifier with IANA name UTF-32LE. + // + // http://www.unicode.org/unicode/reports/tr19/ + UTF32LE MIB = 1019 + + // BOCU1 is the MIB identifier with IANA name BOCU-1. + // + // http://www.unicode.org/notes/tn6/ + BOCU1 MIB = 1020 + + // Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1. + // + // Extended ISO 8859-1 Latin-1 for Windows 3.0. + // PCL Symbol Set id: 9U + Windows30Latin1 MIB = 2000 + + // Windows31Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.1-Latin-1. 
+ // + // Extended ISO 8859-1 Latin-1 for Windows 3.1. + // PCL Symbol Set id: 19U + Windows31Latin1 MIB = 2001 + + // Windows31Latin2 is the MIB identifier with IANA name ISO-8859-2-Windows-Latin-2. + // + // Extended ISO 8859-2. Latin-2 for Windows 3.1. + // PCL Symbol Set id: 9E + Windows31Latin2 MIB = 2002 + + // Windows31Latin5 is the MIB identifier with IANA name ISO-8859-9-Windows-Latin-5. + // + // Extended ISO 8859-9. Latin-5 for Windows 3.1 + // PCL Symbol Set id: 5T + Windows31Latin5 MIB = 2003 + + // HPRoman8 is the MIB identifier with IANA name hp-roman8. + // + // LaserJet IIP Printer User's Manual, + // HP part no 33471-90901, Hewlet-Packard, June 1989. + // Reference: RFC1345 + HPRoman8 MIB = 2004 + + // AdobeStandardEncoding is the MIB identifier with IANA name Adobe-Standard-Encoding. + // + // PostScript Language Reference Manual + // PCL Symbol Set id: 10J + AdobeStandardEncoding MIB = 2005 + + // VenturaUS is the MIB identifier with IANA name Ventura-US. + // + // Ventura US. ASCII plus characters typically used in + // publishing, like pilcrow, copyright, registered, trade mark, + // section, dagger, and double dagger in the range A0 (hex) + // to FF (hex). + // PCL Symbol Set id: 14J + VenturaUS MIB = 2006 + + // VenturaInternational is the MIB identifier with IANA name Ventura-International. + // + // Ventura International. ASCII plus coded characters similar + // to Roman8. + // PCL Symbol Set id: 13J + VenturaInternational MIB = 2007 + + // DECMCS is the MIB identifier with IANA name DEC-MCS. + // + // VAX/VMS User's Manual, + // Order Number: AI-Y517A-TE, April 1986. + // Reference: RFC1345 + DECMCS MIB = 2008 + + // PC850Multilingual is the MIB identifier with IANA name IBM850. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC850Multilingual MIB = 2009 + + // PC8DanishNorwegian is the MIB identifier with IANA name PC8-Danish-Norwegian. + // + // PC Danish Norwegian + // 8-bit PC set for Danish Norwegian + // PCL Symbol Set id: 11U + PC8DanishNorwegian MIB = 2012 + + // PC862LatinHebrew is the MIB identifier with IANA name IBM862. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC862LatinHebrew MIB = 2013 + + // PC8Turkish is the MIB identifier with IANA name PC8-Turkish. + // + // PC Latin Turkish. PCL Symbol Set id: 9T + PC8Turkish MIB = 2014 + + // IBMSymbols is the MIB identifier with IANA name IBM-Symbols. + // + // Presentation Set, CPGID: 259 + IBMSymbols MIB = 2015 + + // IBMThai is the MIB identifier with IANA name IBM-Thai. + // + // Presentation Set, CPGID: 838 + IBMThai MIB = 2016 + + // HPLegal is the MIB identifier with IANA name HP-Legal. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 1U + HPLegal MIB = 2017 + + // HPPiFont is the MIB identifier with IANA name HP-Pi-font. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 15U + HPPiFont MIB = 2018 + + // HPMath8 is the MIB identifier with IANA name HP-Math8. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 8M + HPMath8 MIB = 2019 + + // HPPSMath is the MIB identifier with IANA name Adobe-Symbol-Encoding. + // + // PostScript Language Reference Manual + // PCL Symbol Set id: 5M + HPPSMath MIB = 2020 + + // HPDesktop is the MIB identifier with IANA name HP-DeskTop. 
+ // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 7J + HPDesktop MIB = 2021 + + // VenturaMath is the MIB identifier with IANA name Ventura-Math. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 6M + VenturaMath MIB = 2022 + + // MicrosoftPublishing is the MIB identifier with IANA name Microsoft-Publishing. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 6J + MicrosoftPublishing MIB = 2023 + + // Windows31J is the MIB identifier with IANA name Windows-31J. + // + // Windows Japanese. A further extension of Shift_JIS + // to include NEC special characters (Row 13), NEC + // selection of IBM extensions (Rows 89 to 92), and IBM + // extensions (Rows 115 to 119). The CCS's are + // JIS X0201:1997, JIS X0208:1997, and these extensions. + // This charset can be used for the top-level media type "text", + // but it is of limited or specialized use (see rfc2278 ). + // PCL Symbol Set id: 19K + Windows31J MIB = 2024 + + // GB2312 is the MIB identifier with IANA name GB2312 (MIME: GB2312). + // + // Chinese for People's Republic of China (PRC) mixed one byte, + // two byte set: + // 20-7E = one byte ASCII + // A1-FE = two byte PRC Kanji + // See GB 2312-80 + // PCL Symbol Set Id: 18C + GB2312 MIB = 2025 + + // Big5 is the MIB identifier with IANA name Big5 (MIME: Big5). + // + // Chinese for Taiwan Multi-byte set. + // PCL Symbol Set Id: 18T + Big5 MIB = 2026 + + // Macintosh is the MIB identifier with IANA name macintosh. + // + // The Unicode Standard ver1.0, ISBN 0-201-56788-1, Oct 1991 + // Reference: RFC1345 + Macintosh MIB = 2027 + + // IBM037 is the MIB identifier with IANA name IBM037. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM037 MIB = 2028 + + // IBM038 is the MIB identifier with IANA name IBM038. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM038 MIB = 2029 + + // IBM273 is the MIB identifier with IANA name IBM273. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM273 MIB = 2030 + + // IBM274 is the MIB identifier with IANA name IBM274. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM274 MIB = 2031 + + // IBM275 is the MIB identifier with IANA name IBM275. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM275 MIB = 2032 + + // IBM277 is the MIB identifier with IANA name IBM277. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM277 MIB = 2033 + + // IBM278 is the MIB identifier with IANA name IBM278. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM278 MIB = 2034 + + // IBM280 is the MIB identifier with IANA name IBM280. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM280 MIB = 2035 + + // IBM281 is the MIB identifier with IANA name IBM281. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM281 MIB = 2036 + + // IBM284 is the MIB identifier with IANA name IBM284. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM284 MIB = 2037 + + // IBM285 is the MIB identifier with IANA name IBM285. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM285 MIB = 2038 + + // IBM290 is the MIB identifier with IANA name IBM290. 
+ // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM290 MIB = 2039 + + // IBM297 is the MIB identifier with IANA name IBM297. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM297 MIB = 2040 + + // IBM420 is the MIB identifier with IANA name IBM420. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990, + // IBM NLS RM p 11-11 + // Reference: RFC1345 + IBM420 MIB = 2041 + + // IBM423 is the MIB identifier with IANA name IBM423. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM423 MIB = 2042 + + // IBM424 is the MIB identifier with IANA name IBM424. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM424 MIB = 2043 + + // PC8CodePage437 is the MIB identifier with IANA name IBM437. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC8CodePage437 MIB = 2011 + + // IBM500 is the MIB identifier with IANA name IBM500. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM500 MIB = 2044 + + // IBM851 is the MIB identifier with IANA name IBM851. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM851 MIB = 2045 + + // PCp852 is the MIB identifier with IANA name IBM852. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PCp852 MIB = 2010 + + // IBM855 is the MIB identifier with IANA name IBM855. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM855 MIB = 2046 + + // IBM857 is the MIB identifier with IANA name IBM857. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM857 MIB = 2047 + + // IBM860 is the MIB identifier with IANA name IBM860. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM860 MIB = 2048 + + // IBM861 is the MIB identifier with IANA name IBM861. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM861 MIB = 2049 + + // IBM863 is the MIB identifier with IANA name IBM863. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM863 MIB = 2050 + + // IBM864 is the MIB identifier with IANA name IBM864. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM864 MIB = 2051 + + // IBM865 is the MIB identifier with IANA name IBM865. + // + // IBM DOS 3.3 Ref (Abridged), 94X9575 (Feb 1987) + // Reference: RFC1345 + IBM865 MIB = 2052 + + // IBM868 is the MIB identifier with IANA name IBM868. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM868 MIB = 2053 + + // IBM869 is the MIB identifier with IANA name IBM869. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM869 MIB = 2054 + + // IBM870 is the MIB identifier with IANA name IBM870. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM870 MIB = 2055 + + // IBM871 is the MIB identifier with IANA name IBM871. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM871 MIB = 2056 + + // IBM880 is the MIB identifier with IANA name IBM880. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM880 MIB = 2057 + + // IBM891 is the MIB identifier with IANA name IBM891. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM891 MIB = 2058 + + // IBM903 is the MIB identifier with IANA name IBM903. 
+ // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM903 MIB = 2059 + + // IBBM904 is the MIB identifier with IANA name IBM904. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBBM904 MIB = 2060 + + // IBM905 is the MIB identifier with IANA name IBM905. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM905 MIB = 2061 + + // IBM918 is the MIB identifier with IANA name IBM918. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM918 MIB = 2062 + + // IBM1026 is the MIB identifier with IANA name IBM1026. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM1026 MIB = 2063 + + // IBMEBCDICATDE is the MIB identifier with IANA name EBCDIC-AT-DE. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + IBMEBCDICATDE MIB = 2064 + + // EBCDICATDEA is the MIB identifier with IANA name EBCDIC-AT-DE-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICATDEA MIB = 2065 + + // EBCDICCAFR is the MIB identifier with IANA name EBCDIC-CA-FR. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICCAFR MIB = 2066 + + // EBCDICDKNO is the MIB identifier with IANA name EBCDIC-DK-NO. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICDKNO MIB = 2067 + + // EBCDICDKNOA is the MIB identifier with IANA name EBCDIC-DK-NO-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICDKNOA MIB = 2068 + + // EBCDICFISE is the MIB identifier with IANA name EBCDIC-FI-SE. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFISE MIB = 2069 + + // EBCDICFISEA is the MIB identifier with IANA name EBCDIC-FI-SE-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFISEA MIB = 2070 + + // EBCDICFR is the MIB identifier with IANA name EBCDIC-FR. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFR MIB = 2071 + + // EBCDICIT is the MIB identifier with IANA name EBCDIC-IT. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICIT MIB = 2072 + + // EBCDICPT is the MIB identifier with IANA name EBCDIC-PT. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICPT MIB = 2073 + + // EBCDICES is the MIB identifier with IANA name EBCDIC-ES. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICES MIB = 2074 + + // EBCDICESA is the MIB identifier with IANA name EBCDIC-ES-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICESA MIB = 2075 + + // EBCDICESS is the MIB identifier with IANA name EBCDIC-ES-S. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICESS MIB = 2076 + + // EBCDICUK is the MIB identifier with IANA name EBCDIC-UK. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICUK MIB = 2077 + + // EBCDICUS is the MIB identifier with IANA name EBCDIC-US. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICUS MIB = 2078 + + // Unknown8BiT is the MIB identifier with IANA name UNKNOWN-8BIT. 
+ // + // Reference: RFC1428 + Unknown8BiT MIB = 2079 + + // Mnemonic is the MIB identifier with IANA name MNEMONIC. + // + // rfc1345 , also known as "mnemonic+ascii+38" + // Reference: RFC1345 + Mnemonic MIB = 2080 + + // Mnem is the MIB identifier with IANA name MNEM. + // + // rfc1345 , also known as "mnemonic+ascii+8200" + // Reference: RFC1345 + Mnem MIB = 2081 + + // VISCII is the MIB identifier with IANA name VISCII. + // + // rfc1456 + // Reference: RFC1456 + VISCII MIB = 2082 + + // VIQR is the MIB identifier with IANA name VIQR. + // + // rfc1456 + // Reference: RFC1456 + VIQR MIB = 2083 + + // KOI8R is the MIB identifier with IANA name KOI8-R (MIME: KOI8-R). + // + // rfc1489 , based on GOST-19768-74, ISO-6937/8, + // INIS-Cyrillic, ISO-5427. + // Reference: RFC1489 + KOI8R MIB = 2084 + + // HZGB2312 is the MIB identifier with IANA name HZ-GB-2312. + // + // rfc1842 , rfc1843 rfc1843 rfc1842 + HZGB2312 MIB = 2085 + + // IBM866 is the MIB identifier with IANA name IBM866. + // + // IBM NLDG Volume 2 (SE09-8002-03) August 1994 + IBM866 MIB = 2086 + + // PC775Baltic is the MIB identifier with IANA name IBM775. + // + // HP PCL 5 Comparison Guide (P/N 5021-0329) pp B-13, 1996 + PC775Baltic MIB = 2087 + + // KOI8U is the MIB identifier with IANA name KOI8-U. + // + // rfc2319 + // Reference: RFC2319 + KOI8U MIB = 2088 + + // IBM00858 is the MIB identifier with IANA name IBM00858. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM00858 + IBM00858 MIB = 2089 + + // IBM00924 is the MIB identifier with IANA name IBM00924. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM00924 + IBM00924 MIB = 2090 + + // IBM01140 is the MIB identifier with IANA name IBM01140. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01140 + IBM01140 MIB = 2091 + + // IBM01141 is the MIB identifier with IANA name IBM01141. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01141 + IBM01141 MIB = 2092 + + // IBM01142 is the MIB identifier with IANA name IBM01142. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01142 + IBM01142 MIB = 2093 + + // IBM01143 is the MIB identifier with IANA name IBM01143. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01143 + IBM01143 MIB = 2094 + + // IBM01144 is the MIB identifier with IANA name IBM01144. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01144 + IBM01144 MIB = 2095 + + // IBM01145 is the MIB identifier with IANA name IBM01145. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01145 + IBM01145 MIB = 2096 + + // IBM01146 is the MIB identifier with IANA name IBM01146. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01146 + IBM01146 MIB = 2097 + + // IBM01147 is the MIB identifier with IANA name IBM01147. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01147 + IBM01147 MIB = 2098 + + // IBM01148 is the MIB identifier with IANA name IBM01148. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01148 + IBM01148 MIB = 2099 + + // IBM01149 is the MIB identifier with IANA name IBM01149. + // + // IBM See http://www.iana.org/assignments/charset-reg/IBM01149 + IBM01149 MIB = 2100 + + // Big5HKSCS is the MIB identifier with IANA name Big5-HKSCS. + // + // See http://www.iana.org/assignments/charset-reg/Big5-HKSCS + Big5HKSCS MIB = 2101 + + // IBM1047 is the MIB identifier with IANA name IBM1047. 
+ // + // IBM1047 (EBCDIC Latin 1/Open Systems) http://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf + IBM1047 MIB = 2102 + + // PTCP154 is the MIB identifier with IANA name PTCP154. + // + // See http://www.iana.org/assignments/charset-reg/PTCP154 + PTCP154 MIB = 2103 + + // Amiga1251 is the MIB identifier with IANA name Amiga-1251. + // + // See http://www.amiga.ultranet.ru/Amiga-1251.html + Amiga1251 MIB = 2104 + + // KOI7switched is the MIB identifier with IANA name KOI7-switched. + // + // See http://www.iana.org/assignments/charset-reg/KOI7-switched + KOI7switched MIB = 2105 + + // BRF is the MIB identifier with IANA name BRF. + // + // See http://www.iana.org/assignments/charset-reg/BRF + BRF MIB = 2106 + + // TSCII is the MIB identifier with IANA name TSCII. + // + // See http://www.iana.org/assignments/charset-reg/TSCII + TSCII MIB = 2107 + + // CP51932 is the MIB identifier with IANA name CP51932. + // + // See http://www.iana.org/assignments/charset-reg/CP51932 + CP51932 MIB = 2108 + + // Windows874 is the MIB identifier with IANA name windows-874. + // + // See http://www.iana.org/assignments/charset-reg/windows-874 + Windows874 MIB = 2109 + + // Windows1250 is the MIB identifier with IANA name windows-1250. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1250 + Windows1250 MIB = 2250 + + // Windows1251 is the MIB identifier with IANA name windows-1251. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1251 + Windows1251 MIB = 2251 + + // Windows1252 is the MIB identifier with IANA name windows-1252. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1252 + Windows1252 MIB = 2252 + + // Windows1253 is the MIB identifier with IANA name windows-1253. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1253 + Windows1253 MIB = 2253 + + // Windows1254 is the MIB identifier with IANA name windows-1254. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1254 + Windows1254 MIB = 2254 + + // Windows1255 is the MIB identifier with IANA name windows-1255. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1255 + Windows1255 MIB = 2255 + + // Windows1256 is the MIB identifier with IANA name windows-1256. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1256 + Windows1256 MIB = 2256 + + // Windows1257 is the MIB identifier with IANA name windows-1257. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1257 + Windows1257 MIB = 2257 + + // Windows1258 is the MIB identifier with IANA name windows-1258. + // + // Microsoft http://www.iana.org/assignments/charset-reg/windows-1258 + Windows1258 MIB = 2258 + + // TIS620 is the MIB identifier with IANA name TIS-620. + // + // Thai Industrial Standards Institute (TISI) + TIS620 MIB = 2259 + + // CP50220 is the MIB identifier with IANA name CP50220. + // + // See http://www.iana.org/assignments/charset-reg/CP50220 + CP50220 MIB = 2260 +) diff --git a/vendor/golang.org/x/text/encoding/internal/internal.go b/vendor/golang.org/x/text/encoding/internal/internal.go new file mode 100644 index 0000000000..75a5fd1658 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/internal/internal.go @@ -0,0 +1,75 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains code that is shared among encoding implementations. 
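+//
+// For example, a charset package might wrap an existing codec so that it also
+// carries a name and MIB identifier; "base" below is a placeholder for some
+// underlying encoding.Encoding:
+//
+//	var Latin1 encoding.Encoding = &internal.Encoding{
+//		Encoding: base,
+//		Name:     "ISO-8859-1",
+//		MIB:      identifier.ISOLatin1,
+//	}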
+package internal
+
+import (
+	"golang.org/x/text/encoding"
+	"golang.org/x/text/encoding/internal/identifier"
+	"golang.org/x/text/transform"
+)
+
+// Encoding is an implementation of the Encoding interface that adds the String
+// and ID methods to an existing encoding.
+type Encoding struct {
+	encoding.Encoding
+	Name string
+	MIB  identifier.MIB
+}
+
+// _ verifies that Encoding implements identifier.Interface.
+var _ identifier.Interface = (*Encoding)(nil)
+
+func (e *Encoding) String() string {
+	return e.Name
+}
+
+func (e *Encoding) ID() (mib identifier.MIB, other string) {
+	return e.MIB, ""
+}
+
+// SimpleEncoding is an Encoding that combines two Transformers.
+type SimpleEncoding struct {
+	Decoder transform.Transformer
+	Encoder transform.Transformer
+}
+
+func (e *SimpleEncoding) NewDecoder() *encoding.Decoder {
+	return &encoding.Decoder{Transformer: e.Decoder}
+}
+
+func (e *SimpleEncoding) NewEncoder() *encoding.Encoder {
+	return &encoding.Encoder{Transformer: e.Encoder}
+}
+
+// FuncEncoding is an Encoding that combines two functions returning a new
+// Transformer.
+type FuncEncoding struct {
+	Decoder func() transform.Transformer
+	Encoder func() transform.Transformer
+}
+
+func (e FuncEncoding) NewDecoder() *encoding.Decoder {
+	return &encoding.Decoder{Transformer: e.Decoder()}
+}
+
+func (e FuncEncoding) NewEncoder() *encoding.Encoder {
+	return &encoding.Encoder{Transformer: e.Encoder()}
+}
+
+// A RepertoireError indicates a rune is not in the repertoire of a destination
+// encoding. It is associated with an encoding-specific suggested replacement
+// byte.
+type RepertoireError byte
+
+// Error implements the error interface.
+func (r RepertoireError) Error() string {
+	return "encoding: rune not supported by encoding."
+}
+
+// Replacement returns the replacement byte associated with this error.
+func (r RepertoireError) Replacement() byte { return byte(r) }
+
+var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub)
diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go
new file mode 100644
index 0000000000..d7031b6945
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/gen/code.go
@@ -0,0 +1,351 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gen
+
+import (
+	"bytes"
+	"encoding/gob"
+	"fmt"
+	"hash"
+	"hash/fnv"
+	"io"
+	"log"
+	"os"
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// This file contains utilities for generating code.
+
+// TODO: other write methods like:
+// - slices, maps, types, etc.
+
+// CodeWriter is a utility for writing structured code. It computes the content
+// hash and size of written content. It ensures there are newlines between
+// written code blocks.
+type CodeWriter struct {
+	buf  bytes.Buffer
+	Size int
+	Hash hash.Hash32 // content hash
+	gob  *gob.Encoder
+	// For comments we skip the usual one-line separator if they are followed by
+	// a code block.
+	skipSep bool
+}
+
+func (w *CodeWriter) Write(p []byte) (n int, err error) {
+	return w.buf.Write(p)
+}
+
+// NewCodeWriter returns a new CodeWriter.
+func NewCodeWriter() *CodeWriter {
+	h := fnv.New32()
+	return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
+}
+
+// WriteGoFile appends the buffer with the total size of all created structures
+// and writes it as a Go file to the given file with the given package name.
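+//
+// A typical use from a generator might look like the following sketch, where
+// "numEntries", "table", "tables.go", and "mypkg" are placeholder names:
+//
+//	w := gen.NewCodeWriter()
+//	w.WriteConst("numEntries", 42)
+//	w.WriteVar("table", []uint16{0x0041, 0x0042})
+//	w.WriteGoFile("tables.go", "mypkg")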
+func (w *CodeWriter) WriteGoFile(filename, pkg string) {
+	f, err := os.Create(filename)
+	if err != nil {
+		log.Fatalf("Could not create file %s: %v", filename, err)
+	}
+	defer f.Close()
+	if _, err = w.WriteGo(f, pkg); err != nil {
+		log.Fatalf("Error writing file %s: %v", filename, err)
+	}
+}
+
+// WriteGo appends the buffer with the total size of all created structures and
+// writes it as a Go file to the given writer with the given package name.
+func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) {
+	sz := w.Size
+	w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
+	defer w.buf.Reset()
+	return WriteGo(out, pkg, w.buf.Bytes())
+}
+
+func (w *CodeWriter) printf(f string, x ...interface{}) {
+	fmt.Fprintf(w, f, x...)
+}
+
+func (w *CodeWriter) insertSep() {
+	if w.skipSep {
+		w.skipSep = false
+		return
+	}
+	// Use at least two newlines to ensure a blank line between this and the
+	// previous block. WriteGoFile will remove extraneous newlines.
+	w.printf("\n\n")
+}
+
+// WriteComment writes a comment block. All line starts are prefixed with "//".
+// Initial empty lines are gobbled. The indentation for the first line is
+// stripped from consecutive lines.
+func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
+	s := fmt.Sprintf(comment, args...)
+	s = strings.Trim(s, "\n")
+
+	// Use at least two newlines to ensure a blank line between this and the
+	// previous block. WriteGoFile will remove extraneous newlines.
+	w.printf("\n\n// ")
+	w.skipSep = true
+
+	// strip first indent level.
+	sep := "\n"
+	for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
+		sep += s[:1]
+	}
+
+	strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)
+
+	w.printf("\n")
+}
+
+func (w *CodeWriter) writeSizeInfo(size int) {
+	w.printf("// Size: %d bytes\n", size)
+}
+
+// WriteConst writes a constant of the given name and value.
+func (w *CodeWriter) WriteConst(name string, x interface{}) {
+	w.insertSep()
+	v := reflect.ValueOf(x)
+
+	switch v.Type().Kind() {
+	case reflect.String:
+		w.printf("const %s %s = ", name, typeName(x))
+		w.WriteString(v.String())
+		w.printf("\n")
+	default:
+		w.printf("const %s = %#v\n", name, x)
+	}
+}
+
+// WriteVar writes a variable of the given name and value.
+func (w *CodeWriter) WriteVar(name string, x interface{}) {
+	w.insertSep()
+	v := reflect.ValueOf(x)
+	oldSize := w.Size
+	sz := int(v.Type().Size())
+	w.Size += sz
+
+	switch v.Type().Kind() {
+	case reflect.String:
+		w.printf("var %s %s = ", name, typeName(x))
+		w.WriteString(v.String())
+	case reflect.Struct:
+		w.gob.Encode(x)
+		fallthrough
+	case reflect.Slice, reflect.Array:
+		w.printf("var %s = ", name)
+		w.writeValue(v)
+		w.writeSizeInfo(w.Size - oldSize)
+	default:
+		w.printf("var %s %s = ", name, typeName(x))
+		w.gob.Encode(x)
+		w.writeValue(v)
+		w.writeSizeInfo(w.Size - oldSize)
+	}
+	w.printf("\n")
+}
+
+func (w *CodeWriter) writeValue(v reflect.Value) {
+	x := v.Interface()
+	switch v.Kind() {
+	case reflect.String:
+		w.WriteString(v.String())
+	case reflect.Array:
+		// Don't double count: callers of WriteArray count on the size being
+		// added, so we need to discount it here.
+ w.Size -= int(v.Type().Size()) + w.writeSlice(x, true) + case reflect.Slice: + w.writeSlice(x, false) + case reflect.Struct: + w.printf("%s{\n", typeName(v.Interface())) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + w.printf("%s: ", t.Field(i).Name) + w.writeValue(v.Field(i)) + w.printf(",\n") + } + w.printf("}") + default: + w.printf("%#v", x) + } +} + +// WriteString writes a string literal. +func (w *CodeWriter) WriteString(s string) { + s = strings.Replace(s, `\`, `\\`, -1) + io.WriteString(w.Hash, s) // content hash + w.Size += len(s) + + const maxInline = 40 + if len(s) <= maxInline { + w.printf("%q", s) + return + } + + // We will render the string as a multi-line string. + const maxWidth = 80 - 4 - len(`"`) - len(`" +`) + + // When starting on its own line, go fmt indents line 2+ an extra level. + n, max := maxWidth, maxWidth-4 + + // As per https://golang.org/issue/18078, the compiler has trouble + // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, + // for large N. We insert redundant, explicit parentheses to work around + // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + + // ... + s127) + etc + (etc + ... + sN). + explicitParens, extraComment := len(s) > 128*1024, "" + if explicitParens { + w.printf(`(`) + extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" + } + + // Print "" +\n, if a string does not start on its own line. + b := w.buf.Bytes() + if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { + w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) + n, max = maxWidth, maxWidth + } + + w.printf(`"`) + + for sz, p, nLines := 0, 0, 0; p < len(s); { + var r rune + r, sz = utf8.DecodeRuneInString(s[p:]) + out := s[p : p+sz] + chars := 1 + if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { + switch sz { + case 1: + out = fmt.Sprintf("\\x%02x", s[p]) + case 2, 3: + out = fmt.Sprintf("\\u%04x", r) + case 4: + out = fmt.Sprintf("\\U%08x", r) + } + chars = len(out) + } + if n -= chars; n < 0 { + nLines++ + if explicitParens && nLines&63 == 63 { + w.printf("\") + (\"") + } + w.printf("\" +\n\"") + n = max - len(out) + } + w.printf("%s", out) + p += sz + } + w.printf(`"`) + if explicitParens { + w.printf(`)`) + } +} + +// WriteSlice writes a slice value. +func (w *CodeWriter) WriteSlice(x interface{}) { + w.writeSlice(x, false) +} + +// WriteArray writes an array value. +func (w *CodeWriter) WriteArray(x interface{}) { + w.writeSlice(x, true) +} + +func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { + v := reflect.ValueOf(x) + w.gob.Encode(v.Len()) + w.Size += v.Len() * int(v.Type().Elem().Size()) + name := typeName(x) + if isArray { + name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) + } + if isArray { + w.printf("%s{\n", name) + } else { + w.printf("%s{ // %d elements\n", name, v.Len()) + } + + switch kind := v.Type().Elem().Kind(); kind { + case reflect.String: + for _, s := range x.([]string) { + w.WriteString(s) + w.printf(",\n") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // nLine and nBlock are the number of elements per line and block. 
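+		// The per-line and per-block counts below are chosen per element
+		// width so that the generated rows stay a manageable length.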
+ nLine, nBlock, format := 8, 64, "%d," + switch kind { + case reflect.Uint8: + format = "%#02x," + case reflect.Uint16: + format = "%#04x," + case reflect.Uint32: + nLine, nBlock, format = 4, 32, "%#08x," + case reflect.Uint, reflect.Uint64: + nLine, nBlock, format = 4, 32, "%#016x," + case reflect.Int8: + nLine = 16 + } + n := nLine + for i := 0; i < v.Len(); i++ { + if i%nBlock == 0 && v.Len() > nBlock { + w.printf("// Entry %X - %X\n", i, i+nBlock-1) + } + x := v.Index(i).Interface() + w.gob.Encode(x) + w.printf(format, x) + if n--; n == 0 { + n = nLine + w.printf("\n") + } + } + w.printf("\n") + case reflect.Struct: + zero := reflect.Zero(v.Type().Elem()).Interface() + for i := 0; i < v.Len(); i++ { + x := v.Index(i).Interface() + w.gob.EncodeValue(v) + if !reflect.DeepEqual(zero, x) { + line := fmt.Sprintf("%#v,\n", x) + line = line[strings.IndexByte(line, '{'):] + w.printf("%d: ", i) + w.printf(line) + } + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + w.printf("%d: %#v,\n", i, v.Index(i).Interface()) + } + default: + panic("gen: slice elem type not supported") + } + w.printf("}") +} + +// WriteType writes a definition of the type of the given value and returns the +// type name. +func (w *CodeWriter) WriteType(x interface{}) string { + t := reflect.TypeOf(x) + w.printf("type %s struct {\n", t.Name()) + for i := 0; i < t.NumField(); i++ { + w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) + } + w.printf("}\n") + return t.Name() +} + +// typeName returns the name of the go type of x. +func typeName(x interface{}) string { + t := reflect.ValueOf(x).Type() + return strings.Replace(fmt.Sprint(t), "main.", "", 1) +} diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go new file mode 100644 index 0000000000..2acb0355a2 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/gen.go @@ -0,0 +1,281 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gen contains common code for the various code generation tools in the +// text repository. Its usage ensures consistency between tools. +// +// This package defines command line flags that are common to most generation +// tools. The flags allow for specifying specific Unicode and CLDR versions +// in the public Unicode data repository (http://www.unicode.org/Public). +// +// A local Unicode data mirror can be set through the flag -local or the +// environment variable UNICODE_DIR. The former takes precedence. The local +// directory should follow the same structure as the public repository. +// +// IANA data can also optionally be mirrored by putting it in the iana directory +// rooted at the top of the local mirror. Beware, though, that IANA data is not +// versioned. So it is up to the developer to use the right version. 
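+//
+// A generator typically begins like this (a sketch; the file processed and
+// the error handling vary per tool):
+//
+//	func main() {
+//		gen.Init()
+//		r := gen.OpenUCDFile("UnicodeData.txt")
+//		defer r.Close()
+//		// ... parse r and emit tables with a CodeWriter ...
+//	}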
+package gen // import "golang.org/x/text/internal/gen"
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/build"
+	"go/format"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"path"
+	"path/filepath"
+	"sync"
+	"unicode"
+
+	"golang.org/x/text/unicode/cldr"
+)
+
+var (
+	url = flag.String("url",
+		"http://www.unicode.org/Public",
+		"URL of Unicode database directory")
+	iana = flag.String("iana",
+		"http://www.iana.org",
+		"URL of the IANA repository")
+	unicodeVersion = flag.String("unicode",
+		getEnv("UNICODE_VERSION", unicode.Version),
+		"unicode version to use")
+	cldrVersion = flag.String("cldr",
+		getEnv("CLDR_VERSION", cldr.Version),
+		"cldr version to use")
+)
+
+func getEnv(name, def string) string {
+	if v := os.Getenv(name); v != "" {
+		return v
+	}
+	return def
+}
+
+// Init performs common initialization for a gen command. It parses the flags
+// and sets up the standard logging parameters.
+func Init() {
+	log.SetPrefix("")
+	log.SetFlags(log.Lshortfile)
+	flag.Parse()
+}
+
+const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package %s
+
+`
+
+// UnicodeVersion reports the requested Unicode version.
+func UnicodeVersion() string {
+	return *unicodeVersion
+}
+
+// CLDRVersion reports the requested CLDR version.
+func CLDRVersion() string {
+	return *cldrVersion
+}
+
+// IsLocal reports whether data files are available locally.
+func IsLocal() bool {
+	dir, err := localReadmeFile()
+	if err != nil {
+		return false
+	}
+	if _, err = os.Stat(dir); err != nil {
+		return false
+	}
+	return true
+}
+
+// OpenUCDFile opens the requested UCD file. The file is specified relative to
+// the public Unicode root directory. It will call log.Fatal if there are any
+// errors.
+func OpenUCDFile(file string) io.ReadCloser {
+	return openUnicode(path.Join(*unicodeVersion, "ucd", file))
+}
+
+// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
+// are any errors.
+func OpenCLDRCoreZip() io.ReadCloser {
+	return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
+}
+
+// OpenUnicodeFile opens the requested file of the requested category from the
+// root of the Unicode data archive. The file is specified relative to the
+// public Unicode root directory. If version is "", it will use the default
+// Unicode version. It will call log.Fatal if there are any errors.
+func OpenUnicodeFile(category, version, file string) io.ReadCloser {
+	if version == "" {
+		version = UnicodeVersion()
+	}
+	return openUnicode(path.Join(category, version, file))
+}
+
+// OpenIANAFile opens the requested IANA file. The file is specified relative
+// to the IANA root, which is typically either http://www.iana.org or the
+// iana directory in the local mirror. It will call log.Fatal if there are any
+// errors.
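+// For example (a sketch; the path is the language subtag registry mentioned
+// in the README text below):
+//
+//	r := OpenIANAFile("assignments/language-subtag-registry")
+//	defer r.Close()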
+func OpenIANAFile(path string) io.ReadCloser {
+	return Open(*iana, "iana", path)
+}
+
+var (
+	dirMutex sync.Mutex
+	localDir string
+)
+
+const permissions = 0755
+
+func localReadmeFile() (string, error) {
+	p, err := build.Import("golang.org/x/text", "", build.FindOnly)
+	if err != nil {
+		return "", fmt.Errorf("Could not locate package: %v", err)
+	}
+	return filepath.Join(p.Dir, "DATA", "README"), nil
+}
+
+func getLocalDir() string {
+	dirMutex.Lock()
+	defer dirMutex.Unlock()
+
+	readme, err := localReadmeFile()
+	if err != nil {
+		log.Fatal(err)
+	}
+	dir := filepath.Dir(readme)
+	if _, err := os.Stat(readme); err != nil {
+		if err := os.MkdirAll(dir, permissions); err != nil {
+			log.Fatalf("Could not create directory: %v", err)
+		}
+		ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
+	}
+	return dir
+}
+
+const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.
+
+This directory contains downloaded files used to generate the various tables
+in the golang.org/x/text subrepo.
+
+Note that the language subtag repo (iana/assignments/language-subtag-registry)
+and all other files in the iana subdirectory are not versioned and will need
+to be periodically manually updated. The easiest way to do this is to remove
+the entire iana directory. This is mostly of concern when updating the language
+package.
+`
+
+// Open opens subdir/path if a local directory is specified and the file exists,
+// where subdir is a directory relative to the local root, or fetches it from
+// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
+func Open(urlRoot, subdir, path string) io.ReadCloser {
+	file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
+	return open(file, urlRoot, path)
+}
+
+func openUnicode(path string) io.ReadCloser {
+	file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
+	return open(file, *url, path)
+}
+
+// TODO: automatically periodically update non-versioned files.
+
+func open(file, urlRoot, path string) io.ReadCloser {
+	if f, err := os.Open(file); err == nil {
+		return f
+	}
+	r := get(urlRoot, path)
+	defer r.Close()
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		log.Fatalf("Could not download file: %v", err)
+	}
+	os.MkdirAll(filepath.Dir(file), permissions)
+	if err := ioutil.WriteFile(file, b, permissions); err != nil {
+		log.Fatalf("Could not create file: %v", err)
+	}
+	return ioutil.NopCloser(bytes.NewReader(b))
+}
+
+func get(root, path string) io.ReadCloser {
+	url := root + "/" + path
+	fmt.Printf("Fetching %s...", url)
+	defer fmt.Println(" done.")
+	resp, err := http.Get(url)
+	if err != nil {
+		log.Fatalf("HTTP GET: %v", err)
+	}
+	if resp.StatusCode != 200 {
+		log.Fatalf("Bad GET status for %q: %q", url, resp.Status)
+	}
+	return resp.Body
+}
+
+// TODO: use Write*Version in all applicable packages.
+
+// WriteUnicodeVersion writes a constant for the Unicode version from which the
+// tables are generated.
+func WriteUnicodeVersion(w io.Writer) {
+	fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
+	fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
+}
+
+// WriteCLDRVersion writes a constant for the CLDR version from which the
+// tables are generated.
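+// The output looks like this (assuming CLDR version 32):
+//
+//	// CLDRVersion is the CLDR version from which the tables in this package are derived.
+//	const CLDRVersion = "32"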
+func WriteCLDRVersion(w io.Writer) { + fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) +} + +// WriteGoFile prepends a standard file comment and package statement to the +// given bytes, applies gofmt, and writes them to a file with the given name. +// It will call log.Fatal if there are any errors. +func WriteGoFile(filename, pkg string, b []byte) { + w, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer w.Close() + if _, err = WriteGo(w, pkg, b); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo prepends a standard file comment and package statement to the given +// bytes, applies gofmt, and writes them to w. +func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) { + src := []byte(fmt.Sprintf(header, pkg)) + src = append(src, b...) + formatted, err := format.Source(src) + if err != nil { + // Print the generated code even in case of an error so that the + // returned error can be meaningfully interpreted. + n, _ = w.Write(src) + return n, err + } + return w.Write(formatted) +} + +// Repackage rewrites a Go file from belonging to package main to belonging to +// the given package. +func Repackage(inFile, outFile, pkg string) { + src, err := ioutil.ReadFile(inFile) + if err != nil { + log.Fatalf("reading %s: %v", inFile, err) + } + const toDelete = "package main\n\n" + i := bytes.Index(src, []byte(toDelete)) + if i < 0 { + log.Fatalf("Could not find %q in %s.", toDelete, inFile) + } + w := &bytes.Buffer{} + w.Write(src[i+len(toDelete):]) + WriteGoFile(outFile, pkg, w.Bytes()) +} diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go new file mode 100644 index 0000000000..fe47b9b35f --- /dev/null +++ b/vendor/golang.org/x/text/transform/transform.go @@ -0,0 +1,705 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transform provides reader and writer wrappers that transform the +// bytes passing through as well as various transformations. Example +// transformations provided by other packages include normalization and +// conversion between character sets. +package transform // import "golang.org/x/text/transform" + +import ( + "bytes" + "errors" + "io" + "unicode/utf8" +) + +var ( + // ErrShortDst means that the destination buffer was too short to + // receive all of the transformed bytes. + ErrShortDst = errors.New("transform: short destination buffer") + + // ErrShortSrc means that the source buffer has insufficient data to + // complete the transformation. + ErrShortSrc = errors.New("transform: short source buffer") + + // ErrEndOfSpan means that the input and output (the transformed input) + // are not identical. + ErrEndOfSpan = errors.New("transform: input and output are not identical") + + // errInconsistentByteCount means that Transform returned success (nil + // error) but also returned nSrc inconsistent with the src argument. + errInconsistentByteCount = errors.New("transform: inconsistent byte count returned") + + // errShortInternal means that an internal buffer is not large enough + // to make progress and the Transform operation must be aborted. + errShortInternal = errors.New("transform: short internal buffer") +) + +// Transformer transforms bytes. 
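+// A Transformer is typically applied through the Reader, Writer, String,
+// Bytes, or Append helpers below rather than called directly. For example
+// (a sketch using the no-op Nop transformer defined later in this file):
+//
+//	s, n, err := String(Nop, "hello")
+//	// s == "hello", n == 5, err == nil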
+type Transformer interface {
+	// Transform writes to dst the transformed bytes read from src, and
+	// returns the number of dst bytes written and src bytes read. The
+	// atEOF argument tells whether src represents the last bytes of the
+	// input.
+	//
+	// Callers should always process the nDst bytes produced and account
+	// for the nSrc bytes consumed before considering the error err.
+	//
+	// A nil error means that all of the transformed bytes (whether freshly
+	// transformed from src or left over from previous Transform calls)
+	// were written to dst. A nil error can be returned regardless of
+	// whether atEOF is true. If err is nil then nSrc must equal len(src);
+	// the converse is not necessarily true.
+	//
+	// ErrShortDst means that dst was too short to receive all of the
+	// transformed bytes. ErrShortSrc means that src had insufficient data
+	// to complete the transformation. If both conditions apply, then
+	// either error may be returned. Other than the error conditions listed
+	// here, implementations are free to report other errors that arise.
+	Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
+
+	// Reset resets the state and allows a Transformer to be reused.
+	Reset()
+}
+
+// SpanningTransformer extends the Transformer interface with a Span method
+// that determines how much of the input already conforms to the Transformer.
+type SpanningTransformer interface {
+	Transformer
+
+	// Span returns a position in src such that transforming src[:n] results in
+	// identical output src[:n] for these bytes. It does not necessarily return
+	// the largest such n. The atEOF argument tells whether src represents the
+	// last bytes of the input.
+	//
+	// Callers should always account for the n bytes consumed before
+	// considering the error err.
+	//
+	// A nil error means that all input bytes are known to be identical to the
+	// output produced by the Transformer. A nil error can be returned
+	// regardless of whether atEOF is true. If err is nil, then n must
+	// equal len(src); the converse is not necessarily true.
+	//
+	// ErrEndOfSpan means that the Transformer output may differ from the
+	// input after n bytes. Note that n may be len(src), meaning that the output
+	// would contain additional bytes after otherwise identical output.
+	// ErrShortSrc means that src had insufficient data to determine whether the
+	// remaining bytes would change. Other than the error conditions listed
+	// here, implementations are free to report other errors that arise.
+	//
+	// Calling Span can modify the Transformer state as a side effect. In
+	// effect, it does the transformation just as calling Transform would, only
+	// without copying to a destination buffer and only up to a point it can
+	// determine the input and output bytes are the same. This is obviously more
+	// limited than calling Transform, but can be more efficient in terms of
+	// copying and allocating buffers. Calls to Span and Transform may be
+	// interleaved.
+	Span(src []byte, atEOF bool) (n int, err error)
+}
+
+// NopResetter can be embedded by implementations of Transformer to add a nop
+// Reset method.
+type NopResetter struct{}
+
+// Reset implements the Reset method of the Transformer interface.
+func (NopResetter) Reset() {}
+
+// Reader wraps another io.Reader by transforming the bytes read.
+type Reader struct {
+	r   io.Reader
+	t   Transformer
+	err error
+
+	// dst[dst0:dst1] contains bytes that have been transformed by t but
+	// not yet copied out via Read.
+	dst        []byte
+	dst0, dst1 int
+
+	// src[src0:src1] contains bytes that have been read from r but not
+	// yet transformed through t.
+	src        []byte
+	src0, src1 int
+
+	// transformComplete is whether the transformation is complete,
+	// regardless of whether or not it was successful.
+	transformComplete bool
+}
+
+const defaultBufSize = 4096
+
+// NewReader returns a new Reader that wraps r by transforming the bytes read
+// via t. It calls Reset on t.
+func NewReader(r io.Reader, t Transformer) *Reader {
+	t.Reset()
+	return &Reader{
+		r:   r,
+		t:   t,
+		dst: make([]byte, defaultBufSize),
+		src: make([]byte, defaultBufSize),
+	}
+}
+
+// Read implements the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	n, err := 0, error(nil)
+	for {
+		// Copy out any transformed bytes and return the final error if we are done.
+		if r.dst0 != r.dst1 {
+			n = copy(p, r.dst[r.dst0:r.dst1])
+			r.dst0 += n
+			if r.dst0 == r.dst1 && r.transformComplete {
+				return n, r.err
+			}
+			return n, nil
+		} else if r.transformComplete {
+			return 0, r.err
+		}
+
+		// Try to transform some source bytes, or to flush the transformer if we
+		// are out of source bytes. We do this even if r.r.Read returned an error.
+		// As the io.Reader documentation says, "process the n > 0 bytes returned
+		// before considering the error".
+		if r.src0 != r.src1 || r.err != nil {
+			r.dst0 = 0
+			r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
+			r.src0 += n
+
+			switch {
+			case err == nil:
+				if r.src0 != r.src1 {
+					r.err = errInconsistentByteCount
+				}
+				// The Transform call was successful; we are complete if we
+				// cannot read more bytes into src.
+				r.transformComplete = r.err != nil
+				continue
+			case err == ErrShortDst && (r.dst1 != 0 || n != 0):
+				// Make room in dst by copying out, and try again.
+				continue
+			case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
+				// Read more bytes into src via the code below, and try again.
+			default:
+				r.transformComplete = true
+				// The reader error (r.err) takes precedence over the
+				// transformer error (err) unless r.err is nil or io.EOF.
+				if r.err == nil || r.err == io.EOF {
+					r.err = err
+				}
+				continue
+			}
+		}
+
+		// Move any untransformed source bytes to the start of the buffer
+		// and read more bytes.
+		if r.src0 != 0 {
+			r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
+		}
+		n, r.err = r.r.Read(r.src[r.src1:])
+		r.src1 += n
+	}
+}
+
+// TODO: implement ReadByte (and ReadRune??).
+
+// Writer wraps another io.Writer by transforming the bytes written to it.
+// The user needs to call Close to flush unwritten bytes that may
+// be buffered.
+type Writer struct {
+	w   io.Writer
+	t   Transformer
+	dst []byte
+
+	// src[:n] contains bytes that have not yet passed through t.
+	src []byte
+	n   int
+}
+
+// NewWriter returns a new Writer that wraps w by transforming the bytes written
+// via t. It calls Reset on t.
+func NewWriter(w io.Writer, t Transformer) *Writer {
+	t.Reset()
+	return &Writer{
+		w:   w,
+		t:   t,
+		dst: make([]byte, defaultBufSize),
+		src: make([]byte, defaultBufSize),
+	}
+}
+
+// Write implements the io.Writer interface. If there are not enough
+// bytes available to complete a Transform, the bytes will be buffered
+// for the next write. Call Close to convert the remaining bytes.
+func (w *Writer) Write(data []byte) (n int, err error) {
+	src := data
+	if w.n > 0 {
+		// Append bytes from data to the last remainder.
+		// TODO: limit the amount copied on first try.
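+		// After this copy, n counts the bytes of data absorbed into the
+		// w.src remainder; the loop below keeps n consistent as the
+		// buffered bytes are consumed.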
+		n = copy(w.src[w.n:], data)
+		w.n += n
+		src = w.src[:w.n]
+	}
+	for {
+		nDst, nSrc, err := w.t.Transform(w.dst, src, false)
+		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
+			return n, werr
+		}
+		src = src[nSrc:]
+		if w.n == 0 {
+			n += nSrc
+		} else if len(src) <= n {
+			// Enough bytes from w.src have been consumed. We make src point
+			// to data instead to reduce the copying.
+			w.n = 0
+			n -= len(src)
+			src = data[n:]
+			if n < len(data) && (err == nil || err == ErrShortSrc) {
+				continue
+			}
+		}
+		switch err {
+		case ErrShortDst:
+			// This error is okay as long as we are making progress.
+			if nDst > 0 || nSrc > 0 {
+				continue
+			}
+		case ErrShortSrc:
+			if len(src) < len(w.src) {
+				m := copy(w.src, src)
+				// If w.n > 0, bytes from data were already copied to w.src and n
+				// was already set to the number of bytes consumed.
+				if w.n == 0 {
+					n += m
+				}
+				w.n = m
+				err = nil
+			} else if nDst > 0 || nSrc > 0 {
+				// Not enough buffer to store the remainder. Keep processing as
+				// long as there is progress. Without this case, transforms that
+				// require a lookahead larger than the buffer may result in an
+				// error. This is not something one may expect to be common in
+				// practice, but it may occur when buffers are set to small
+				// sizes during testing.
+				continue
+			}
+		case nil:
+			if w.n > 0 {
+				err = errInconsistentByteCount
+			}
+		}
+		return n, err
+	}
+}
+
+// Close implements the io.Closer interface.
+func (w *Writer) Close() error {
+	src := w.src[:w.n]
+	for {
+		nDst, nSrc, err := w.t.Transform(w.dst, src, true)
+		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
+			return werr
+		}
+		if err != ErrShortDst {
+			return err
+		}
+		src = src[nSrc:]
+	}
+}
+
+type nop struct{ NopResetter }
+
+func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	n := copy(dst, src)
+	if n < len(src) {
+		err = ErrShortDst
+	}
+	return n, n, err
+}
+
+func (nop) Span(src []byte, atEOF bool) (n int, err error) {
+	return len(src), nil
+}
+
+type discard struct{ NopResetter }
+
+func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	return 0, len(src), nil
+}
+
+var (
+	// Discard is a Transformer for which all Transform calls succeed
+	// by consuming all bytes and writing nothing.
+	Discard Transformer = discard{}
+
+	// Nop is a SpanningTransformer that copies src to dst.
+	Nop SpanningTransformer = nop{}
+)
+
+// chain is a sequence of links. A chain with N Transformers has N+1 links and
+// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
+// buffers given to chain.Transform and the middle N-1 buffers are intermediate
+// buffers owned by the chain. The i'th link transforms bytes from the i'th
+// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
+// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
+type chain struct {
+	link []link
+	err  error
+	// errStart is the index at which the error occurred plus 1. Processing
+	// resumes at this level at the next call to Transform. As long as
+	// errStart > 0, chain will not consume any more source bytes.
+	errStart int
+}
+
+func (c *chain) fatalError(errIndex int, err error) {
+	if i := errIndex + 1; i > c.errStart {
+		c.errStart = i
+		c.err = err
+	}
+}
+
+type link struct {
+	t Transformer
+	// b[p:n] holds the bytes to be transformed by t.
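+	// p is the read offset and n the write offset into b.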
+ b []byte + p int + n int +} + +func (l *link) src() []byte { + return l.b[l.p:l.n] +} + +func (l *link) dst() []byte { + return l.b[l.n:] +} + +// Chain returns a Transformer that applies t in sequence. +func Chain(t ...Transformer) Transformer { + if len(t) == 0 { + return nop{} + } + c := &chain{link: make([]link, len(t)+1)} + for i, tt := range t { + c.link[i].t = tt + } + // Allocate intermediate buffers. + b := make([][defaultBufSize]byte, len(t)-1) + for i := range b { + c.link[i+1].b = b[i][:] + } + return c +} + +// Reset resets the state of Chain. It calls Reset on all the Transformers. +func (c *chain) Reset() { + for i, l := range c.link { + if l.t != nil { + l.t.Reset() + } + c.link[i].p, c.link[i].n = 0, 0 + } +} + +// TODO: make chain use Span (is going to be fun to implement!) + +// Transform applies the transformers of c in sequence. +func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + // Set up src and dst in the chain. + srcL := &c.link[0] + dstL := &c.link[len(c.link)-1] + srcL.b, srcL.p, srcL.n = src, 0, len(src) + dstL.b, dstL.n = dst, 0 + var lastFull, needProgress bool // for detecting progress + + // i is the index of the next Transformer to apply, for i in [low, high]. + // low is the lowest index for which c.link[low] may still produce bytes. + // high is the highest index for which c.link[high] has a Transformer. + // The error returned by Transform determines whether to increase or + // decrease i. We try to completely fill a buffer before converting it. + for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; { + in, out := &c.link[i], &c.link[i+1] + nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i) + out.n += nDst + in.p += nSrc + if i > 0 && in.p == in.n { + in.p, in.n = 0, 0 + } + needProgress, lastFull = lastFull, false + switch err0 { + case ErrShortDst: + // Process the destination buffer next. Return if we are already + // at the high index. + if i == high { + return dstL.n, srcL.p, ErrShortDst + } + if out.n != 0 { + i++ + // If the Transformer at the next index is not able to process any + // source bytes there is nothing that can be done to make progress + // and the bytes will remain unprocessed. lastFull is used to + // detect this and break out of the loop with a fatal error. + lastFull = true + continue + } + // The destination buffer was too small, but is completely empty. + // Return a fatal error as this transformation can never complete. + c.fatalError(i, errShortInternal) + case ErrShortSrc: + if i == 0 { + // Save ErrShortSrc in err. All other errors take precedence. + err = ErrShortSrc + break + } + // Source bytes were depleted before filling up the destination buffer. + // Verify we made some progress, move the remaining bytes to the errStart + // and try to get more source bytes. + if needProgress && nSrc == 0 || in.n-in.p == len(in.b) { + // There were not enough source bytes to proceed while the source + // buffer cannot hold any more bytes. Return a fatal error as this + // transformation can never complete. + c.fatalError(i, errShortInternal) + break + } + // in.b is an internal buffer and we can make progress. + in.p, in.n = 0, copy(in.b, in.src()) + fallthrough + case nil: + // if i == low, we have depleted the bytes at index i or any lower levels. + // In that case we increase low and i. In all other cases we decrease i to + // fetch more bytes before proceeding to the next index. 
+			if i > low {
+				i--
+				continue
+			}
+		default:
+			c.fatalError(i, err0)
+		}
+		// Exhausted level low or fatal error: increase low and continue
+		// to process the bytes accepted so far.
+		i++
+		low = i
+	}
+
+	// If c.errStart > 0, this means we found a fatal error. We will clear
+	// all upstream buffers. At this point, no more progress can be made
+	// downstream, as Transform would have bailed while handling ErrShortDst.
+	if c.errStart > 0 {
+		for i := 1; i < c.errStart; i++ {
+			c.link[i].p, c.link[i].n = 0, 0
+		}
+		err, c.errStart, c.err = c.err, 0, nil
+	}
+	return dstL.n, srcL.p, err
+}
+
+// RemoveFunc returns a Transformer that removes runes r for which f(r) is true.
+//
+// Deprecated: use runes.Remove instead.
+func RemoveFunc(f func(r rune) bool) Transformer {
+	return removeF(f)
+}
+
+type removeF func(r rune) bool
+
+func (removeF) Reset() {}
+
+// Transform implements the Transformer interface.
+func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {
+
+		if r = rune(src[0]); r < utf8.RuneSelf {
+			sz = 1
+		} else {
+			r, sz = utf8.DecodeRune(src)
+
+			if sz == 1 {
+				// Invalid rune.
+				if !atEOF && !utf8.FullRune(src) {
+					err = ErrShortSrc
+					break
+				}
+				// We replace illegal bytes with RuneError. Not doing so might
+				// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
+				// The resulting byte sequence may subsequently contain runes
+				// for which t(r) is true that were passed unnoticed.
+				if !t(r) {
+					if nDst+3 > len(dst) {
+						err = ErrShortDst
+						break
+					}
+					nDst += copy(dst[nDst:], "\uFFFD")
+				}
+				nSrc++
+				continue
+			}
+		}
+
+		if !t(r) {
+			if nDst+sz > len(dst) {
+				err = ErrShortDst
+				break
+			}
+			nDst += copy(dst[nDst:], src[:sz])
+		}
+		nSrc += sz
+	}
+	return
+}
+
+// grow returns a new []byte that is longer than b, and copies the first n bytes
+// of b to the start of the new slice.
+func grow(b []byte, n int) []byte {
+	m := len(b)
+	if m <= 32 {
+		m = 64
+	} else if m <= 256 {
+		m *= 2
+	} else {
+		m += m >> 1
+	}
+	buf := make([]byte, m)
+	copy(buf, b[:n])
+	return buf
+}
+
+const initialBufSize = 128
+
+// String returns a string with the result of converting s[:n] using t, where
+// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
+func String(t Transformer, s string) (result string, n int, err error) {
+	t.Reset()
+	if s == "" {
+		// Fast path for the common case of empty input. Results in about an
+		// 86% reduction of running time for BenchmarkStringLowerEmpty.
+		if _, _, err := t.Transform(nil, nil, true); err == nil {
+			return "", 0, nil
+		}
+	}
+
+	// Allocate only once. Note that both dst and src escape when passed to
+	// Transform.
+	buf := [2 * initialBufSize]byte{}
+	dst := buf[:initialBufSize:initialBufSize]
+	src := buf[initialBufSize : 2*initialBufSize]
+
+	// The input string s is transformed in multiple chunks (starting with a
+	// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
+	// per-Transform-call) indexes, pDst and pSrc are overall indexes.
+	nDst, nSrc := 0, 0
+	pDst, pSrc := 0, 0
+
+	// pPrefix is the length of a common prefix: the first pPrefix bytes of the
+	// result will equal the first pPrefix bytes of s. It is not guaranteed to
+	// be the largest such value, but if pPrefix, len(result) and len(s) are
+	// all equal after the final transform (i.e. calling Transform with atEOF
+	// being true returned nil error) then we don't need to allocate a new
+	// result string.
+	pPrefix := 0
+	for {
+		// Invariant: pDst == pPrefix && pSrc == pPrefix.
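+		// Each iteration transforms the next chunk and checks whether the
+		// output still equals the input, extending the common prefix if so.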
+
+		n := copy(src, s[pSrc:])
+		nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
+		pDst += nDst
+		pSrc += nSrc
+
+		// TODO: let transformers implement an optional Spanner interface, akin
+		// to norm's QuickSpan. This would even allow us to avoid any allocation.
+		if !bytes.Equal(dst[:nDst], src[:nSrc]) {
+			break
+		}
+		pPrefix = pSrc
+		if err == ErrShortDst {
+			// A buffer can only be short if a transformer modifies its input.
+			break
+		} else if err == ErrShortSrc {
+			if nSrc == 0 {
+				// No progress was made.
+				break
+			}
+			// Equal so far and !atEOF, so continue checking.
+		} else if err != nil || pPrefix == len(s) {
+			return string(s[:pPrefix]), pPrefix, err
+		}
+	}
+	// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.
+
+	// We have transformed the first pSrc bytes of the input s to become pDst
+	// transformed bytes. Those transformed bytes are discontiguous: the first
+	// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
+	// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
+	// that they become one contiguous slice: dst[:pDst].
+	if pPrefix != 0 {
+		newDst := dst
+		if pDst > len(newDst) {
+			newDst = make([]byte, len(s)+nDst-nSrc)
+		}
+		copy(newDst[pPrefix:pDst], dst[:nDst])
+		copy(newDst[:pPrefix], s[:pPrefix])
+		dst = newDst
+	}
+
+	// Prevent duplicate Transform calls with atEOF being true at the end of
+	// the input. Also return if we have an unrecoverable error.
+	if (err == nil && pSrc == len(s)) ||
+		(err != nil && err != ErrShortDst && err != ErrShortSrc) {
+		return string(dst[:pDst]), pSrc, err
+	}
+
+	// Transform the remaining input, growing dst and src buffers as necessary.
+	for {
+		n := copy(src, s[pSrc:])
+		nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
+		pDst += nDst
+		pSrc += nSrc
+
+		// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
+		// make progress. This may avoid excessive allocations.
+		if err == ErrShortDst {
+			if nDst == 0 {
+				dst = grow(dst, pDst)
+			}
+		} else if err == ErrShortSrc {
+			if nSrc == 0 {
+				src = grow(src, 0)
+			}
+		} else if err != nil || pSrc == len(s) {
+			return string(dst[:pDst]), pSrc, err
+		}
+	}
+}
+
+// Bytes returns a new byte slice with the result of converting b[:n] using t,
+// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
+func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
+	return doAppend(t, 0, make([]byte, len(b)), b)
+}
+
+// Append appends the result of converting src[:n] using t to dst, where
+// n <= len(src). If err == nil, n will be len(src). It calls Reset on t.
+func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
+	if len(dst) == cap(dst) {
+		n := len(src) + len(dst) // It is okay for this to be 0.
+		b := make([]byte, n)
+		dst = b[:copy(b, dst)]
+	}
+	return doAppend(t, len(dst), dst[:cap(dst)], src)
+}
+
+func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
+	t.Reset()
+	pSrc := 0
+	for {
+		nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
+		pDst += nDst
+		pSrc += nSrc
+		if err != ErrShortDst {
+			return dst[:pDst], pSrc, err
+		}
+
+		// Grow the destination buffer, but do not grow as long as we can make
+		// progress. This may avoid excessive allocations.
+ if nDst == 0 { + dst = grow(dst, pDst) + } + } +} diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go new file mode 100644 index 0000000000..2382f4d6da --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/base.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "encoding/xml" + "regexp" + "strconv" +) + +// Elem is implemented by every XML element. +type Elem interface { + setEnclosing(Elem) + setName(string) + enclosing() Elem + + GetCommon() *Common +} + +type hidden struct { + CharData string `xml:",chardata"` + Alias *struct { + Common + Source string `xml:"source,attr"` + Path string `xml:"path,attr"` + } `xml:"alias"` + Def *struct { + Common + Choice string `xml:"choice,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + } `xml:"default"` +} + +// Common holds several of the most common attributes and sub elements +// of an XML element. +type Common struct { + XMLName xml.Name + name string + enclElem Elem + Type string `xml:"type,attr,omitempty"` + Reference string `xml:"reference,attr,omitempty"` + Alt string `xml:"alt,attr,omitempty"` + ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` + Draft string `xml:"draft,attr,omitempty"` + hidden +} + +// Default returns the default type to select from the enclosed list +// or "" if no default value is specified. +func (e *Common) Default() string { + if e.Def == nil { + return "" + } + if e.Def.Choice != "" { + return e.Def.Choice + } else if e.Def.Type != "" { + // Type is still used by the default element in collation. + return e.Def.Type + } + return "" +} + +// GetCommon returns e. It is provided such that Common implements Elem. +func (e *Common) GetCommon() *Common { + return e +} + +// Data returns the character data accumulated for this element. +func (e *Common) Data() string { + e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) + return e.CharData +} + +func (e *Common) setName(s string) { + e.name = s +} + +func (e *Common) enclosing() Elem { + return e.enclElem +} + +func (e *Common) setEnclosing(en Elem) { + e.enclElem = en +} + +// Escape characters that can be escaped without further escaping the string. +var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) + +// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string. +// It assumes the input string is correctly formatted. +func replaceUnicode(s string) string { + if s[1] == '#' { + r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) + return string(r) + } + r, _, _, _ := strconv.UnquoteChar(s, 0) + return string(r) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go new file mode 100644 index 0000000000..2197f8ac26 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go @@ -0,0 +1,130 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run makexml.go -output xml.go + +// Package cldr provides a parser for LDML and related XML formats. +// This package is intended to be used by the table generation tools +// for the various internationalization-related packages. 
+// As the XML types are generated from the CLDR DTD, and as the CLDR standard
+// is periodically amended, this package may change considerably over time.
+// This mostly means that data may appear and disappear between versions.
+// That is, old code should keep compiling for newer versions, but data
+// may have moved or changed.
+// CLDR version 22 is the first version supported by this package.
+// Older versions may not work.
+package cldr // import "golang.org/x/text/unicode/cldr"
+
+import (
+	"fmt"
+	"sort"
+)
+
+// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
+type CLDR struct {
+	parent   map[string][]string
+	locale   map[string]*LDML
+	resolved map[string]*LDML
+	bcp47    *LDMLBCP47
+	supp     *SupplementalData
+}
+
+func makeCLDR() *CLDR {
+	return &CLDR{
+		parent:   make(map[string][]string),
+		locale:   make(map[string]*LDML),
+		resolved: make(map[string]*LDML),
+		bcp47:    &LDMLBCP47{},
+		supp:     &SupplementalData{},
+	}
+}
+
+// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
+func (cldr *CLDR) BCP47() *LDMLBCP47 {
+	return nil
+}
+
+// Draft indicates the draft level of an element.
+type Draft int
+
+const (
+	Approved Draft = iota
+	Contributed
+	Provisional
+	Unconfirmed
+)
+
+var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}
+
+// ParseDraft returns the Draft value corresponding to the given string. The
+// empty string corresponds to Approved.
+func ParseDraft(level string) (Draft, error) {
+	if level == "" {
+		return Approved, nil
+	}
+	for i, s := range drafts {
+		if level == s {
+			return Unconfirmed - Draft(i), nil
+		}
+	}
+	return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
+}
+
+func (d Draft) String() string {
+	return drafts[len(drafts)-1-int(d)]
+}
+
+// SetDraftLevel sets which draft levels to include in the evaluated LDML.
+// Any draft element for which the draft level is higher than lev will be excluded.
+// If multiple draft levels are available for a single element, the one with the
+// lowest draft level will be selected, unless preferDraft is true, in which case
+// the highest draft will be chosen.
+// It is assumed that the underlying LDML is canonicalized.
+func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
+	// TODO: implement
+	cldr.resolved = make(map[string]*LDML)
+}
+
+// RawLDML returns the LDML XML for loc in unresolved form.
+// loc must be one of the strings returned by Locales.
+func (cldr *CLDR) RawLDML(loc string) *LDML {
+	return cldr.locale[loc]
+}
+
+// LDML returns the fully resolved LDML XML for loc, which must be one of
+// the strings returned by Locales.
+func (cldr *CLDR) LDML(loc string) (*LDML, error) {
+	return cldr.resolve(loc)
+}
+
+// Supplemental returns the parsed supplemental data. If no such data was parsed,
+// nil is returned.
+func (cldr *CLDR) Supplemental() *SupplementalData {
+	return cldr.supp
+}
+
+// Locales returns the locales for which there exist files.
+// Valid sublocales for which there is no file are not included.
+// The root locale is always sorted first.
+func (cldr *CLDR) Locales() []string {
+	loc := []string{"root"}
+	hasRoot := false
+	for l := range cldr.locale {
+		if l == "root" {
+			hasRoot = true
+			continue
+		}
+		loc = append(loc, l)
+	}
+	sort.Strings(loc[1:])
+	if !hasRoot {
+		return loc[1:]
+	}
+	return loc
+}
+
+// Get returns the element reached by following the given XPath-style path
+// starting at e.
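+// For example (a sketch; the path shown is illustrative, and alias resolution
+// follows the LDML specification):
+//
+//	numbers, err := cldr.Get(ldml, "//ldml/numbers")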
+func Get(e Elem, path string) (res Elem, err error) {
+	return walkXPath(e, path)
+}
diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go
new file mode 100644
index 0000000000..80ee28d795
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/collate.go
@@ -0,0 +1,359 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cldr
+
+import (
+	"bufio"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// RuleProcessor can be passed to Collation's Process method, which
+// parses the rules and calls the respective method for each rule found.
+type RuleProcessor interface {
+	Reset(anchor string, before int) error
+	Insert(level int, str, context, extend string) error
+	Index(id string)
+}
+
+const (
+	// cldrIndex is a Unicode-reserved sentinel value used to mark the start
+	// of a grouping within an index.
+	// We ignore any rule that starts with this rune.
+	// See http://unicode.org/reports/tr35/#Collation_Elements for details.
+	cldrIndex = "\uFDD0"
+
+	// specialAnchor is the format in which to represent logical reset positions,
+	// such as "first tertiary ignorable".
+	specialAnchor = "<%s/>"
+)
+
+// Process parses the rules for the tailorings of this collation
+// and calls the respective methods of p for each rule found.
+func (c Collation) Process(p RuleProcessor) (err error) {
+	if len(c.Cr) > 0 {
+		if len(c.Cr) > 1 {
+			return fmt.Errorf("multiple cr elements, want 0 or 1")
+		}
+		return processRules(p, c.Cr[0].Data())
+	}
+	if c.Rules.Any != nil {
+		return c.processXML(p)
+	}
+	return errors.New("no tailoring data")
+}
+
+// processRules parses rules in the Collation Rule Syntax defined in
+// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
+func processRules(p RuleProcessor, s string) (err error) {
+	chk := func(s string, e error) string {
+		if err == nil {
+			err = e
+		}
+		return s
+	}
+	i := 0 // Save the line number for use after the loop.
+	scanner := bufio.NewScanner(strings.NewReader(s))
+	for ; scanner.Scan() && err == nil; i++ {
+		for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
+			level := 5
+			var ch byte
+			switch ch, s = s[0], s[1:]; ch {
+			case '&': // followed by <anchor> or '[' <key> ']'
+				if s = skipSpace(s); consume(&s, '[') {
+					s = chk(parseSpecialAnchor(p, s))
+				} else {
+					s = chk(parseAnchor(p, 0, s))
+				}
+			case '<': // sort relation '<'{1,4}, optionally followed by '*'.
+				for level = 1; consume(&s, '<'); level++ {
+				}
+				if level > 4 {
+					err = fmt.Errorf("level %d > 4", level)
+				}
+				fallthrough
+			case '=': // identity relation, optionally followed by *.
+				if consume(&s, '*') {
+					s = chk(parseSequence(p, level, s))
+				} else {
+					s = chk(parseOrder(p, level, s))
+				}
+			default:
+				chk("", fmt.Errorf("illegal operator %q", ch))
+				break
+			}
+		}
+	}
+	if chk("", scanner.Err()); err != nil {
+		return fmt.Errorf("%d: %v", i, err)
+	}
+	return nil
+}
+
+// parseSpecialAnchor parses the anchor syntax which is either of the form
+// ['before' <level>] <anchor>
+// or
+// [