
Commit

Added new tests
lakkeger committed Oct 9, 2023
1 parent 3af55e8 commit f1d78ee
Showing 1 changed file with 253 additions and 81 deletions.
334 changes: 253 additions & 81 deletions tests/test_apply.py
@@ -2,6 +2,7 @@
import os
import boto3
import uuid
import random
import subprocess
from typing import Dict

@@ -11,105 +12,265 @@
LOCALSTACK_ENDPOINT = "http://localhost:4566"


# def test_access_key_override_fallback(monkeypatch):
#     access_key = f"{mock_access_key()}"
#     monkeypatch.setenv("AWS_ACCESS_KEY_ID", access_key)
#     bucket_name = f"{short_uid()}"
#     config = """
#     provider "aws" {
#       region = "eu-west-1"
#     }
#     resource "aws_s3_bucket" "test_bucket" {
#       bucket = "%s"
#     }""" % (bucket_name)
#     deploy_tf_script(config)

#     s3 = client("s3", region_name="eu-west-1")
#     s3_buckets = s3.list_buckets().get("Buckets")
#     s3_bucket_names = [s["Name"] for s in s3_buckets]

#     assert bucket_name in s3_bucket_names

#     s3 = client("s3", region_name="eu-west-1", aws_access_key_id=access_key)
#     s3_buckets = s3.list_buckets().get("Buckets")
#     s3_bucket_names = [s["Name"] for s in s3_buckets]

#     assert bucket_name not in s3_bucket_names


def test_access_key_override_by_env_var(monkeypatch):
    monkeypatch.setenv("CUSTOMISE_ACCESS_KEY", "1")
    access_key = f"{mock_access_key()}"
    monkeypatch.setenv("AWS_ACCESS_KEY_ID", access_key)
    bucket_name = f"{short_uid()}"
    config = """
    provider "aws" {
      region = "eu-west-1"
    }
    resource "aws_s3_bucket" "test_bucket" {
      bucket = "%s"
    }""" % (bucket_name)
    deploy_tf_script(config)

    s3 = client("s3", region_name="eu-west-1")
    s3_buckets = s3.list_buckets().get("Buckets")
    s3_bucket_names = [s["Name"] for s in s3_buckets]

    assert bucket_name not in s3_bucket_names

    s3 = client("s3", region_name="eu-west-1", aws_access_key_id=access_key)
    s3_buckets = s3.list_buckets().get("Buckets")
    s3_bucket_names = [s["Name"] for s in s3_buckets]

    assert bucket_name in s3_bucket_names


def test_access_key_override_by_profile(monkeypatch):
    profile_name = f"{short_uid()}"
    monkeypatch.setenv("CUSTOMISE_ACCESS_KEY", "1")
    monkeypatch.setenv("AWS_PROFILE", profile_name)
    access_key = f"{mock_access_key()}"
    bucket_name = f"{short_uid()}"
    credentials = """
[%s]
aws_access_key_id = %s
aws_secret_access_key = test
region = eu-west-1
""" % (profile_name, access_key)
    with tempfile.TemporaryDirectory() as temp_dir:
        credentials_file = os.path.join(temp_dir, "credentials")
        with open(credentials_file, "w") as f:
            f.write(credentials)

        monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", credentials_file)

        config = """
        provider "aws" {
          region = "eu-west-1"
        }
        resource "aws_s3_bucket" "test_bucket" {
          bucket = "%s"
        }""" % (bucket_name)
        deploy_tf_script(config)

        s3 = client("s3", region_name="eu-west-1", profile_name=profile_name)
        s3_buckets = s3.list_buckets().get("Buckets")
        s3_bucket_names = [s["Name"] for s in s3_buckets]

        assert bucket_name in s3_bucket_names

        s3 = client("s3", region_name="eu-west-1")
        s3_buckets = s3.list_buckets().get("Buckets")
        s3_bucket_names = [s["Name"] for s in s3_buckets]

        assert bucket_name not in s3_bucket_names


def test_access_key_override_by_default_profile(monkeypatch):
    monkeypatch.setenv("CUSTOMISE_ACCESS_KEY", "1")
    access_key = f"{mock_access_key()}"
    bucket_name = f"{short_uid()}"
    credentials = """
[default]
aws_access_key_id = %s
aws_secret_access_key = test
region = eu-west-1
""" % (access_key)
    with tempfile.TemporaryDirectory() as temp_dir:
        credentials_file = os.path.join(temp_dir, "credentials")
        with open(credentials_file, "w") as f:
            f.write(credentials)

        monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", credentials_file)

        config = """
        provider "aws" {
          region = "eu-west-1"
        }
        resource "aws_s3_bucket" "test_bucket" {
          bucket = "%s"
        }""" % (bucket_name)
        deploy_tf_script(config)

        s3 = client("s3", region_name="eu-west-1")
        s3_buckets = s3.list_buckets().get("Buckets")
        s3_bucket_names = [s["Name"] for s in s3_buckets]

        assert bucket_name not in s3_bucket_names

        s3 = client("s3", region_name="eu-west-1", profile_name="default")
        s3_buckets = s3.list_buckets().get("Buckets")
        s3_bucket_names = [s["Name"] for s in s3_buckets]

        assert bucket_name in s3_bucket_names


def test_access_key_override_by_provider(monkeypatch):
    monkeypatch.setenv("CUSTOMISE_ACCESS_KEY", "1")
    access_key = f"{mock_access_key()}"
    bucket_name = f"{short_uid()}"
    config = """
    provider "aws" {
      access_key = "%s"
      region = "eu-west-1"
    }
    resource "aws_s3_bucket" "test_bucket" {
      bucket = "%s"
    }""" % (access_key, bucket_name)
    deploy_tf_script(config)

    s3 = client("s3", region_name="eu-west-1")
    s3_buckets = s3.list_buckets().get("Buckets")
    s3_bucket_names = [s["Name"] for s in s3_buckets]

    assert bucket_name not in s3_bucket_names

    s3 = client("s3", region_name="eu-west-1", aws_access_key_id=access_key)
    s3_buckets = s3.list_buckets().get("Buckets")
    s3_bucket_names = [s["Name"] for s in s3_buckets]

    assert bucket_name in s3_bucket_names


# def test_s3_path_addressing():
#     bucket_name = f"bucket.{short_uid()}"
#     config = """
#     resource "aws_s3_bucket" "test-bucket" {
#       bucket = "%s"
#     }
#     """ % bucket_name
#     deploy_tf_script(config, env_vars={"S3_HOSTNAME": "localhost"})

#     s3 = client("s3")
#     buckets = [b["Name"] for b in s3.list_buckets()["Buckets"]]
#     assert bucket_name in buckets


# def test_use_s3_path_style(monkeypatch):
#     monkeypatch.setenv("S3_HOSTNAME", "s3.localhost.localstack.cloud")
#     import_cli_code()
#     assert not use_s3_path_style() # noqa

#     monkeypatch.setenv("S3_HOSTNAME", "localhost")
#     import_cli_code()
#     assert use_s3_path_style() # noqa

#     # test the case where the S3_HOSTNAME could be a Docker container name
#     monkeypatch.setenv("S3_HOSTNAME", "localstack")
#     import_cli_code()
#     assert use_s3_path_style() # noqa

#     # test the case where the S3_HOSTNAME could be an arbitrary host starting with `s3.`
#     monkeypatch.setenv("S3_HOSTNAME", "s3.internal.host")
#     import_cli_code()
#     assert not use_s3_path_style() # noqa


# def test_provider_aliases():
#     queue_name1 = f"q{short_uid()}"
#     queue_name2 = f"q{short_uid()}"
#     config = """
#     provider "aws" {
#       region = "eu-west-1"
#     }
#     provider "aws" {
#       alias = "us_east_2"
#       region = "us-east-2"
#     }
#     resource "aws_sqs_queue" "queue1" {
#       name = "%s"
#     }
#     resource "aws_sqs_queue" "queue2" {
#       name = "%s"
#       provider = aws.us_east_2
#     }
#     """ % (queue_name1, queue_name2)
#     deploy_tf_script(config)

#     sqs1 = client("sqs", region_name="eu-west-1")
#     sqs2 = client("sqs", region_name="us-east-2")
#     queues1 = [q for q in sqs1.list_queues().get("QueueUrls", [])]
#     queues2 = [q for q in sqs2.list_queues().get("QueueUrls", [])]
#     assert any(queue_name1 in queue_url for queue_url in queues1)
#     assert any(queue_name2 in queue_url for queue_url in queues2)


# def test_s3_backend():
#     state_bucket = f"tf-state-{short_uid()}"
#     state_table = f"tf-state-{short_uid()}"
#     bucket_name = f"bucket.{short_uid()}"
#     config = """
#     terraform {
#       backend "s3" {
#         bucket = "%s"
#         key = "terraform.tfstate"
#         dynamodb_table = "%s"
#         region = "us-east-2"
#         skip_credentials_validation = true
#       }
#     }
#     resource "aws_s3_bucket" "test-bucket" {
#       bucket = "%s"
#     }
#     """ % (state_bucket, state_table, bucket_name)
#     deploy_tf_script(config)

#     # assert that bucket with state file exists
#     s3 = client("s3", region_name="us-east-2")
#     result = s3.list_objects(Bucket=state_bucket)
#     keys = [obj["Key"] for obj in result["Contents"]]
#     assert "terraform.tfstate" in keys

#     # assert that DynamoDB table with state file locks exists
#     dynamodb = client("dynamodb", region_name="us-east-2")
#     result = dynamodb.describe_table(TableName=state_table)
#     attrs = result["Table"]["AttributeDefinitions"]
#     assert attrs == [{"AttributeName": "LockID", "AttributeType": "S"}]

#     # assert that S3 resource has been created
#     s3 = client("s3")
#     result = s3.head_bucket(Bucket=bucket_name)
#     assert result["ResponseMetadata"]["HTTPStatusCode"] == 200


###
@@ -131,11 +292,22 @@ def short_uid() -> str:
    return str(uuid.uuid4())[0:8]


def mock_access_key() -> str:
    # Return a random, zero-padded 12-digit access key for the tests.
    return str(random.randrange(999999999999)).zfill(12)


def client(service: str, aws_access_key_id="test", aws_secret_access_key="test", **kwargs):
    # If a profile is requested, resolve its credentials from the shared
    # credentials file instead of using the defaults above; boto3.client()
    # does not accept profile_name, so drop it after resolving.
    if "profile_name" in kwargs:
        session = boto3.session.Session(
            **kwargs,
        )
        aws_access_key_id = session.get_credentials().access_key
        aws_secret_access_key = session.get_credentials().secret_key
        del kwargs["profile_name"]
    return boto3.client(
        service,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        endpoint_url=LOCALSTACK_ENDPOINT,
        **kwargs,
    )
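
For illustration only (not part of this commit): a minimal sketch of how these helpers are meant to be combined, assuming a LocalStack instance is reachable at http://localhost:4566. The function name, bucket prefix, and region below are made up for the example.

def example_bucket_roundtrip():
    # Hypothetical usage sketch, not in the test suite.
    key = mock_access_key()  # random 12-digit key, e.g. "004521783946"
    s3 = client("s3", region_name="eu-west-1", aws_access_key_id=key)
    s3.create_bucket(
        Bucket=f"demo-{short_uid()}",
        CreateBucketConfiguration={"LocationConstraint": "eu-west-1"},
    )
    # The tests above rely on listings differing per access key: a bucket
    # created under `key` is not returned when listing with the default
    # "test" credentials.
    return [b["Name"] for b in s3.list_buckets()["Buckets"]]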
