Terraform Syntax

Provider

I successfully tested the instructions in the module, copied from here:
https://medium.com/scalereal/how-to-use-multiple-aws-providers-in-a-terraform-project-672da074c3eb
I still get a warning, but it doesn't matter.

for_each

for_each is used together with dynamic blocks https://www.terraform.io/docs/language/expressions/dynamic-blocks.html

this example is copied from here https://github.com/linuxacademy/content-hashicorp-certified-terraform-associate-foundations/tree/master/section7-HoL-TF-DynBlocks-Funcs

# Ingress rules consumed by the dynamic "ingress" block of aws_security_group.my-sg:
# one object per rule, carrying its port, protocol and allowed CIDR ranges.
variable "rules" {
  type = list(object({
    port        = number
    proto       = string
    cidr_blocks = list(string)
  }))

  default = [
    { port = 80, proto = "tcp", cidr_blocks = ["0.0.0.0/0"] },
    { port = 22, proto = "tcp", cidr_blocks = ["0.0.0.0/0"] },
    { port = 3689, proto = "tcp", cidr_blocks = ["6.7.8.9/32"] },
  ]
}
resource "aws_security_group" "my-sg" {
  vpc_id = module.vpc.vpc_id
  name   = join("_", ["sg", module.vpc.vpc_id])

  # One ingress block is generated per entry of var.rules.
  dynamic "ingress" {
    for_each = var.rules
    content {
      from_port   = ingress.value.port
      to_port     = ingress.value.port
      protocol    = ingress.value.proto
      cidr_blocks = ingress.value.cidr_blocks
    }
  }

  # Allow all outbound traffic.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "Terraform-Dynamic-SG"
  }
}

variables

more about constraints https://www.terraform.io/docs/language/expressions/type-constraints.html#collection-types

variables

variable "external_port" {
  type    = number
  default = 8080

  validation {
    # BUG FIX: regex("8080|80", ...) was an unanchored substring match, so
    # values such as 801, 1800 or 8081 would also pass validation.
    # contains() checks exact membership instead.
    condition     = contains([80, 8080], var.external_port)
    error_message = "Port Values can only be 8080 or 80."
  }
}

# With `any`, the concrete element type is determined at run time.
variable "data" {
  type    = list(any)
  default = [1, 3, 5]
}

Security Groups

declaration

# Empty security group; rules are attached separately with
# aws_security_group_rule resources so they can be managed independently.
resource "aws_security_group" "mysg" {
  # "${...}"-only interpolation is deprecated since Terraform 0.12;
  # reference values directly.
  vpc_id = module.vpc_app.vpc_id
  name   = "My SG"

  tags = {
    Name = "My SG"
  }
}

Allow ping (ICMP) from another security group
# Ingress rule: allow ICMP from members of another security group.
resource "aws_security_group_rule" "pingfrom" {
  description              = "Ping from"
  type                     = "ingress"
  from_port                = -1 # -1 covers all ICMP types/codes
  to_port                  = -1
  protocol                 = "icmp"
  source_security_group_id = aws_security_group.othersg.id
  security_group_id        = aws_security_group.mysg.id
}

All traffic from another sg

# Ingress rule: allow all traffic from members of another security group.
resource "aws_security_group_rule" "all_traffic" {
  description              = "All traffic from License Server"
  type                     = "ingress"
  from_port                = 0
  to_port                  = 0
  protocol                 = "all" # "all" disables port filtering
  source_security_group_id = aws_security_group.othersg.id
  security_group_id        = aws_security_group.mysg.id
}

allow all outbound traffic
# Egress rule: allow all outbound traffic to anywhere.
resource "aws_security_group_rule" "app_servers_out_all" {
  type              = "egress"
  from_port         = 0
  to_port           = 0
  protocol          = "all"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.mysg.id
}

Data section

declare it once, in one place

# Exposes the identity of the credentials Terraform runs with
# (account_id, arn, user_id); declare once, reference anywhere.
data "aws_caller_identity" "current" {}

you can then use it in resources like this:
 "arn:aws:logs:eu-west-1:${data.aws_caller_identity.current.account_id}:log-group:*:log-stream:*"

data null

## declare it
# NOTE(review): the null_data_source data source is deprecated; prefer a
# `locals` block in new code. Kept here so existing references
# (data.null_data_source.logging_bucket.inputs.bucket_name) keep working.
data "null_data_source" "logging_bucket" {
  inputs = {
    # If not empty, var.logging_bucket_name is used; otherwise the bucket is
    # named "<prefix>-<account-id>". The redundant outer "${...}" wrapper of
    # the original was removed (deprecated since Terraform 0.12).
    bucket_name = var.logging_bucket_name != "" ? var.logging_bucket_name : "${var.default_bucket_name_prefix}-${var.aws_account_id}"
  }
}

#use it
    resources = [
      "arn:aws:s3:::${data.null_data_source.logging_bucket.inputs.bucket_name}",
      "arn:aws:s3:::${data.null_data_source.logging_bucket.inputs.bucket_name}/*"
    ]

Policy with Data

# Policy document granting write access to CloudWatch dashboards and alarms.
data "aws_iam_policy_document" "mycloudwatch" {
  statement {
    actions = [
      "cloudwatch:PutMetricAlarm",
      "cloudwatch:PutDashboard",
      "cloudwatch:DeleteDashboards",
      "cloudwatch:DeleteAlarms",
    ]

    resources = ["*"]
  }
}

# Managed policy rendered from the policy document above.
resource "aws_iam_policy" "mycloudwatch" {
  name        = "mycloudwatch"
  path        = "/"
  description = "CloudWatch Write Dashboard And Alarms"
  # Direct reference; "${...}"-only interpolation is deprecated since 0.12.
  policy = data.aws_iam_policy_document.mycloudwatch.json
}

# Attach the policy to the role passed in via var.role.
resource "aws_iam_role_policy_attachment" "mycloudwatch" {
  role       = var.role
  policy_arn = aws_iam_policy.mycloudwatch.arn
}

s3 bucket data

# Account root ARNs granted read access by the bucket policy below.
variable "ansibled_accounts" {
  # Quoted type constraints (type = "list") are deprecated since
  # Terraform 0.12; declare the element type explicitly.
  type = list(string)
  default = [
    "arn:aws:iam::444444444444:root", # four four four
    "arn:aws:iam::222222222222:root", # two two two
    "arn:aws:iam::111111111111:root", # uno uno uno
  ]
}
resource "aws_s3_bucket" "s3_bucket" {
  bucket = var.bucket_name
  acl    = "private"
  # ... other arguments elided in the original notes ...
  policy = data.aws_iam_policy_document.bucket_policy.json

  # NOTE(review): in AWS provider v4+, acl / policy /
  # server_side_encryption_configuration moved to dedicated
  # aws_s3_bucket_* resources; this inline form targets provider v3.
  server_side_encryption_configuration {
    rule {
      apply_server_side_encryption_by_default {
        sse_algorithm = "AES256"
      }
    }
  }
}
data "aws_iam_policy_document" "bucket_policy" {
  # Cross-account read access for the listed principals.
  statement {
    principals {
      type = "AWS"
      # BUG FIX: identifiers = ["${var.accounts}"] wraps the list in another
      # list under Terraform 0.12+; pass the list variable directly.
      identifiers = var.accounts
    }
    actions = [
      "s3:GetObject",
      "s3:GetObjectAcl"
    ]
    resources = [
      "arn:aws:s3:::${var.bucket_name}/*"
    ]
  }

  # Deny any request that does not use HTTPS.
  statement {
    sid = "enforce the https instead of the http"
    principals {
      type        = "AWS"
      identifiers = ["*"]
    }
    effect  = "Deny"
    actions = ["s3:*"]
    resources = [
      "arn:aws:s3:::${var.bucket_name}",
      "arn:aws:s3:::${var.bucket_name}/*"
    ]
    condition {
      test     = "Bool"
      variable = "aws:SecureTransport"
      values   = ["false"]
    }
  }
}

Module

count and if

in the variables.tf

# Sentinel default: while left at "dont_create", a dependent resource can use
# count = 0 (see the aws_route53_record example below) and skip creation.
variable "dns_zone_id" {
  type    = string
  default = "dont_create"
}

in the main.tf
resource "aws_route53_record" "ec2record" {
    count = "${var.dns_zone_id == "dont_create" ? 0 : 1}"

Generic

elastic ip and depends on

you can also use a profile inside the ~/.aws/credentials file in linux and refer it

file project.tf (it can be any name because all the .tf files are evaluated)

provider "aws" {
  # NOTE(review): shared_credentials_file (singular) is deprecated in AWS
  # provider v4+ in favor of shared_credentials_files (a list); migrate when
  # upgrading the provider.
  shared_credentials_file = var.cred-file
  profile                 = var.profile
  region                  = var.region
}

# Minimal EC2 instance used by the elastic-IP example below.
resource "aws_instance" "example" {
  instance_type = "t2.micro"
  ami           = "ami-7172b611"
}

resource "aws_eip" "ip" {
  instance = aws_instance.example.id

  # FIX: the quoted-string form depends_on = ["aws_instance.example"] is
  # invalid in Terraform 0.12+; use a bare resource reference. The explicit
  # depends_on is redundant (the `instance` argument already implies it) but
  # is kept to mirror the original example.
  depends_on = [aws_instance.example]
}

file variables.tf

# Region the example deploys into.
variable "region" {
  default = "sa-east-1"
}

variable "profile" {
  description = "AWS credentials profile you want to use"
  default     = "default"
}

variable "cred-file" {
  description = "the credentials files in your machine you can't use the ~ char"
  default     = "/home/vagrant/.aws/credentials"
}

many machines with the same code

# Create var.number identical EC2 machines.
resource "aws_instance" "genericinstances" {
  count                       = var.number
  ami                         = "ami-xxxxxx"
  instance_type               = "t2.micro"
  associate_public_ip_address = false # bool, not the string "false"
  subnet_id                   = "subnet-xxxxxx"
  # Sequential addresses starting at .4 — the first four addresses of every
  # AWS subnet are reserved.
  private_ip             = "172.31.48.${count.index + 4}"
  vpc_security_group_ids = ["sg-xxxxxx"]
  key_name               = "mypemkey"

  tags = {
    Name = "MachineNumber-${count.index}"
  }
}
# Associate an elastic IP with each instance.
resource "aws_eip" "ips" {
  count = var.number
  # Index syntax replaces the legacy element(list.*.id, i) pattern.
  instance = aws_instance.genericinstances[count.index].id
}
# Define a DNS A record pointing at each instance's private IP.
resource "aws_route53_record" "records" {
  count   = var.number
  zone_id = "xxxxxxxxxxx"
  name    = "Mymachinename-${count.index}.mydomain.net"
  type    = "A"
  ttl     = "10"
  records = [aws_instance.genericinstances[count.index].private_ip]
}

Advanced user data. This can be used inside an EC2 instance or a launch configuration; the indentation is VERY IMPORTANT.

  # Appends an elasticsearch output section to an existing logstash config
  # and restarts the container. The for-loop first deletes the last 8 lines
  # of the file (sed '$ d' removes the final line, run 8 times).
  # NOTE(review): credentials are placeholders — do not commit real ones.
  user_data = <<HEREDOC
     #!/bin/bash
     FILENAME=/home/ec2-user/logstash-config/logstash.conf
     for (( c=1; c<=8; c++ )) do sed -i '$ d' $FILENAME ; done
     echo '   hosts => ["${var.cloudelasticcom}:9243"] ' >> $FILENAME
     echo '   ssl => true ' >> $FILENAME
     echo '   user => "myusername" ' >> $FILENAME
     echo '   password => "mypassword" ' >> $FILENAME
     echo ' } ' >> $FILENAME
     echo '} ' >> $FILENAME
     docker restart logstash
HEREDOC

if you indent correctly you will have this color inside Atom
userdata-terraform.jpg
if you choose a wrong indentation you will have these colors
wrong-terraform-userdata.jpg

windows template with power shell to change the search name

resource "aws_instance" "winmgt" {
  ami                         = var.win10AmiID
  instance_type               = "t2.large"
  associate_public_ip_address = false # bool, not the string "false"
  subnet_id                   = var.Subnet-Destination-id
  vpc_security_group_ids      = [aws_security_group.lab1.id]
  key_name                    = var.key_name

  tags = {
    Name            = "${var.EnvIdentifier}-win10"
    OperativeSystem = "Windows 10"
  }

  # PowerShell in user_data runs at first boot; here it sets the DNS suffix
  # search order of the network adapter.
  user_data = " <powershell> Invoke-WmiMethod -class win32_networkadapterconfiguration -name setdnssuffixsearchorder -argumentlist @('${var.region}.compute.internal', '${var.region}.ec2-utilities.amazonaws.com', '${var.EnvIdentifier}'), $null </powershell>"
}

Workaround the sns problem

as it is described in this page https://www.terraform.io/docs/providers/aws/r/sns_topic_subscription.html

Unsupported protocols include the following:

email -- delivery of message via SMTP
email-json -- delivery of JSON-encoded message via SMTP
These are unsupported because the endpoint needs to be authorized and does not generate an ARN until the target email address has been validated. This breaks the Terraform model and as a result are not currently supported.

so to workaround this problem you can do like this
# Workaround: Terraform cannot manage email subscriptions (no ARN until the
# address is confirmed), so shell out to the AWS CLI after topic creation.
# NOTE(review): local-exec runs only when the topic is created; the
# subscription is not tracked in Terraform state.
resource "aws_sns_topic" "alarm" {
  name = "alarming"
  provisioner "local-exec" {
    command = "aws sns subscribe --topic-arn ${self.arn} --protocol email --notification-endpoint myemail@mydomain.mine"
  }
}

Tags

 # FIX: the map() function was removed in Terraform 0.12+; use an object
 # literal and pass the extra maps straight to merge() — later arguments win
 # on duplicate keys, so tfvars overrides still apply.
 tags = merge(
   {
     Name             = "DBServer"
     BackupAMI        = "Yes"
     CloudWatchAlarms = "Yes"
     OS               = "Windows"
     VPC              = "mybuilding"
   },
   var.scheduling,
   var.backup,
 )

in variables.tf
variable "scheduling" {
  # Quoted type constraints (type = "map") are deprecated since
  # Terraform 0.12; declare the element type explicitly.
  type = map(string)
  default = {
    Scheduling = "Managed"
    #    Schedule-Mon      = "UTC 0400-2359"
    Schedule-Weekday = "UTC 0700-2000"
    Schedule-Sat     = "UTC 0700-1600"
  }
}

variable "backup" {
  # Quoted type constraints (type = "map") are deprecated since
  # Terraform 0.12; declare the element type explicitly.
  type = map(string)
  default = {
    BT-Backup             = "True"
    Backup-Daily          = "True"
    Backup-Incremental    = "4 hours"
    Retention-Daily       = "21 days"
    Retention-Incremental = "2 days"
  }
}

in terraform.tfvars
# NOTE(review): assigning a map variable in tfvars replaces the ENTIRE
# default value (no merge) — after this, only the Scheduling key remains.
scheduling = {
  Scheduling        = "Manual"
}

Null Resources

save the result on a file on the hard disk

# Runs a local command during apply: appends a module output to a file on
# the machine running Terraform. NOTE(review): without a `triggers`
# argument this executes only when the null_resource is first created.
resource "null_resource" "null_id" {
    provisioner "local-exec" {
              command = "echo ${module.mymodule.name} >> myoutput.txt"
    }
}

Maps and Lookups

With `terraform console` you can inspect values and simulate Terraform's behaviour:

export TF_VAR_env=prod
terraform console
> lookup(var.ext_port, var.env)
80
> lookup(var.container_name, var.env)
prod_blog

Typical Lambda Function

# Base name shared by the zip artifact, the handler file and the function name.
variable "filename" {  default = "discover-outdated-launchconfiguration" }

resource "aws_lambda_function" "discover" {
  filename = "${path.module}/${var.filename}.py.zip"
  # Re-deploys whenever the zip content changes.
  source_code_hash = filebase64sha256("${path.module}/${var.filename}.py.zip")
  function_name    = "${var.prefix}-${var.filename}"
  role             = var.role_arn_for_this_account
  handler          = "${var.filename}.lambda_handler"
  # NOTE(review): python3.7 reached end-of-support in AWS Lambda; move to a
  # currently supported runtime when the code allows it.
  runtime     = "python3.7"
  publish     = true # FIX: bool, not the string "true"
  timeout     = 300
  memory_size = 128
  description = "Created by Terraform, this discover outadate launch configuration"

  environment {
    variables = {
      role_arn_cross_account_lists = var.role_arn_cross_account_lists
    }
  }
}

#### trust policy for assume role
  # Trust policy: lets principals from the digitaltwin account assume the
  # role this attribute belongs to (the enclosing resource is not shown here).
  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
         "AWS": "arn:aws:iam::${local.digitaltwin_account_id}:root"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
#####
# Execution role for the Lambda function; trusts the Lambda service.
resource "aws_iam_role" "lambda" {
  name = "${var.prefix}-${var.filename}"

  # jsonencode keeps the policy in HCL (syntax-checked, no heredoc
  # indentation pitfalls) and renders an equivalent JSON document.
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect = "Allow"
        Principal = {
          Service = "lambda.amazonaws.com"
        }
        Action = "sts:AssumeRole"
      }
    ]
  })
}

# Attach the AWS-managed EC2 read-only policy to the Lambda role.
resource "aws_iam_role_policy_attachment" "readonly" {
  role       = aws_iam_role.lambda.id
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess"
}

resource "aws_iam_role_policy" "enable-assume-role-in-other-account" {
  name = "${var.prefix}-assume_role_other_accounts"
  role = aws_iam_role.lambda.id

  # NOTE(review): Resource "*" lets this role attempt sts:AssumeRole against
  # ANY role; scope it to the target account role ARNs if possible.
  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect   = "Allow"
        Action   = ["sts:AssumeRole"]
        Resource = ["*"]
      }
    ]
  })
}

# Inline policy granting the standard Lambda logging permissions: create
# log groups/streams and put log events in CloudWatch Logs.
resource "aws_iam_role_policy" "allowcloudwatchlogging" {
  name = "${var.prefix}-Enable-login"
  role = aws_iam_role.lambda.id
  policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "logs:CreateLogGroup",
        "logs:CreateLogStream",
        "logs:PutLogEvents"
      ],
      "Resource": "arn:aws:logs:*:*:*"
    }
  ]
}
POLICY
}
Salvo diversa indicazione, il contenuto di questa pagina è sotto licenza Creative Commons Attribution-ShareAlike 3.0 License