NashTech Blog

Table of Contents
chatgpt, ai, man-7867916.jpg

Cluster-on-Cloud-in-Nomad

Deploy the Cluster on the Cloud (AWS)

Hello everyone! In this post, we will learn how to deploy a cluster on the cloud (AWS) using Terraform scripts, with Nomad as the workload orchestrator.

Cluster on Cloud (AWS)

We are using here terraform scripts for the deployment of the Cluster:
Main.tf

terraform {
  required_version = ">= 0.12"
}

# AWS provider; the target region is supplied via the `region` variable.
provider "aws" {
  region = var.region
}

# Look up the default VPC of the region; all security groups below attach to it.
data "aws_vpc" "default" {
  default = true
}

locals {
  # Cloud auto-join string consumed by Nomad's retry_join: instances tagged
  # NomadJoinTag=auto-join discover each other at boot.
  retry_join = "provider=aws tag_key=NomadJoinTag tag_value=auto-join"
}
# Exposes the Nomad HTTP API / web UI (port 4646) to the allowlisted CIDR,
# allows all traffic between members of this group, and unrestricted egress.
resource "aws_security_group" "nomad_ui_ingress" {
  name   = "${var.name}-ui-ingress"
  vpc_id = data.aws_vpc.default.id

  # Nomad HTTP API / web UI
  ingress {
    from_port   = 4646
    to_port     = 4646
    protocol    = "tcp"
    cidr_blocks = [var.allowlist_ip]
  }

  # All traffic between instances that share this security group
  ingress {
    from_port = 0
    to_port   = 0
    protocol  = "-1"
    self      = true
  }

  # Unrestricted outbound traffic
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
# Allows SSH (port 22) from the allowlisted CIDR, intra-group traffic,
# and unrestricted egress.
resource "aws_security_group" "ssh_ingress" {
  name   = "${var.name}-ssh-ingress"
  vpc_id = data.aws_vpc.default.id

  # SSH
  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = [var.allowlist_ip]
  }

  # All traffic between instances that share this security group
  ingress {
    from_port = 0
    to_port   = 0
    protocol  = "-1"
    self      = true
  }

  # Unrestricted outbound traffic
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
# Permits all traffic (any protocol/port) between cluster members, in both
# directions, but only among instances that share this security group.
resource "aws_security_group" "allow_all_internal" {
  name   = "${var.name}-allow-all-internal"
  vpc_id = data.aws_vpc.default.id

  ingress {
    from_port = 0
    to_port   = 0
    protocol  = "-1"
    self      = true
  }

  egress {
    from_port = 0
    to_port   = 0
    protocol  = "-1"
    self      = true
  }
}
# Security group applied only to Nomad client nodes. Add per-application
# ingress rules here (e.g. the nginx example below, or the port-5000 rule).
resource "aws_security_group" "clients_ingress" {
  name   = "${var.name}-clients-ingress"
  vpc_id = data.aws_vpc.default.id

  # All traffic between instances that share this security group
  ingress {
    from_port = 0
    to_port   = 0
    protocol  = "-1"
    self      = true
  }

  # Unrestricted outbound traffic
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Add application ingress rules here.
  # These rules are applied only to the client nodes.
  # nginx example:
  # ingress {
  #   from_port   = 80
  #   to_port     = 80
  #   protocol    = "tcp"
  #   cidr_blocks = ["0.0.0.0/0"]
  # }

  # Example application port exposed to the world (TODO confirm which
  # workload listens on 5000; presumably the demo app scheduled by Nomad).
  ingress {
    from_port   = 5000
    to_port     = 5000
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
# Most recent official Canonical (owner 099720109477) Ubuntu 16.04 EBS/HVM AMI.
# NOTE(review): Ubuntu 16.04 (xenial) is end-of-life — consider moving to a
# supported LTS release (e.g. 22.04) after validating the user-data scripts.
data "aws_ami" "ubuntu" {
  most_recent = true
  owners      = ["099720109477"]

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*"]
  }

  filter {
    name   = "root-device-type"
    values = ["ebs"]
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }

  filter {
    name   = "architecture"
    values = ["x86_64"]
  }
}
# 4096-bit RSA key pair generated in Terraform state; used for SSH access
# to the instances (the private key ends up in state — treat state as secret).
resource "tls_private_key" "private_key" {
  algorithm = "RSA"
  rsa_bits  = 4096
}
# Registers the generated public key with EC2 so instances accept SSH
# connections authenticated by tls_private_key.private_key.
resource "aws_key_pair" "generated_key" {
  key_name   = "tf-key"
  public_key = tls_private_key.private_key.public_key_openssh
}
# Uncomment the private key resource below if you want to SSH to any of the instances
# Run init and apply again after uncommenting:
# terraform init && terraform apply
# Then SSH with the tf-key.pem file:
# ssh -i tf-key.pem ubuntu@INSTANCE_PUBLIC_IP
# resource “local_file” “tf_pem” {
# filename = “${path.module}/tf-key.pem”
# content = tls_private_key.private_key.private_key_pem
# file_permission = “0400”
# }
# Nomad server nodes. Count is driven by var.server_count; each node is
# bootstrapped by user-data-server.sh and auto-joins via the NomadJoinTag.
resource "aws_instance" "server" {
  ami                    = data.aws_ami.ubuntu.id
  instance_type          = var.server_instance_type
  key_name               = aws_key_pair.generated_key.key_name
  vpc_security_group_ids = [aws_security_group.nomad_ui_ingress.id, aws_security_group.ssh_ingress.id, aws_security_group.allow_all_internal.id]
  count                  = var.server_count

  # SSH connection used by the provisioners below.
  connection {
    type        = "ssh"
    user        = "ubuntu"
    private_key = tls_private_key.private_key.private_key_pem
    host        = self.public_ip
  }

  # NomadJoinTag is necessary for nodes to automatically join the cluster.
  tags = merge(
    {
      "Name" = "${var.name}-server-${count.index}"
    },
    {
      "NomadJoinTag" = "auto-join"
    },
    {
      "NomadType" = "server"
    }
  )

  root_block_device {
    volume_type           = "gp2"
    volume_size           = var.root_block_device_size
    delete_on_termination = true
  }

  # Prepare /ops so the shared bootstrap scripts can be copied onto the node.
  provisioner "remote-exec" {
    inline = ["sudo mkdir -p /ops", "sudo chmod 777 -R /ops"]
  }

  provisioner "file" {
    source      = "../shared"
    destination = "/ops"
  }

  # Cloud-init script that installs and configures the Nomad server.
  user_data = templatefile("../shared/data-scripts/user-data-server.sh", {
    server_count  = var.server_count
    region        = var.region
    cloud_env     = "aws"
    retry_join    = local.retry_join
    nomad_version = var.nomad_version
  })

  # IAM profile granting the EC2 describe permissions needed for auto-join.
  iam_instance_profile = aws_iam_instance_profile.instance_profile.name

  metadata_options {
    http_endpoint          = "enabled"
    instance_metadata_tags = "enabled"
  }
}
# Nomad client (worker) nodes. Count is driven by var.client_count; each node
# is bootstrapped by user-data-client.sh and auto-joins via the NomadJoinTag.
resource "aws_instance" "client" {
  ami                    = data.aws_ami.ubuntu.id
  instance_type          = var.client_instance_type
  key_name               = aws_key_pair.generated_key.key_name
  vpc_security_group_ids = [aws_security_group.nomad_ui_ingress.id, aws_security_group.ssh_ingress.id, aws_security_group.clients_ingress.id, aws_security_group.allow_all_internal.id]
  count                  = var.client_count

  # SSH connection used by the provisioners below.
  connection {
    type        = "ssh"
    user        = "ubuntu"
    private_key = tls_private_key.private_key.private_key_pem
    host        = self.public_ip
  }

  # NomadJoinTag is necessary for nodes to automatically join the cluster.
  tags = merge(
    {
      "Name" = "${var.name}-client-${count.index}"
    },
    {
      "NomadJoinTag" = "auto-join"
    },
    {
      "NomadType" = "client"
    }
  )

  root_block_device {
    volume_type           = "gp2"
    volume_size           = var.root_block_device_size
    delete_on_termination = true
  }

  # Extra 50 GiB data volume for client workloads.
  ebs_block_device {
    device_name           = "/dev/xvdd"
    volume_type           = "gp2"
    volume_size           = "50"
    delete_on_termination = true
  }

  # Prepare /ops so the shared bootstrap scripts can be copied onto the node.
  provisioner "remote-exec" {
    inline = ["sudo mkdir -p /ops", "sudo chmod 777 -R /ops"]
  }

  provisioner "file" {
    source      = "../shared"
    destination = "/ops"
  }

  # Cloud-init script that installs and configures the Nomad client.
  user_data = templatefile("../shared/data-scripts/user-data-client.sh", {
    region        = var.region
    cloud_env     = "aws"
    retry_join    = local.retry_join
    nomad_version = var.nomad_version
  })

  # IAM profile granting the EC2 describe permissions needed for auto-join.
  iam_instance_profile = aws_iam_instance_profile.instance_profile.name

  metadata_options {
    http_endpoint          = "enabled"
    instance_metadata_tags = "enabled"
  }
}
# Instance profile that attaches the cluster IAM role to the EC2 instances.
resource "aws_iam_instance_profile" "instance_profile" {
  name_prefix = var.name
  role        = aws_iam_role.instance_role.name
}
# IAM role assumable by EC2 (see the instance_role trust policy below).
resource "aws_iam_role" "instance_role" {
  name_prefix        = var.name
  assume_role_policy = data.aws_iam_policy_document.instance_role.json
}
# Trust policy: allows the EC2 service to assume the instance role.
data "aws_iam_policy_document" "instance_role" {
  statement {
    effect  = "Allow"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ec2.amazonaws.com"]
    }
  }
}
# Inline policy granting the describe permissions that cloud auto-join needs.
resource "aws_iam_role_policy" "auto_discover_cluster" {
  name   = "${var.name}-auto-discover-cluster"
  role   = aws_iam_role.instance_role.id
  policy = data.aws_iam_policy_document.auto_discover_cluster.json
}
# Permissions required by the "provider=aws" retry_join mechanism to discover
# peer instances by tag.
data "aws_iam_policy_document" "auto_discover_cluster" {
  statement {
    effect = "Allow"
    actions = [
      "ec2:DescribeInstances",
      "ec2:DescribeTags",
      "autoscaling:DescribeAutoScalingGroups",
    ]
    resources = ["*"]
  }
}
Variables.tf
variable "name" {
  description = "Prefix used to name various infrastructure components. Alphanumeric characters only."
  default     = "nomad"
}

# No default: the region must be supplied at plan/apply time.
variable "region" {
  description = "The AWS region to deploy to."
}

variable "allowlist_ip" {
  description = "IP to allow access for the security groups (set 0.0.0.0/0 for world)"
  default     = "0.0.0.0/0"
}

variable "server_instance_type" {
  description = "The AWS instance type to use for servers."
  default     = "t2.micro"
}

variable "client_instance_type" {
  description = "The AWS instance type to use for clients."
  default     = "t2.micro"
}

# Three servers give Nomad a quorum that tolerates one server failure.
variable "server_count" {
  description = "The number of servers to provision."
  default     = 3
}

variable "client_count" {
  description = "The number of clients to provision."
  default     = 2
}

variable "root_block_device_size" {
  description = "The volume size of the root block device."
  default     = 16
}

variable "nomad_version" {
  description = "The version of the Nomad binary to install."
  default     = "1.5.0"
}
output.tf
# URL of the Nomad web UI on the first server node.
output "nomad_ip" {
  value = "http://${aws_instance.server[0].public_ip}:4646/ui"
}

Conclusion

We have covered the fundamentals of deploying clusters on the cloud using Nomad and implementing best practices to maintain a healthy environment. By following the recommended strategies such as monitoring cluster performance, automating scaling policies, updating software components, and establishing clear communication channels, you can ensure the efficiency and reliability of your cluster. Moving forward, consider exploring advanced features of Nomad, such as multi-region deployments or integrating with other tools for enhanced automation. Continuously evaluate and optimize your cluster configuration based on evolving requirements and industry best practices to stay ahead in the dynamic environment. Stay tuned for more in-depth guides and tips on cloud computing strategies.

You can find more amazing blogs on this link and the official documentation.
Picture of Vikas Vashisth

Vikas Vashisth

Vikas Vashisth is a Sr. DevOps Engineer at Knoldus | Part of NashTech. With several years of experience in continuous integration and delivery, infrastructure automation, and containerization, he builds scalable, reliable, and highly available environments. He is proficient in tools such as Docker, Kubernetes, Jenkins, Ansible, and Terraform, and has experience working with cloud platforms such as AWS, GCP, and Azure.

Leave a Comment

Your email address will not be published. Required fields are marked *

Suggested Article

Scroll to Top