- Version
- Downloads: 2
- File Size: 4.00 KB
- File Count: 1
- Created: 8 December 2025
- Last Updated: 8 January 2026
S4.3_main.tf
# Terraform settings: remote state backend + provider requirements.
# The original had two separate `terraform {}` blocks; Terraform merges them,
# but a single block is the conventional layout.
terraform {
  # Remote state in an Azure Storage container so state is shared and locked.
  backend "azurerm" {
    resource_group_name  = "tfstate-rg"
    storage_account_name = "tfstate2063319063" # Replace it with storage account name from Azure, create in previous step
    container_name       = "tfstate"
    key                  = "terraform.tfstate"
  }

  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "4.34.0" # exact pin; consider "~> 4.34" to pick up patch releases
    }
  }
}
# AzureRM provider configuration.
provider "azurerm" {
# Configuration options
# NOTE(review): subscription ID is hard-coded and committed to source control.
# Prefer the ARM_SUBSCRIPTION_ID environment variable or an input variable so
# the code is portable across subscriptions.
subscription_id = "075e507b-a8ca-4bef-907c-0fc1d7a69fa6"
features {}
}
# Parameters
# Central naming and configuration values shared by every resource below.
# NOTE(review): "Puplic" is a typo for "Public"; the names are kept as-is here
# because they are referenced throughout this file — rename in a dedicated change.
locals {
resource_group_VM = "${var.name_prefix}-RG-DEV-VMs"
resource_group_storage = "${var.name_prefix}-RG-DEV-ST"
resource_group_Databricks = "${var.name_prefix}-RG-DEV-DBR"
location = var.location
StorageName = var.storage_account_name
ContainerName = var.storage_container_name
Puplic_ip_Dev_Kafka_server = "Dev-kafka-Server-ip"
# NOTE(review): the Flask server's IP name says "kafka" — looks like a
# copy/paste slip; fixing the string now would force Azure to recreate the IP.
Puplic_ip_Dev_Flask_server = "Dev-kafka-Flask-ip"
NetworkInterface_dev_kafka = "Dev-kafka-Server-NI"
# NOTE(review): mixes "-" and "_" ("Dev-flask_Server-NI") unlike its peers.
NetworkInterface_dev_Flask = "Dev-flask_Server-NI"
Security_group_Dev_Kafka_server = "Dev-Kafka-Server-nsg"
Security_group_Dev_Flask_server = "Dev-Flask-Server-nsg"
Ubuntu_VM_Kafka = "DEV-Kafka-Server-vm"
Ubuntu_VM_Flask = "DEV-Flask-Server-vm"
Databricks_Work_Space = "${var.name_prefix}-Databricks-ws"
# Virtual network definition; address space comes from var.vnet_cidr.
virtual_network = {
name = "${var.name_prefix}-kafka-api-vnet-test"
address_space = var.vnet_cidr
}
# Single default subnet; consumed positionally (subnets[0]) by the subnet resource.
subnets = [
{
name = "default"
address_prefixes = [var.subnet_cidr]
}
]
# Inbound NSG rules for the Kafka server: SSH, ICMP, broker port 9092.
kafka_nsg_rules = {
ssh = {
name = "SSH"
description = "Training: allow SSH from anywhere. Prod: restrict by IP/CIDR via source_address_prefix."
priority = 300
protocol = "Tcp"
dest_port = "22"
}
icmp = {
name = "ICMP"
description = "Allow ICMP (ping) for diagnostics."
priority = 310
protocol = "Icmp"
dest_port = "*"
}
kafka = {
name = "AllowAnyCustomer9092Inbound"
description = "Kafka broker port. Consider restricting source in prod."
priority = 320
protocol = "Tcp"
dest_port = "9092"
}
}
# Inbound NSG rules for the Flask server: SSH, ICMP, dev-server port 5000.
flask_nsg_rules = {
ssh = {
name = "SSH"
description = "Training: allow SSH from anywhere. Prod: restrict via source_address_prefix."
priority = 300
protocol = "Tcp"
dest_port = "22"
}
icmp = {
name = "ICMP"
description = "Allow ICMP (ping) for diagnostics."
priority = 305
protocol = "Icmp"
dest_port = "*"
}
flask = {
name = "Flask_5000_Inbound"
description = "Flask dev server. In prod, front with 80/443 and close 5000."
priority = 310
protocol = "Tcp"
dest_port = "5000"
}
}
# === NSGs data model (one entry per server) ===
nsgs = {
kafka = {
name = local.Security_group_Dev_Kafka_server
nsg_rules = local.kafka_nsg_rules
}
flask = {
name = local.Security_group_Dev_Flask_server
nsg_rules = local.flask_nsg_rules
}
}
# === Public IPs data model (one entry per server) ===
pips = {
kafka = {
name = local.Puplic_ip_Dev_Kafka_server
}
flask = {
name = local.Puplic_ip_Dev_Flask_server
}
}
# === NICs data model (one entry per server) ===
nics = {
kafka = {
name = local.NetworkInterface_dev_kafka
pip_key = "kafka" # which Public IP to attach
subnet_key = "default" # which subnet key to use (if you looped subnets)
}
flask = {
name = local.NetworkInterface_dev_Flask
pip_key = "flask"
subnet_key = "default"
}
}
# === VMs data model (one entry per server) ===
# NOTE(review): plain-text admin passwords are committed to source control —
# acceptable for a demo only; use SSH keys or a secret store for anything real.
vms = {
kafka = {
vm_name = local.Ubuntu_VM_Kafka
nic_key = "kafka" # which NIC to attach (matches local.nics key)
size = "Standard_D2s_v3"
admin_password = "Terraform-Kafka-2025" # demo only
os_disk_type = "Premium_LRS"
image = {
publisher = "Canonical"
offer = "0001-com-ubuntu-server-jammy"
sku = "22_04-lts-gen2"
version = "latest"
}
}
flask = {
vm_name = local.Ubuntu_VM_Flask
nic_key = "flask"
size = "Standard_D2s_v3"
admin_password = "Terraform-Flask-2025" # demo only
os_disk_type = "StandardSSD_LRS"
# NOTE(review): 18.04-LTS reached end of standard support; the Kafka VM uses
# 22.04 — consider aligning both on the jammy image.
image = {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "18.04-LTS"
version = "latest"
}
}
}
}
#Resource Groups
# Resource group for the VMs and all networking resources (VNet, NICs, NSGs, IPs).
resource "azurerm_resource_group" "rg_VMs" {
name = local.resource_group_VM
location = local.location
}
# Resource group for the storage account and its container.
resource "azurerm_resource_group" "rg_Storage" {
name = local.resource_group_storage
location = local.location
}
# Resource group for the Databricks workspace (workspace resource currently commented out).
resource "azurerm_resource_group" "rg_DataBricks" {
name = local.resource_group_Databricks
location = local.location
}
# Storage
# Storage account for the Kafka/API data (StorageV2, locally-redundant).
resource "azurerm_storage_account" "st_apikafkastorage" {
  name = local.StorageName
  # Reference the RG resource directly so Terraform infers the dependency —
  # the original passed the name string and needed an explicit depends_on.
  resource_group_name      = azurerm_resource_group.rg_Storage.name
  location                 = azurerm_resource_group.rg_Storage.location
  account_tier             = "Standard"
  account_replication_type = "LRS"
  account_kind             = "StorageV2"
  min_tls_version          = "TLS1_2" # provider default in 4.x, made explicit
}
# Create container within the storage account
resource "azurerm_storage_container" "StContainer" {
  name                  = local.ContainerName
  storage_account_id    = azurerm_storage_account.st_apikafkastorage.id # Use ID instead of name, required from provider >= 4.x
  container_access_type = "private"
  # No depends_on needed: storage_account_id already creates the dependency
  # chain through the storage account to its resource group.
}
# Virtual network hosting both servers' NICs.
resource "azurerm_virtual_network" "VirtualNetwork1" {
  name = local.virtual_network.name
  # Reference the RG resource so the dependency is implicit (the original used
  # the name string plus an explicit depends_on).
  location            = azurerm_resource_group.rg_VMs.location
  resource_group_name = azurerm_resource_group.rg_VMs.name
  address_space       = [local.virtual_network.address_space]
  #dns_servers        = []
}
# Add the default subnet
resource "azurerm_subnet" "DefaultSubnet1" {
  name                = local.subnets[0].name
  resource_group_name = azurerm_resource_group.rg_VMs.name
  # Reference the VNet resource (not the local name string) so Terraform
  # orders creation implicitly — no explicit depends_on needed.
  virtual_network_name = azurerm_virtual_network.VirtualNetwork1.name
  address_prefixes     = local.subnets[0].address_prefixes
}
###################################################################Virtual Machines ###################################################################################
# Public IPs — one static IP per server (keys: kafka, flask).
resource "azurerm_public_ip" "pip" {
  for_each = local.pips
  name     = each.value.name
  # RG referenced by resource attribute so the dependency is implicit
  # (original used the name string plus an explicit depends_on).
  resource_group_name = azurerm_resource_group.rg_VMs.name
  location            = azurerm_resource_group.rg_VMs.location
  allocation_method   = "Static"
}
# Network interfaces — one per server, each bound to the default subnet and
# its matching public IP (each.value.pip_key indexes azurerm_public_ip.pip).
resource "azurerm_network_interface" "nic" {
  for_each            = local.nics
  name                = each.value.name
  location            = azurerm_resource_group.rg_VMs.location
  resource_group_name = azurerm_resource_group.rg_VMs.name

  ip_configuration {
    name                          = "internal"
    subnet_id                     = azurerm_subnet.DefaultSubnet1.id
    private_ip_address_allocation = "Dynamic"
    public_ip_address_id          = azurerm_public_ip.pip[each.value.pip_key].id
  }
  # No explicit depends_on: subnet_id and public_ip_address_id already
  # establish the full dependency chain back to the resource group.
}
# Network security groups — one per server; rules come from the data model in
# locals (local.nsgs -> kafka_nsg_rules / flask_nsg_rules).
resource "azurerm_network_security_group" "nsg" {
  for_each = local.nsgs
  name     = each.value.name
  # RG referenced by resource attribute so the dependency is implicit
  # (original used the name string plus an explicit depends_on).
  location            = azurerm_resource_group.rg_VMs.location
  resource_group_name = azurerm_resource_group.rg_VMs.name

  # One inbound Allow rule per entry in this server's rule map.
  dynamic "security_rule" {
    for_each = each.value.nsg_rules
    content {
      name                       = security_rule.value.name
      description                = security_rule.value.description
      priority                   = security_rule.value.priority
      direction                  = "Inbound"
      access                     = "Allow"
      protocol                   = security_rule.value.protocol
      source_port_range          = "*"
      destination_port_range     = security_rule.value.dest_port
      source_address_prefix      = "*" # training setup — restrict by CIDR in prod
      destination_address_prefix = "*"
    }
  }
}
# NIC-level NSG association: attaches each server's NSG to its NIC.
# (for_each covers every entry in local.nics — both Kafka and Flask — relying
# on local.nics and local.nsgs sharing the same keys.)
resource "azurerm_network_interface_security_group_association" "assoc" {
for_each = local.nics
network_interface_id = azurerm_network_interface.nic[each.key].id
network_security_group_id = azurerm_network_security_group.nsg[each.key].id
}
# Virtual Machine definition — one Linux VM per entry in local.vms; the NIC is
# looked up via each.value.nic_key.
resource "azurerm_linux_virtual_machine" "vm" {
  for_each = local.vms
  name     = each.value.vm_name
  # RG referenced by resource attribute so the dependency is implicit
  # (original passed the name string, which Terraform cannot track).
  resource_group_name = azurerm_resource_group.rg_VMs.name
  location            = azurerm_resource_group.rg_VMs.location
  size                = each.value.size
  admin_username      = var.admin_username
  # NOTE(review): plain-text passwords in source and password auth enabled —
  # demo only; prefer admin_ssh_key with disable_password_authentication = true.
  admin_password                  = each.value.admin_password # demo only
  disable_password_authentication = false

  network_interface_ids = [
    azurerm_network_interface.nic[each.value.nic_key].id
  ]

  os_disk {
    caching              = "ReadWrite"
    storage_account_type = each.value.os_disk_type
    disk_size_gb         = 40
  }

  source_image_reference {
    publisher = each.value.image.publisher
    offer     = each.value.image.offer
    sku       = each.value.image.sku
    version   = each.value.image.version
  }
}
/*
########################################################################DataBricks############################################################################################
# Databricks Workspace (Standard tier, default networking)
resource "azurerm_databricks_workspace" "dbws" {
name = local.Databricks_Work_Space # Name of the Databricks workspace
location =local.location # Azure region where the workspace will be deployed; Example: "eastus" or "westeurope"
resource_group_name =local.resource_group_Databricks # Resource Group that *you* own where the workspace definition lives
sku = "standard" # Pricing tier for Databricks; Options: "standard", "premium", "trial"; # "Standard" = Apache Spark + basic features (good for training)# Pricing tier
managed_resource_group_name = "databricks-ws-demo-mrg" # Special "Managed Resource Group" (MRG) that Azure creates/controls
# It will contain hidden infra: VNet, NSGs, storage, NAT gateway, etc.
# Do not delete or manually edit resources inside this group
public_network_access_enabled = true # Controls if workspace endpoints are reachable over the public internet;true = public access allowed ;false = only private endpoints (advanced networking setup)
}
*/
